CombinedText
stringlengths
4
3.42M
package main import ( "encoding/json" "io/ioutil" "log" "strconv" "github.com/spf13/cobra" "github.com/bobinette/papernet" "github.com/bobinette/papernet/errors" ) func init() { PaperCommand.PersistentFlags().String("store", "data/papernet.db", "address of the bolt db file") PaperCommand.PersistentFlags().String("index", "data/papernet.index", "address of the bolt db file") SavePaperCommand.PersistentFlags().String("file", "", "filename to load the payload") SearchCommand.PersistentFlags().String("file", "", "filename to load the payload") PaperCommand.AddCommand(&SavePaperCommand) PaperCommand.AddCommand(&PaperAllCommand) PaperCommand.AddCommand(&DeletePaperCommand) PaperCommand.AddCommand(&SearchCommand) RootCmd.AddCommand(&PaperCommand) } var PaperCommand = cobra.Command{ Use: "paper", Short: "Find papers based on their IDs", Long: "Find papers based on their IDs", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return errors.New("This command expects ids as arguments") } if args[0] == "help" { return cmd.Help() } ids, err := ints(args) if err != nil { return errors.New("ids should be integers", errors.WithCause(err)) } addr := cmd.Flag("store").Value.String() store, f, err := createStore(addr) defer f() if err != nil { return errors.New("error opening db", errors.WithCause(err)) } papers, err := store.Get(ids...) 
if err != nil { return errors.New("error getting papers", errors.WithCause(err)) } pj, err := json.Marshal(papers) if err != nil { return errors.New("error marshalling results", errors.WithCause(err)) } cmd.Println(string(pj)) return nil }, } var PaperAllCommand = cobra.Command{ Use: "all", Short: "List all the papers", Long: "List all the papers", Run: func(cmd *cobra.Command, args []string) { if len(args) > 0 && args[0] == "help" { cmd.Help() return } addr := cmd.Flag("store").Value.String() store, f, err := createStore(addr) defer f() if err != nil { log.Fatalln(errors.New("error opening db", errors.WithCause(err))) } papers, err := store.List() if err != nil { log.Fatalln(errors.New("error getting papers", errors.WithCause(err))) } data, err := json.Marshal(papers) if err != nil { log.Fatalln(errors.New("error marshalling results", errors.WithCause(err))) } cmd.Println(string(data)) }, } var DeletePaperCommand = cobra.Command{ Use: "delete", Short: "Delete papers based on their IDs", Long: "Delete papers based on their IDs", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return errors.New("This command expects ids as arguments") } if args[0] == "help" { return cmd.Help() } ids, err := ints(args) if err != nil { return errors.New("ids should be integers", errors.WithCause(err)) } addr := cmd.Flag("store").Value.String() store, f, err := createStore(addr) defer f() if err != nil { return errors.New("error opening db", errors.WithCause(err)) } addr = cmd.Flag("index").Value.String() index, f, err := createIndex(addr) defer f() if err != nil { return errors.New("error opening index", errors.WithCause(err)) } for _, id := range ids { err = store.Delete(id) if err != nil { return errors.New("error deleting in store papers", errors.WithCause(err)) } err = index.Delete(id) if err != nil { return errors.New("error deleting in index papers", errors.WithCause(err)) } cmd.Printf("<Paper %d> deleted\n", id) } return nil }, } var SavePaperCommand = 
cobra.Command{ Use: "save", Short: "Save a paper", Long: "Insert or update a paper based on the argument payload or a file", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 1 && args[0] == "help" { return cmd.Help() } addr := cmd.Flag("store").Value.String() store, f, err := createStore(addr) defer f() if err != nil { return errors.New("error opening db", errors.WithCause(err)) } filename := cmd.Flag("file").Value.String() var data []byte if filename != "" { data, err = ioutil.ReadFile(filename) if err != nil { return errors.New("error reading payload file", errors.WithCause(err)) } } else { if len(args) != 1 { return errors.New("when no filename is specified, the payload must be passed as argument") } data = []byte(args[0]) } var paper papernet.Paper err = json.Unmarshal(data, &paper) if err != nil { return errors.New("error unmarshalling payload", errors.WithCause(err)) } err = store.Upsert(&paper) if err != nil { return errors.New("error saving paper", errors.WithCause(err)) } cmd.Println("done") return nil }, } var SearchCommand = cobra.Command{ Use: "search", Short: "Search papers", Long: "Search papers based on the argument payload or a file", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 1 && args[0] == "help" { return cmd.Help() } addr := cmd.Flag("store").Value.String() store, f, err := createStore(addr) defer f() if err != nil { return errors.New("error opening db", errors.WithCause(err)) } addr = cmd.Flag("index").Value.String() index, f, err := createIndex(addr) defer f() if err != nil { return errors.New("error opening index", errors.WithCause(err)) } filename := cmd.Flag("file").Value.String() var data []byte if filename != "" { data, err = ioutil.ReadFile(filename) if err != nil { return errors.New("error reading payload file", errors.WithCause(err)) } } else { if len(args) != 1 { return errors.New("when no filename is specified, the payload must be passed as argument") } data = []byte(args[0]) } var 
search papernet.PaperSearch err = json.Unmarshal(data, &search) if err != nil { return errors.New("error unmarshalling payload", errors.WithCause(err)) } res, err := index.Search(search) if err != nil { return errors.New("error querying index", errors.WithCause(err)) } papers, err := store.Get(res.IDs...) if err != nil { return errors.New("error retrieving papers", errors.WithCause(err)) } pj, err := json.Marshal(papers) if err != nil { return errors.New("error marshalling results", errors.WithCause(err)) } cmd.Println(string(pj)) return nil }, } func ints(strs []string) ([]int, error) { ints := make([]int, len(strs)) for i, str := range strs { n, err := strconv.Atoi(str) if err != nil { return nil, err } ints[i] = n } return ints, nil } Migrate paper commands package main import ( "encoding/json" "io/ioutil" "strconv" "strings" "github.com/BurntSushi/toml" "github.com/spf13/cobra" ppnBolt "github.com/bobinette/papernet/bolt" "github.com/bobinette/papernet/jwt" "github.com/bobinette/papernet/auth/cayley" authServices "github.com/bobinette/papernet/auth/services" "github.com/bobinette/papernet/papernet" "github.com/bobinette/papernet/papernet/auth" "github.com/bobinette/papernet/papernet/bleve" "github.com/bobinette/papernet/papernet/bolt" "github.com/bobinette/papernet/papernet/services" ) type PaperConfig struct { Paper struct { Bolt struct { Store string `toml:"store"` } `toml:"bolt"` Bleve struct { Store string `toml:"store"` } `toml:"bleve"` } `toml:"paper"` // Legacy Bolt struct { Store string `toml:"store"` } `toml:"bolt"` } var ( paperConfig PaperConfig paperRepository papernet.PaperRepository paperIndex papernet.PaperIndex tagService *services.TagService paperService *services.PaperService ) func init() { PaperCommand.AddCommand(&SavePaperCommand) PaperCommand.AddCommand(&DeletePaperCommand) PaperCommand.AddCommand(&SearchCommand) PaperCommand.AddCommand(&PaperMigrateCommand) inheritPersistentPreRun(&SavePaperCommand) 
inheritPersistentPreRun(&DeletePaperCommand) inheritPersistentPreRun(&SearchCommand) inheritPersistentPreRun(&PaperMigrateCommand) inheritPersistentPreRun(&PaperCommand) RootCmd.AddCommand(&PaperCommand) } var PaperCommand = cobra.Command{ Use: "paper", Short: "Find papers based on their IDs", Long: "Find papers based on their IDs", Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, PersistentPreRun: func(cmd *cobra.Command, args []string) { // Read configuration file data, err := ioutil.ReadFile(configFile) if err != nil { logger.Fatal("could not read configuration file:", err) } // Load user service err = toml.Unmarshal(data, &authConfig) if err != nil { logger.Fatal("error unmarshalling configuration:", err) } // Read key file keyData, err := ioutil.ReadFile(authConfig.Auth.KeyPath) if err != nil { logger.Fatal("could not open key file:", err) } // Create token encoder var key struct { Key string `json:"k"` } err = json.Unmarshal(keyData, &key) if err != nil { logger.Fatal("could not read key file:", err) } tokenEncoder := jwt.NewEncodeDecoder([]byte(key.Key)) // Create user repository store, err := cayley.NewStore(authConfig.Auth.Cayley.Store) if err != nil { logger.Fatal("could not open user graph:", err) } userRepository := cayley.NewUserRepository(store) userService = authServices.NewUserService(userRepository, tokenEncoder) // Load paper service err = toml.Unmarshal(data, &paperConfig) if err != nil { logger.Fatal("error unmarshalling configuration:", err) } // Create paper repository and tag index boltDriver := bolt.Driver{} if boltDriver.Open(paperConfig.Paper.Bolt.Store); err != nil { logger.Fatal("could not open bolt driver:", err) } paperRepo := bolt.PaperRepository{Driver: &boltDriver} paperRepository = &paperRepo tagIndex := bolt.TagIndex{Driver: &boltDriver} // Create paper index index := &bleve.PaperIndex{} if err := index.Open(paperConfig.Paper.Bleve.Store); err != nil { logger.Fatal("could not open paper index:", err) } paperIndex = 
index // Create user client authClient := auth.NewClient(userService) // Create services tagService = services.NewTagService(&tagIndex) paperService = services.NewPaperService(paperRepository, paperIndex, authClient, tagService) }, } var DeletePaperCommand = cobra.Command{ Use: "delete", Short: "Delete papers based on their IDs", Long: "Delete papers based on their IDs", Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { logger.Fatal("This command expects ids as arguments") } if args[0] == "help" { cmd.Help() return } // @TODO: implement }, } var SavePaperCommand = cobra.Command{ Use: "save", Short: "Save a paper", Long: "Insert or update a paper based on the argument payload or a file", Run: func(cmd *cobra.Command, args []string) { if len(args) == 1 && args[0] == "help" { cmd.Help() return } if len(args) != 1 { logger.Fatal("when no filename is specified, the payload must be passed as argument") } var data []byte if strings.HasPrefix(args[0], "@") { d, err := ioutil.ReadFile(args[0][1:]) if err != nil { logger.Fatal(err) } data = d } else { data = []byte(args[0]) } var paper papernet.Paper err := json.Unmarshal(data, &paper) if err != nil { logger.Fatal("error unmarshalling payload:", err) } // @TODO: implement // err = store.Upsert(&paper) // if err != nil { // logger.Fatal("error saving paper:", err) // } cmd.Println(paper) }, } var SearchCommand = cobra.Command{ Use: "search", Short: "Search papers", Long: "Search papers based on the argument payload or a file", Run: func(cmd *cobra.Command, args []string) { if len(args) == 1 && args[0] == "help" { cmd.Help() return } // @TODO: implement }, } var PaperMigrateCommand = cobra.Command{ Use: "migrate", Short: "Migrate papers to v2", Long: "Migrate papers to v2", Run: func(cmd *cobra.Command, args []string) { if len(args) == 1 && args[0] == "help" { cmd.Help() return } driver := ppnBolt.Driver{} defer driver.Close() err := driver.Open(paperConfig.Bolt.Store) if err != nil { logger.Fatal("could not 
open db:", err) } paperStore := ppnBolt.PaperStore{Driver: &driver} papers, err := paperStore.List() if err != nil { logger.Fatal("could not get papers:", err) } for _, paper := range papers { paperV2 := papernet.Paper{ ID: paper.ID, Title: paper.Title, Summary: paper.Summary, Authors: paper.Authors, Tags: paper.Tags, References: paper.References, CreatedAt: paper.CreatedAt, UpdatedAt: paper.UpdatedAt, } if err := paperRepository.Upsert(&paperV2); err != nil { logger.Errorf("error migrating paper %d: %v", paper.ID, err) continue } if err := paperIndex.Index(&paperV2); err != nil { logger.Errorf("error indexing paper %d: %v", paper.ID, err) continue } logger.Printf("paper %d migrated", paper.ID) } }, } func ints(strs []string) ([]int, error) { ints := make([]int, len(strs)) for i, str := range strs { n, err := strconv.Atoi(str) if err != nil { return nil, err } ints[i] = n } return ints, nil }
package main import ( "os" "github.com/remind101/empire/pkg/heroku" ) var cmdCertAttach = &Command{ Run: runCertAttach, Usage: "cert-attach <aws_cert_name>", NeedsApp: true, Category: "certs", Short: "attach a certificate to an app", Long: ` Attaches an SSL certificate to an applications web process. When using the ECS backend, this will attach an IAM server certificate to the applications ELB. Before running this command, you should upload your SSL certificate and key to IAM using the AWS CLI. Examples: $ aws iam upload-server-certificate --server-certificate-name myServerCertificate --certificate-body file://public_key_cert_file.pem --private-key file://my_private_key.pem --certificate-chain file://my_certificate_chain_file.pem $ emp cert-attach myServerCertificate -a myapp `, } func runCertAttach(cmd *Command, args []string) { if len(args) == 0 { cmd.PrintUsage() os.Exit(2) } cert := args[0] _, err := client.AppUpdate(mustApp(), &heroku.AppUpdateOpts{ Cert: &cert, }) must(err) } Update to indicate the ARN is used package main import ( "os" "github.com/remind101/empire/pkg/heroku" ) var cmdCertAttach = &Command{ Run: runCertAttach, Usage: "cert-attach <aws_cert_arn>", NeedsApp: true, Category: "certs", Short: "attach a certificate to an app", Long: ` Attaches an SSL certificate to an applications web process. When using the ECS backend, this will attach an IAM server certificate to the applications ELB. Before running this command, you should upload your SSL certificate and key to IAM using the AWS CLI. 
Examples: $ aws iam upload-server-certificate --server-certificate-name myServerCertificate --certificate-body file://public_key_cert_file.pem --private-key file://my_private_key.pem --certificate-chain file://my_certificate_chain_file.pem # ^^ The above command will return the ARN of the certificate, you'll need that for the command below # Say it returns the arn arn:aws:iam::123456789012:server-certificate/myServerCertificate, you'd use that like this: $ emp cert-attach arn:aws:iam::123456789012:server-certificate/myServerCertificate -a myapp `, } func runCertAttach(cmd *Command, args []string) { if len(args) == 0 { cmd.PrintUsage() os.Exit(2) } cert := args[0] _, err := client.AppUpdate(mustApp(), &heroku.AppUpdateOpts{ Cert: &cert, }) must(err) }
/* Copyright The Helm Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "io" "strings" "github.com/pkg/errors" "github.com/spf13/cobra" "helm.sh/helm/pkg/action" "helm.sh/helm/pkg/cli/values" "helm.sh/helm/pkg/getter" ) var longLintHelp = ` This command takes a path to a chart and runs a series of tests to verify that the chart is well-formed. If the linter encounters things that will cause the chart to fail installation, it will emit [ERROR] messages. If it encounters issues that break with convention or recommendation, it will emit [WARNING] messages. 
` func newLintCmd(out io.Writer) *cobra.Command { client := action.NewLint() valueOpts := &values.Options{} cmd := &cobra.Command{ Use: "lint PATH", Short: "examines a chart for possible issues", Long: longLintHelp, RunE: func(cmd *cobra.Command, args []string) error { paths := []string{"."} if len(args) > 0 { paths = args } client.Namespace = getNamespace() vals, err := valueOpts.MergeValues(getter.All(settings)) if err != nil { return err } result := client.Run(paths, vals) var message strings.Builder fmt.Fprintf(&message, "%d chart(s) linted, %d chart(s) failed\n", result.TotalChartsLinted, len(result.Errors)) for _, err := range result.Errors { fmt.Fprintf(&message, "\t%s\n", err) } for _, msg := range result.Messages { fmt.Fprintf(&message, "\t%s\n", msg) } if len(result.Errors) > 0 { return errors.New(message.String()) } fmt.Fprintf(out, message.String()) return nil }, } f := cmd.Flags() f.BoolVar(&client.Strict, "strict", false, "fail on lint warnings") addValueOptionsFlags(f, valueOpts) return cmd } Make the lint cmd output a bit easier to follow Have tried to give the output of the lint command a bit of a clean up to try to make it easier to follow. This splits the output by chart, moves the summary to the end of the report rather than at the top and fixes the number of failed charts count. Signed-off-by: Thomas O'Donnell <3df1fb6edcd20213913246d2781f9d20d6bd4890@gmail.com> /* Copyright The Helm Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package main import ( "fmt" "io" "strings" "github.com/pkg/errors" "github.com/spf13/cobra" "helm.sh/helm/pkg/action" "helm.sh/helm/pkg/cli/values" "helm.sh/helm/pkg/getter" ) var longLintHelp = ` This command takes a path to a chart and runs a series of tests to verify that the chart is well-formed. If the linter encounters things that will cause the chart to fail installation, it will emit [ERROR] messages. If it encounters issues that break with convention or recommendation, it will emit [WARNING] messages. ` func newLintCmd(out io.Writer) *cobra.Command { client := action.NewLint() valueOpts := &values.Options{} cmd := &cobra.Command{ Use: "lint PATH", Short: "examines a chart for possible issues", Long: longLintHelp, RunE: func(cmd *cobra.Command, args []string) error { paths := []string{"."} if len(args) > 0 { paths = args } client.Namespace = getNamespace() vals, err := valueOpts.MergeValues(getter.All(settings)) if err != nil { return err } var message strings.Builder failed := 0 for _, path := range paths { fmt.Fprintf(&message, "==> Linting %s\n", path) result := client.Run([]string{path}, vals) // All the Errors that are generated by a chart // that failed a lint will be included in the // results.Messages so we only need to print // the Errors if there are no Messages. if len(result.Messages) == 0 { for _, err := range result.Errors { fmt.Fprintf(&message, "Error %s\n", err) } } for _, msg := range result.Messages { fmt.Fprintf(&message, "%s\n", msg) } if len(result.Errors) != 0 { failed++ } // Adding extra new line here to break up the // results, stops this from being a big wall of // text and makes it easier to follow. 
fmt.Fprint(&message, "\n") } fmt.Fprintf(out, message.String()) var summary strings.Builder fmt.Fprintf(&summary, "%d chart(s) linted, %d chart(s) failed", len(paths), failed) if failed > 0 { return errors.New(summary.String()) } fmt.Fprintf(out, "%s\n", summary.String()) return nil }, } f := cmd.Flags() f.BoolVar(&client.Strict, "strict", false, "fail on lint warnings") addValueOptionsFlags(f, valueOpts) return cmd }
package main import ( "errors" "fmt" "os" "runtime/pprof" flag "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" commander "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" config "github.com/jbenet/go-ipfs/config" core "github.com/jbenet/go-ipfs/core" daemon "github.com/jbenet/go-ipfs/daemon" updates "github.com/jbenet/go-ipfs/updates" u "github.com/jbenet/go-ipfs/util" ) // The IPFS command tree. It is an instance of `commander.Command`. var CmdIpfs = &commander.Command{ UsageLine: "ipfs [<flags>] <command> [<args>]", Short: "global versioned p2p merkledag file system", Long: `ipfs - global versioned p2p merkledag file system Basic commands: init Initialize ipfs local configuration. add <path> Add an object to ipfs. cat <ref> Show ipfs object data. ls <ref> List links from an object. refs <ref> List link hashes from an object. Tool commands: config Manage configuration. version Show ipfs version information. commands List all available commands. Advanced Commands: mount Mount an ipfs read-only mountpoint. serve Serve an interface to ipfs. Use "ipfs help <command>" for more information about a command. 
`, Run: ipfsCmd, Subcommands: []*commander.Command{ cmdIpfsAdd, cmdIpfsCat, cmdIpfsLs, cmdIpfsRefs, cmdIpfsConfig, cmdIpfsVersion, cmdIpfsCommands, cmdIpfsMount, cmdIpfsInit, cmdIpfsServe, cmdIpfsRun, cmdIpfsName, cmdIpfsBootstrap, cmdIpfsDiag, }, Flag: *flag.NewFlagSet("ipfs", flag.ExitOnError), } // log is the command logger var log = u.Logger("cmd/ipfs") func init() { config, err := config.PathRoot() if err != nil { u.POut("Failure initializing the default Config Directory: ", err) os.Exit(1) } CmdIpfs.Flag.String("c", config, "specify config directory") } func ipfsCmd(c *commander.Command, args []string) error { u.POut(c.Long) return nil } func main() { u.Debug = false // setup logging // u.SetupLogging() done in an init() block now. // if debugging, setup profiling. if u.Debug { ofi, err := os.Create("cpu.prof") if err != nil { fmt.Println(err) return } pprof.StartCPUProfile(ofi) defer ofi.Close() defer pprof.StopCPUProfile() } err := CmdIpfs.Dispatch(os.Args[1:]) if err != nil { if len(err.Error()) > 0 { fmt.Fprintf(os.Stderr, "ipfs %s: %v\n", os.Args[1], err) } os.Exit(1) } return } // localNode constructs a node func localNode(confdir string, online bool) (*core.IpfsNode, error) { filename, err := config.Filename(confdir) if err != nil { return nil, err } cfg, err := config.Load(filename) if err != nil { return nil, err } if cfg.Version.ShouldCheckForUpdate() { obsolete := updates.CheckForUpdates() if obsolete != nil { if cfg.Version.Check == config.CheckError { return nil, obsolete } // when "warn" version.check mode we just show warning message log.Warning(fmt.Sprintf("%v", obsolete)) } else { // update most recent check timestamp in config config.RecordUpdateCheck(cfg, filename) } } return core.NewIpfsNode(cfg, online) } // Gets the config "-c" flag from the command, or returns // the default configuration root directory func getConfigDir(c *commander.Command) (string, error) { // use the root cmd (that's where config is specified) for ; c.Parent != nil; 
c = c.Parent { } // flag should be defined on root. param := c.Flag.Lookup("c").Value.Get().(string) if param != "" { return u.TildeExpansion(param) } return config.PathRoot() } func getConfig(c *commander.Command) (*config.Config, error) { confdir, err := getConfigDir(c) if err != nil { return nil, err } filename, err := config.Filename(confdir) if err != nil { return nil, err } return config.Load(filename) } // cmdContext is a wrapper structure that keeps a node, a daemonlistener, and // a config directory together. These three are needed for most commands. type cmdContext struct { node *core.IpfsNode daemon *daemon.DaemonListener configDir string } // setupCmdContext initializes a cmdContext structure from a given command. func setupCmdContext(c *commander.Command, online bool) (cc cmdContext, err error) { rootCmd := c for ; rootCmd.Parent != nil; rootCmd = c.Parent { } cc.configDir, err = getConfigDir(rootCmd) if err != nil { return } cc.node, err = localNode(cc.configDir, online) if err != nil { return } cc.daemon, err = setupDaemon(cc.configDir, cc.node) if err != nil { return } return } // setupDaemon sets up the daemon corresponding to given node. 
func setupDaemon(confdir string, node *core.IpfsNode) (*daemon.DaemonListener, error) { if node.Config.Addresses.API == "" { return nil, errors.New("no config.Addresses.API endpoint supplied") } maddr, err := ma.NewMultiaddr(node.Config.Addresses.API) if err != nil { return nil, err } dl, err := daemon.NewDaemonListener(node, maddr, confdir) if err != nil { return nil, err } go dl.Listen() return dl, nil } add net-diag to help message package main import ( "errors" "fmt" "os" "runtime/pprof" flag "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" commander "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" config "github.com/jbenet/go-ipfs/config" core "github.com/jbenet/go-ipfs/core" daemon "github.com/jbenet/go-ipfs/daemon" updates "github.com/jbenet/go-ipfs/updates" u "github.com/jbenet/go-ipfs/util" ) // The IPFS command tree. It is an instance of `commander.Command`. var CmdIpfs = &commander.Command{ UsageLine: "ipfs [<flags>] <command> [<args>]", Short: "global versioned p2p merkledag file system", Long: `ipfs - global versioned p2p merkledag file system Basic commands: init Initialize ipfs local configuration. add <path> Add an object to ipfs. cat <ref> Show ipfs object data. ls <ref> List links from an object. refs <ref> List link hashes from an object. Tool commands: config Manage configuration. version Show ipfs version information. commands List all available commands. Advanced Commands: mount Mount an ipfs read-only mountpoint. serve Serve an interface to ipfs. net-diag Print network diagnostic Use "ipfs help <command>" for more information about a command. 
`, Run: ipfsCmd, Subcommands: []*commander.Command{ cmdIpfsAdd, cmdIpfsCat, cmdIpfsLs, cmdIpfsRefs, cmdIpfsConfig, cmdIpfsVersion, cmdIpfsCommands, cmdIpfsMount, cmdIpfsInit, cmdIpfsServe, cmdIpfsRun, cmdIpfsName, cmdIpfsBootstrap, cmdIpfsDiag, }, Flag: *flag.NewFlagSet("ipfs", flag.ExitOnError), } // log is the command logger var log = u.Logger("cmd/ipfs") func init() { config, err := config.PathRoot() if err != nil { u.POut("Failure initializing the default Config Directory: ", err) os.Exit(1) } CmdIpfs.Flag.String("c", config, "specify config directory") } func ipfsCmd(c *commander.Command, args []string) error { u.POut(c.Long) return nil } func main() { u.Debug = false // setup logging // u.SetupLogging() done in an init() block now. // if debugging, setup profiling. if u.Debug { ofi, err := os.Create("cpu.prof") if err != nil { fmt.Println(err) return } pprof.StartCPUProfile(ofi) defer ofi.Close() defer pprof.StopCPUProfile() } err := CmdIpfs.Dispatch(os.Args[1:]) if err != nil { if len(err.Error()) > 0 { fmt.Fprintf(os.Stderr, "ipfs %s: %v\n", os.Args[1], err) } os.Exit(1) } return } // localNode constructs a node func localNode(confdir string, online bool) (*core.IpfsNode, error) { filename, err := config.Filename(confdir) if err != nil { return nil, err } cfg, err := config.Load(filename) if err != nil { return nil, err } if cfg.Version.ShouldCheckForUpdate() { obsolete := updates.CheckForUpdates() if obsolete != nil { if cfg.Version.Check == config.CheckError { return nil, obsolete } // when "warn" version.check mode we just show warning message log.Warning(fmt.Sprintf("%v", obsolete)) } else { // update most recent check timestamp in config config.RecordUpdateCheck(cfg, filename) } } return core.NewIpfsNode(cfg, online) } // Gets the config "-c" flag from the command, or returns // the default configuration root directory func getConfigDir(c *commander.Command) (string, error) { // use the root cmd (that's where config is specified) for ; c.Parent != nil; 
c = c.Parent { } // flag should be defined on root. param := c.Flag.Lookup("c").Value.Get().(string) if param != "" { return u.TildeExpansion(param) } return config.PathRoot() } func getConfig(c *commander.Command) (*config.Config, error) { confdir, err := getConfigDir(c) if err != nil { return nil, err } filename, err := config.Filename(confdir) if err != nil { return nil, err } return config.Load(filename) } // cmdContext is a wrapper structure that keeps a node, a daemonlistener, and // a config directory together. These three are needed for most commands. type cmdContext struct { node *core.IpfsNode daemon *daemon.DaemonListener configDir string } // setupCmdContext initializes a cmdContext structure from a given command. func setupCmdContext(c *commander.Command, online bool) (cc cmdContext, err error) { rootCmd := c for ; rootCmd.Parent != nil; rootCmd = c.Parent { } cc.configDir, err = getConfigDir(rootCmd) if err != nil { return } cc.node, err = localNode(cc.configDir, online) if err != nil { return } cc.daemon, err = setupDaemon(cc.configDir, cc.node) if err != nil { return } return } // setupDaemon sets up the daemon corresponding to given node. func setupDaemon(confdir string, node *core.IpfsNode) (*daemon.DaemonListener, error) { if node.Config.Addresses.API == "" { return nil, errors.New("no config.Addresses.API endpoint supplied") } maddr, err := ma.NewMultiaddr(node.Config.Addresses.API) if err != nil { return nil, err } dl, err := daemon.NewDaemonListener(node, maddr, confdir) if err != nil { return nil, err } go dl.Listen() return dl, nil }
package main import ( "errors" "fmt" "io" "math/rand" "os" "os/signal" "runtime" "runtime/pprof" "strings" "syscall" "time" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" cmds "github.com/jbenet/go-ipfs/commands" cmdsCli "github.com/jbenet/go-ipfs/commands/cli" cmdsHttp "github.com/jbenet/go-ipfs/commands/http" core "github.com/jbenet/go-ipfs/core" config "github.com/jbenet/go-ipfs/repo/config" fsrepo "github.com/jbenet/go-ipfs/repo/fsrepo" eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" u "github.com/jbenet/go-ipfs/util" "github.com/jbenet/go-ipfs/util/debugerror" ) // log is the command logger var log = eventlog.Logger("cmd/ipfs") // signal to output help var errHelpRequested = errors.New("Help Requested") const ( cpuProfile = "ipfs.cpuprof" heapProfile = "ipfs.memprof" errorFormat = "ERROR: %v\n\n" ) type cmdInvocation struct { path []string cmd *cmds.Command req cmds.Request node *core.IpfsNode } // main roadmap: // - parse the commandline to get a cmdInvocation // - if user requests, help, print it and exit. // - run the command invocation // - output the response // - if anything fails, print error, maybe with help func main() { rand.Seed(time.Now().UnixNano()) runtime.GOMAXPROCS(3) // FIXME rm arbitrary choice for n ctx := eventlog.ContextWithLoggable(context.Background(), eventlog.Uuid("session")) var err error var invoc cmdInvocation defer invoc.close() // we'll call this local helper to output errors. // this is so we control how to print errors in one place. printErr := func(err error) { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) } stopFunc, err := profileIfEnabled() if err != nil { printErr(err) os.Exit(1) } defer stopFunc() // to be executed as late as possible // this is a local helper to print out help text. 
// there's some considerations that this makes easier. printHelp := func(long bool, w io.Writer) { helpFunc := cmdsCli.ShortHelp if long { helpFunc = cmdsCli.LongHelp } helpFunc("ipfs", Root, invoc.path, w) } // this is a message to tell the user how to get the help text printMetaHelp := func(w io.Writer) { cmdPath := strings.Join(invoc.path, " ") fmt.Fprintf(w, "Use 'ipfs %s --help' for information about this command\n", cmdPath) } // parse the commandline into a command invocation parseErr := invoc.Parse(ctx, os.Args[1:]) // BEFORE handling the parse error, if we have enough information // AND the user requested help, print it out and exit if invoc.req != nil { longH, shortH, err := invoc.requestedHelp() if err != nil { printErr(err) os.Exit(1) } if longH || shortH { printHelp(longH, os.Stdout) os.Exit(0) } } // here we handle the cases where // - commands with no Run func are invoked directly. // - the main command is invoked. if invoc.cmd == nil || invoc.cmd.Run == nil { printHelp(false, os.Stdout) os.Exit(0) } // ok now handle parse error (which means cli input was wrong, // e.g. incorrect number of args, or nonexistent subcommand) if parseErr != nil { printErr(parseErr) // this was a user error, print help. if invoc.cmd != nil { // we need a newline space. fmt.Fprintf(os.Stderr, "\n") printMetaHelp(os.Stderr) } os.Exit(1) } // ok, finally, run the command invocation. output, err := invoc.Run(ctx) if err != nil { printErr(err) // if this error was a client error, print short help too. if isClientError(err) { printMetaHelp(os.Stderr) } os.Exit(1) } // everything went better than expected :) io.Copy(os.Stdout, output) } func (i *cmdInvocation) Run(ctx context.Context) (output io.Reader, err error) { // setup our global interrupt handler. i.setupInterruptHandler() // check if user wants to debug. option OR env var. 
debug, _, err := i.req.Option("debug").Bool() if err != nil { return nil, err } if debug || u.GetenvBool("DEBUG") || os.Getenv("IPFS_LOGGING") == "debug" { u.Debug = true u.SetDebugLogging() } res, err := callCommand(ctx, i.req, Root, i.cmd) if err != nil { return nil, err } if err := res.Error(); err != nil { return nil, err } return res.Reader() } func (i *cmdInvocation) constructNodeFunc(ctx context.Context) func() (*core.IpfsNode, error) { return func() (*core.IpfsNode, error) { if i.req == nil { return nil, errors.New("constructing node without a request") } cmdctx := i.req.Context() if cmdctx == nil { return nil, errors.New("constructing node without a request context") } r := fsrepo.At(i.req.Context().ConfigRoot) if err := r.Open(); err != nil { // repo is owned by the node return nil, err } // ok everything is good. set it on the invocation (for ownership) // and return it. n, err := core.NewIPFSNode(ctx, core.Standard(r, cmdctx.Online)) if err != nil { return nil, err } i.node = n return i.node, nil } } func (i *cmdInvocation) close() { // let's not forget teardown. If a node was initialized, we must close it. // Note that this means the underlying req.Context().Node variable is exposed. // this is gross, and should be changed when we extract out the exec Context. if i.node != nil { log.Info("Shutting down node...") i.node.Close() } } func (i *cmdInvocation) Parse(ctx context.Context, args []string) error { var err error i.req, i.cmd, i.path, err = cmdsCli.Parse(args, os.Stdin, Root) if err != nil { return err } i.req.Context().Context = ctx repoPath, err := getRepoPath(i.req) if err != nil { return err } log.Debugf("config path is %s", repoPath) // this sets up the function that will initialize the config lazily. cmdctx := i.req.Context() cmdctx.ConfigRoot = repoPath cmdctx.LoadConfig = loadConfig // this sets up the function that will initialize the node // this is so that we can construct the node lazily. 
cmdctx.ConstructNode = i.constructNodeFunc(ctx) // if no encoding was specified by user, default to plaintext encoding // (if command doesn't support plaintext, use JSON instead) if !i.req.Option("encoding").Found() { if i.req.Command().Marshalers != nil && i.req.Command().Marshalers[cmds.Text] != nil { i.req.SetOption("encoding", cmds.Text) } else { i.req.SetOption("encoding", cmds.JSON) } } return nil } func (i *cmdInvocation) requestedHelp() (short bool, long bool, err error) { longHelp, _, err := i.req.Option("help").Bool() if err != nil { return false, false, err } shortHelp, _, err := i.req.Option("h").Bool() if err != nil { return false, false, err } return longHelp, shortHelp, nil } func callPreCommandHooks(ctx context.Context, details cmdDetails, req cmds.Request, root *cmds.Command) error { log.Event(ctx, "callPreCommandHooks", &details) log.Debug("Calling pre-command hooks...") return nil } func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd *cmds.Command) (cmds.Response, error) { log.Info(config.EnvDir, req.Context().ConfigRoot) var res cmds.Response details, err := commandDetails(req.Path(), root) if err != nil { return nil, err } log.Info("looking for running daemon...") useDaemon, err := commandShouldRunOnDaemon(*details, req, root) if err != nil { return nil, err } err = callPreCommandHooks(ctx, *details, req, root) if err != nil { return nil, err } if cmd.PreRun != nil { err = cmd.PreRun(req) if err != nil { return nil, err } } if useDaemon { cfg, err := req.Context().GetConfig() if err != nil { return nil, err } addr, err := ma.NewMultiaddr(cfg.Addresses.API) if err != nil { return nil, err } log.Infof("Executing command on daemon running at %s", addr) _, host, err := manet.DialArgs(addr) if err != nil { return nil, err } client := cmdsHttp.NewClient(host) res, err = client.Send(req) if err != nil { return nil, err } } else { log.Info("Executing command locally") // Okay!!!!! NOW we can call the command. 
res = root.Call(req) } if cmd.PostRun != nil { cmd.PostRun(req, res) } return res, nil } // commandDetails returns a command's details for the command given by |path| // within the |root| command tree. // // Returns an error if the command is not found in the Command tree. func commandDetails(path []string, root *cmds.Command) (*cmdDetails, error) { var details cmdDetails // find the last command in path that has a cmdDetailsMap entry cmd := root for _, cmp := range path { var found bool cmd, found = cmd.Subcommands[cmp] if !found { return nil, debugerror.Errorf("subcommand %s should be in root", cmp) } if cmdDetails, found := cmdDetailsMap[cmd]; found { details = cmdDetails } } return &details, nil } // commandShouldRunOnDaemon determines, from commmand details, whether a // command ought to be executed on an IPFS daemon. // // It returns true if the command should be executed on a daemon and false if // it should be executed on a client. It returns an error if the command must // NOT be executed on either. func commandShouldRunOnDaemon(details cmdDetails, req cmds.Request, root *cmds.Command) (bool, error) { path := req.Path() // root command. if len(path) < 1 { return false, nil } if details.cannotRunOnClient && details.cannotRunOnDaemon { return false, fmt.Errorf("command disabled: %s", path[0]) } if details.doesNotUseRepo && details.canRunOnClient() { return false, nil } // at this point need to know whether daemon is running. we defer // to this point so that some commands dont open files unnecessarily. daemonLocked := fsrepo.LockedByOtherProcess(req.Context().ConfigRoot) if daemonLocked { log.Info("a daemon is running...") if details.cannotRunOnDaemon { e := "ipfs daemon is running. 
please stop it to run this command" return false, cmds.ClientError(e) } return true, nil } if details.cannotRunOnClient { return false, cmds.ClientError("must run on the ipfs daemon") } return false, nil } func isClientError(err error) bool { // Somewhat suprisingly, the pointer cast fails to recognize commands.Error // passed as values, so we check both. // cast to cmds.Error switch e := err.(type) { case *cmds.Error: return e.Code == cmds.ErrClient case cmds.Error: return e.Code == cmds.ErrClient } return false } func getRepoPath(req cmds.Request) (string, error) { repoOpt, found, err := req.Option("config").String() if err != nil { return "", err } if found && repoOpt != "" { return repoOpt, nil } repoPath, err := fsrepo.BestKnownPath() if err != nil { return "", err } return repoPath, nil } func loadConfig(path string) (*config.Config, error) { return fsrepo.ConfigAt(path) } // startProfiling begins CPU profiling and returns a `stop` function to be // executed as late as possible. The stop function captures the memprofile. func startProfiling() (func(), error) { // start CPU profiling as early as possible ofi, err := os.Create(cpuProfile) if err != nil { return nil, err } pprof.StartCPUProfile(ofi) stopProfiling := func() { pprof.StopCPUProfile() defer ofi.Close() // captured by the closure err := writeHeapProfileToFile() if err != nil { log.Critical(err) } } return stopProfiling, nil } func writeHeapProfileToFile() error { mprof, err := os.Create(heapProfile) if err != nil { return err } defer mprof.Close() // _after_ writing the heap profile return pprof.WriteHeapProfile(mprof) } // listen for and handle SIGTERM func (i *cmdInvocation) setupInterruptHandler() { ctx := i.req.Context() sig := allInterruptSignals() go func() { // first time, try to shut down. 
// loop because we may be for count := 0; ; count++ { <-sig // TODO cancel the command context instead n, err := ctx.GetNode() if err != nil { log.Error(err) log.Critical("Received interrupt signal, terminating...") os.Exit(-1) } switch count { case 0: log.Critical("Received interrupt signal, shutting down...") go func() { n.Close() log.Info("Gracefully shut down.") }() default: log.Critical("Received another interrupt before graceful shutdown, terminating...") os.Exit(-1) } } }() } func allInterruptSignals() chan os.Signal { sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) return sigc } func profileIfEnabled() (func(), error) { // FIXME this is a temporary hack so profiling of asynchronous operations // works as intended. if u.GetenvBool("DEBUG") || os.Getenv("IPFS_LOGGING") == "debug" { u.Debug = true u.SetDebugLogging() stopProfilingFunc, err := startProfiling() // TODO maybe change this to its own option... profiling makes it slower. if err != nil { return nil, err } return stopProfilingFunc, nil } return func() {}, nil } feat(main): change pprof flag to IPFS_PROF @jbenet @whyrusleeping thoughts? 
package main import ( "errors" "fmt" "io" "math/rand" "os" "os/signal" "runtime" "runtime/pprof" "strings" "syscall" "time" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" cmds "github.com/jbenet/go-ipfs/commands" cmdsCli "github.com/jbenet/go-ipfs/commands/cli" cmdsHttp "github.com/jbenet/go-ipfs/commands/http" core "github.com/jbenet/go-ipfs/core" config "github.com/jbenet/go-ipfs/repo/config" fsrepo "github.com/jbenet/go-ipfs/repo/fsrepo" eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" u "github.com/jbenet/go-ipfs/util" "github.com/jbenet/go-ipfs/util/debugerror" ) // log is the command logger var log = eventlog.Logger("cmd/ipfs") // signal to output help var errHelpRequested = errors.New("Help Requested") const ( EnvEnableProfiling = "IPFS_PROF" cpuProfile = "ipfs.cpuprof" heapProfile = "ipfs.memprof" errorFormat = "ERROR: %v\n\n" ) type cmdInvocation struct { path []string cmd *cmds.Command req cmds.Request node *core.IpfsNode } // main roadmap: // - parse the commandline to get a cmdInvocation // - if user requests, help, print it and exit. // - run the command invocation // - output the response // - if anything fails, print error, maybe with help func main() { rand.Seed(time.Now().UnixNano()) runtime.GOMAXPROCS(3) // FIXME rm arbitrary choice for n ctx := eventlog.ContextWithLoggable(context.Background(), eventlog.Uuid("session")) var err error var invoc cmdInvocation defer invoc.close() // we'll call this local helper to output errors. // this is so we control how to print errors in one place. 
printErr := func(err error) { fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) } stopFunc, err := profileIfEnabled() if err != nil { printErr(err) os.Exit(1) } defer stopFunc() // to be executed as late as possible // this is a local helper to print out help text. // there's some considerations that this makes easier. printHelp := func(long bool, w io.Writer) { helpFunc := cmdsCli.ShortHelp if long { helpFunc = cmdsCli.LongHelp } helpFunc("ipfs", Root, invoc.path, w) } // this is a message to tell the user how to get the help text printMetaHelp := func(w io.Writer) { cmdPath := strings.Join(invoc.path, " ") fmt.Fprintf(w, "Use 'ipfs %s --help' for information about this command\n", cmdPath) } // parse the commandline into a command invocation parseErr := invoc.Parse(ctx, os.Args[1:]) // BEFORE handling the parse error, if we have enough information // AND the user requested help, print it out and exit if invoc.req != nil { longH, shortH, err := invoc.requestedHelp() if err != nil { printErr(err) os.Exit(1) } if longH || shortH { printHelp(longH, os.Stdout) os.Exit(0) } } // here we handle the cases where // - commands with no Run func are invoked directly. // - the main command is invoked. if invoc.cmd == nil || invoc.cmd.Run == nil { printHelp(false, os.Stdout) os.Exit(0) } // ok now handle parse error (which means cli input was wrong, // e.g. incorrect number of args, or nonexistent subcommand) if parseErr != nil { printErr(parseErr) // this was a user error, print help. if invoc.cmd != nil { // we need a newline space. fmt.Fprintf(os.Stderr, "\n") printMetaHelp(os.Stderr) } os.Exit(1) } // ok, finally, run the command invocation. output, err := invoc.Run(ctx) if err != nil { printErr(err) // if this error was a client error, print short help too. 
if isClientError(err) { printMetaHelp(os.Stderr) } os.Exit(1) } // everything went better than expected :) io.Copy(os.Stdout, output) } func (i *cmdInvocation) Run(ctx context.Context) (output io.Reader, err error) { // setup our global interrupt handler. i.setupInterruptHandler() // check if user wants to debug. option OR env var. debug, _, err := i.req.Option("debug").Bool() if err != nil { return nil, err } if debug || u.GetenvBool("DEBUG") || os.Getenv("IPFS_LOGGING") == "debug" { u.Debug = true u.SetDebugLogging() } res, err := callCommand(ctx, i.req, Root, i.cmd) if err != nil { return nil, err } if err := res.Error(); err != nil { return nil, err } return res.Reader() } func (i *cmdInvocation) constructNodeFunc(ctx context.Context) func() (*core.IpfsNode, error) { return func() (*core.IpfsNode, error) { if i.req == nil { return nil, errors.New("constructing node without a request") } cmdctx := i.req.Context() if cmdctx == nil { return nil, errors.New("constructing node without a request context") } r := fsrepo.At(i.req.Context().ConfigRoot) if err := r.Open(); err != nil { // repo is owned by the node return nil, err } // ok everything is good. set it on the invocation (for ownership) // and return it. n, err := core.NewIPFSNode(ctx, core.Standard(r, cmdctx.Online)) if err != nil { return nil, err } i.node = n return i.node, nil } } func (i *cmdInvocation) close() { // let's not forget teardown. If a node was initialized, we must close it. // Note that this means the underlying req.Context().Node variable is exposed. // this is gross, and should be changed when we extract out the exec Context. 
if i.node != nil { log.Info("Shutting down node...") i.node.Close() } } func (i *cmdInvocation) Parse(ctx context.Context, args []string) error { var err error i.req, i.cmd, i.path, err = cmdsCli.Parse(args, os.Stdin, Root) if err != nil { return err } i.req.Context().Context = ctx repoPath, err := getRepoPath(i.req) if err != nil { return err } log.Debugf("config path is %s", repoPath) // this sets up the function that will initialize the config lazily. cmdctx := i.req.Context() cmdctx.ConfigRoot = repoPath cmdctx.LoadConfig = loadConfig // this sets up the function that will initialize the node // this is so that we can construct the node lazily. cmdctx.ConstructNode = i.constructNodeFunc(ctx) // if no encoding was specified by user, default to plaintext encoding // (if command doesn't support plaintext, use JSON instead) if !i.req.Option("encoding").Found() { if i.req.Command().Marshalers != nil && i.req.Command().Marshalers[cmds.Text] != nil { i.req.SetOption("encoding", cmds.Text) } else { i.req.SetOption("encoding", cmds.JSON) } } return nil } func (i *cmdInvocation) requestedHelp() (short bool, long bool, err error) { longHelp, _, err := i.req.Option("help").Bool() if err != nil { return false, false, err } shortHelp, _, err := i.req.Option("h").Bool() if err != nil { return false, false, err } return longHelp, shortHelp, nil } func callPreCommandHooks(ctx context.Context, details cmdDetails, req cmds.Request, root *cmds.Command) error { log.Event(ctx, "callPreCommandHooks", &details) log.Debug("Calling pre-command hooks...") return nil } func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd *cmds.Command) (cmds.Response, error) { log.Info(config.EnvDir, req.Context().ConfigRoot) var res cmds.Response details, err := commandDetails(req.Path(), root) if err != nil { return nil, err } log.Info("looking for running daemon...") useDaemon, err := commandShouldRunOnDaemon(*details, req, root) if err != nil { return nil, err } err = 
callPreCommandHooks(ctx, *details, req, root) if err != nil { return nil, err } if cmd.PreRun != nil { err = cmd.PreRun(req) if err != nil { return nil, err } } if useDaemon { cfg, err := req.Context().GetConfig() if err != nil { return nil, err } addr, err := ma.NewMultiaddr(cfg.Addresses.API) if err != nil { return nil, err } log.Infof("Executing command on daemon running at %s", addr) _, host, err := manet.DialArgs(addr) if err != nil { return nil, err } client := cmdsHttp.NewClient(host) res, err = client.Send(req) if err != nil { return nil, err } } else { log.Info("Executing command locally") // Okay!!!!! NOW we can call the command. res = root.Call(req) } if cmd.PostRun != nil { cmd.PostRun(req, res) } return res, nil } // commandDetails returns a command's details for the command given by |path| // within the |root| command tree. // // Returns an error if the command is not found in the Command tree. func commandDetails(path []string, root *cmds.Command) (*cmdDetails, error) { var details cmdDetails // find the last command in path that has a cmdDetailsMap entry cmd := root for _, cmp := range path { var found bool cmd, found = cmd.Subcommands[cmp] if !found { return nil, debugerror.Errorf("subcommand %s should be in root", cmp) } if cmdDetails, found := cmdDetailsMap[cmd]; found { details = cmdDetails } } return &details, nil } // commandShouldRunOnDaemon determines, from commmand details, whether a // command ought to be executed on an IPFS daemon. // // It returns true if the command should be executed on a daemon and false if // it should be executed on a client. It returns an error if the command must // NOT be executed on either. func commandShouldRunOnDaemon(details cmdDetails, req cmds.Request, root *cmds.Command) (bool, error) { path := req.Path() // root command. 
if len(path) < 1 { return false, nil } if details.cannotRunOnClient && details.cannotRunOnDaemon { return false, fmt.Errorf("command disabled: %s", path[0]) } if details.doesNotUseRepo && details.canRunOnClient() { return false, nil } // at this point need to know whether daemon is running. we defer // to this point so that some commands dont open files unnecessarily. daemonLocked := fsrepo.LockedByOtherProcess(req.Context().ConfigRoot) if daemonLocked { log.Info("a daemon is running...") if details.cannotRunOnDaemon { e := "ipfs daemon is running. please stop it to run this command" return false, cmds.ClientError(e) } return true, nil } if details.cannotRunOnClient { return false, cmds.ClientError("must run on the ipfs daemon") } return false, nil } func isClientError(err error) bool { // Somewhat suprisingly, the pointer cast fails to recognize commands.Error // passed as values, so we check both. // cast to cmds.Error switch e := err.(type) { case *cmds.Error: return e.Code == cmds.ErrClient case cmds.Error: return e.Code == cmds.ErrClient } return false } func getRepoPath(req cmds.Request) (string, error) { repoOpt, found, err := req.Option("config").String() if err != nil { return "", err } if found && repoOpt != "" { return repoOpt, nil } repoPath, err := fsrepo.BestKnownPath() if err != nil { return "", err } return repoPath, nil } func loadConfig(path string) (*config.Config, error) { return fsrepo.ConfigAt(path) } // startProfiling begins CPU profiling and returns a `stop` function to be // executed as late as possible. The stop function captures the memprofile. 
func startProfiling() (func(), error) { // start CPU profiling as early as possible ofi, err := os.Create(cpuProfile) if err != nil { return nil, err } pprof.StartCPUProfile(ofi) stopProfiling := func() { pprof.StopCPUProfile() defer ofi.Close() // captured by the closure err := writeHeapProfileToFile() if err != nil { log.Critical(err) } } return stopProfiling, nil } func writeHeapProfileToFile() error { mprof, err := os.Create(heapProfile) if err != nil { return err } defer mprof.Close() // _after_ writing the heap profile return pprof.WriteHeapProfile(mprof) } // listen for and handle SIGTERM func (i *cmdInvocation) setupInterruptHandler() { ctx := i.req.Context() sig := allInterruptSignals() go func() { // first time, try to shut down. // loop because we may be for count := 0; ; count++ { <-sig // TODO cancel the command context instead n, err := ctx.GetNode() if err != nil { log.Error(err) log.Critical("Received interrupt signal, terminating...") os.Exit(-1) } switch count { case 0: log.Critical("Received interrupt signal, shutting down...") go func() { n.Close() log.Info("Gracefully shut down.") }() default: log.Critical("Received another interrupt before graceful shutdown, terminating...") os.Exit(-1) } } }() } func allInterruptSignals() chan os.Signal { sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) return sigc } func profileIfEnabled() (func(), error) { // FIXME this is a temporary hack so profiling of asynchronous operations // works as intended. if os.Getenv(EnvEnableProfiling) != "" { stopProfilingFunc, err := startProfiling() // TODO maybe change this to its own option... profiling makes it slower. if err != nil { return nil, err } return stopProfilingFunc, nil } return func() {}, nil }
package main import ( "fmt" "net/http" "os" "path/filepath" "regexp" "runtime/debug" "github.com/coryb/figtree" "github.com/coryb/kingpeon" "github.com/coryb/oreo" jira "gopkg.in/Netflix-Skunkworks/go-jira.v1" "gopkg.in/Netflix-Skunkworks/go-jira.v1/jiracli" kingpin "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/op/go-logging.v1" ) var ( log = logging.MustGetLogger("jira") defaultFormat = "%{color}%{time:2006-01-02T15:04:05.000Z07:00} %{level:-5s} [%{shortfile}]%{color:reset} %{message}" ) func handleExit() { if e := recover(); e != nil { if exit, ok := e.(jiracli.Exit); ok { os.Exit(exit.Code) } else { fmt.Fprintf(os.Stderr, "%s\n%s", e, debug.Stack()) os.Exit(1) } } } func main() { defer handleExit() logBackend := logging.NewLogBackend(os.Stderr, "", 0) format := os.Getenv("JIRA_LOG_FORMAT") if format == "" { format = defaultFormat } logging.SetBackend( logging.NewBackendFormatter( logBackend, logging.MustStringFormatter(format), ), ) if os.Getenv("JIRA_DEBUG") == "" { logging.SetLevel(logging.NOTICE, "") } else { logging.SetLevel(logging.DEBUG, "") } app := kingpin.New("jira", "Jira Command Line Interface") app.Command("version", "Prints version").PreAction(func(*kingpin.ParseContext) error { fmt.Println(jira.VERSION) panic(jiracli.Exit{Code: 0}) }) app.Flag("verbose", "Increase verbosity for debugging").Short('v').PreAction(func(_ *kingpin.ParseContext) error { logging.SetLevel(logging.GetLevel("")+1, "") if logging.GetLevel("") > logging.DEBUG { oreo.TraceRequestBody = true oreo.TraceResponseBody = true } return nil }).Counter() fig := figtree.NewFigTree() fig.EnvPrefix = "JIRA" fig.ConfigDir = ".jira.d" o := oreo.New().WithCookieFile(filepath.Join(jiracli.Homedir(), fig.ConfigDir, "cookies.js")) o = o.WithPostCallback( func(req *http.Request, resp *http.Response) (*http.Response, error) { if resp.Header.Get("X-Ausername") == "anonymous" { // we are not logged in, so force login now by running the "login" command app.Parse([]string{"login"}) return o.Do(req) } 
return resp, nil }, ) registry := []jiracli.CommandRegistry{ jiracli.CommandRegistry{ Command: "login", Entry: jiracli.CmdLoginRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "logout", Entry: jiracli.CmdLogoutRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "list", Aliases: []string{"ls"}, Entry: jiracli.CmdListRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "view", Entry: jiracli.CmdViewRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "create", Entry: jiracli.CmdCreateRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "edit", Entry: jiracli.CmdEditRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "comment", Entry: jiracli.CmdCommentRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "worklog list", Entry: jiracli.CmdWorklogListRegistry(fig, o), Default: true, }, jiracli.CommandRegistry{ Command: "worklog add", Entry: jiracli.CmdWorklogAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "fields", Entry: jiracli.CmdFieldsRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "createmeta", Entry: jiracli.CmdCreateMetaRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "editmeta", Entry: jiracli.CmdEditMetaRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "subtask", Entry: jiracli.CmdSubtaskRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "dup", Entry: jiracli.CmdDupRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "block", Entry: jiracli.CmdBlockRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "issuelink", Entry: jiracli.CmdIssueLinkRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "issuelinktypes", Entry: jiracli.CmdIssueLinkTypesRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "transition", Aliases: []string{"trans"}, Entry: jiracli.CmdTransitionRegistry(fig, o, ""), }, jiracli.CommandRegistry{ Command: "transitions", Entry: jiracli.CmdTransitionsRegistry(fig, o, "transitions"), }, jiracli.CommandRegistry{ Command: "transmeta", Entry: jiracli.CmdTransitionsRegistry(fig, o, "debug"), }, 
jiracli.CommandRegistry{ Command: "close", Entry: jiracli.CmdTransitionRegistry(fig, o, "close"), }, jiracli.CommandRegistry{ Command: "acknowledge", Aliases: []string{"ack"}, Entry: jiracli.CmdTransitionRegistry(fig, o, "acknowledge"), }, jiracli.CommandRegistry{ Command: "reopen", Entry: jiracli.CmdTransitionRegistry(fig, o, "reopen"), }, jiracli.CommandRegistry{ Command: "resolve", Entry: jiracli.CmdTransitionRegistry(fig, o, "resolve"), }, jiracli.CommandRegistry{ Command: "start", Entry: jiracli.CmdTransitionRegistry(fig, o, "start"), }, jiracli.CommandRegistry{ Command: "stop", Entry: jiracli.CmdTransitionRegistry(fig, o, "stop"), }, jiracli.CommandRegistry{ Command: "todo", Entry: jiracli.CmdTransitionRegistry(fig, o, "To Do"), }, jiracli.CommandRegistry{ Command: "backlog", Entry: jiracli.CmdTransitionRegistry(fig, o, "Backlog"), }, jiracli.CommandRegistry{ Command: "done", Entry: jiracli.CmdTransitionRegistry(fig, o, "Done"), }, jiracli.CommandRegistry{ Command: "in-progress", Aliases: []string{"prog", "progress"}, Entry: jiracli.CmdTransitionRegistry(fig, o, "Progress"), }, jiracli.CommandRegistry{ Command: "vote", Entry: jiracli.CmdVoteRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "rank", Entry: jiracli.CmdRankRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "watch", Entry: jiracli.CmdWatchRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "labels add", Entry: jiracli.CmdLabelsAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "labels set", Entry: jiracli.CmdLabelsAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "labels remove", Entry: jiracli.CmdLabelsAddRegistry(fig, o), Aliases: []string{"rm"}, }, jiracli.CommandRegistry{ Command: "take", Entry: jiracli.CmdTakeRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "assign", Entry: jiracli.CmdAssignRegistry(fig, o), Aliases: []string{"give"}, }, jiracli.CommandRegistry{ Command: "unassign", Entry: jiracli.CmdUnassignRegistry(fig, o), }, jiracli.CommandRegistry{ 
Command: "component add", Entry: jiracli.CmdComponentAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "components", Entry: jiracli.CmdComponentsRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "issuetypes", Entry: jiracli.CmdIssueTypesRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "export-templates", Entry: jiracli.CmdExportTemplatesRegistry(fig), }, jiracli.CommandRegistry{ Command: "unexport-templates", Entry: jiracli.CmdUnexportTemplatesRegistry(fig), }, jiracli.CommandRegistry{ Command: "browse", Entry: jiracli.CmdBrowseRegistry(fig), Aliases: []string{"b"}, }, jiracli.CommandRegistry{ Command: "request", Entry: jiracli.CmdRequestRegistry(fig, o), Aliases: []string{"req"}, }, } jiracli.Register(app, registry) // register custom commands data := struct { CustomCommands kingpeon.DynamicCommands `yaml:"custom-commands" json":custom-commands"` }{} if err := fig.LoadAllConfigs("config.yml", &data); err != nil { log.Errorf("%s", err) panic(jiracli.Exit{Code: 1}) } if len(data.CustomCommands) > 0 { tmp := map[string]interface{}{} fig.LoadAllConfigs("config.yml", &tmp) kingpeon.RegisterDynamicCommands(app, data.CustomCommands, jiracli.TemplateProcessor()) } app.Terminate(func(status int) { for _, arg := range os.Args { if arg == "-h" || arg == "--help" || len(os.Args) == 1 { panic(jiracli.Exit{Code: 0}) } } panic(jiracli.Exit{Code: 1}) }) if len(os.Args) > 1 { // if first arg matches ISSUE-123 pattern then we assume it is a 'view' operation if ok, err := regexp.MatchString("^[A-Z]+-[0-9]+$", os.Args[1]); err != nil { log.Errorf("Invalid Regex: %s", err) } else if ok { // insert "view" at i=1 (2nd position) os.Args = append(os.Args[:1], append([]string{"view"}, os.Args[1:]...)...) 
} } if _, err := app.Parse(os.Args[1:]); err != nil { ctx, _ := app.ParseContext(os.Args[1:]) if ctx != nil { app.UsageForContext(ctx) } log.Errorf("Invalid Usage: %s", err) panic(jiracli.Exit{Code: 1}) } } when using --verbose set the JIRA_DEBUG environment variable so custom-commands can auto enable verbose output package main import ( "fmt" "net/http" "os" "path/filepath" "regexp" "runtime/debug" "strconv" "github.com/coryb/figtree" "github.com/coryb/kingpeon" "github.com/coryb/oreo" jira "gopkg.in/Netflix-Skunkworks/go-jira.v1" "gopkg.in/Netflix-Skunkworks/go-jira.v1/jiracli" kingpin "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/op/go-logging.v1" ) var ( log = logging.MustGetLogger("jira") defaultFormat = "%{color}%{time:2006-01-02T15:04:05.000Z07:00} %{level:-5s} [%{shortfile}]%{color:reset} %{message}" ) func handleExit() { if e := recover(); e != nil { if exit, ok := e.(jiracli.Exit); ok { os.Exit(exit.Code) } else { fmt.Fprintf(os.Stderr, "%s\n%s", e, debug.Stack()) os.Exit(1) } } } func increaseLogLevel(verbosity int) { logging.SetLevel(logging.GetLevel("")+logging.Level(verbosity), "") if logging.GetLevel("") > logging.DEBUG { oreo.TraceRequestBody = true oreo.TraceResponseBody = true } } func main() { defer handleExit() logBackend := logging.NewLogBackend(os.Stderr, "", 0) format := os.Getenv("JIRA_LOG_FORMAT") if format == "" { format = defaultFormat } logging.SetBackend( logging.NewBackendFormatter( logBackend, logging.MustStringFormatter(format), ), ) if os.Getenv("JIRA_DEBUG") == "" { logging.SetLevel(logging.NOTICE, "") } else { logging.SetLevel(logging.DEBUG, "") } app := kingpin.New("jira", "Jira Command Line Interface") app.Command("version", "Prints version").PreAction(func(*kingpin.ParseContext) error { fmt.Println(jira.VERSION) panic(jiracli.Exit{Code: 0}) }) var verbosity int app.Flag("verbose", "Increase verbosity for debugging").Short('v').PreAction(func(_ *kingpin.ParseContext) error { os.Setenv("JIRA_DEBUG", fmt.Sprintf("%s", verbosity)) 
increaseLogLevel(1) return nil }).CounterVar(&verbosity) if os.Getenv("JIRA_DEBUG") != "" { if verbosity, err := strconv.Atoi(os.Getenv("JIRA_DEBUG")); err == nil { increaseLogLevel(verbosity) } } fig := figtree.NewFigTree() fig.EnvPrefix = "JIRA" fig.ConfigDir = ".jira.d" o := oreo.New().WithCookieFile(filepath.Join(jiracli.Homedir(), fig.ConfigDir, "cookies.js")) o = o.WithPostCallback( func(req *http.Request, resp *http.Response) (*http.Response, error) { if resp.Header.Get("X-Ausername") == "anonymous" { // we are not logged in, so force login now by running the "login" command app.Parse([]string{"login"}) return o.Do(req) } return resp, nil }, ) registry := []jiracli.CommandRegistry{ jiracli.CommandRegistry{ Command: "login", Entry: jiracli.CmdLoginRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "logout", Entry: jiracli.CmdLogoutRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "list", Aliases: []string{"ls"}, Entry: jiracli.CmdListRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "view", Entry: jiracli.CmdViewRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "create", Entry: jiracli.CmdCreateRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "edit", Entry: jiracli.CmdEditRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "comment", Entry: jiracli.CmdCommentRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "worklog list", Entry: jiracli.CmdWorklogListRegistry(fig, o), Default: true, }, jiracli.CommandRegistry{ Command: "worklog add", Entry: jiracli.CmdWorklogAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "fields", Entry: jiracli.CmdFieldsRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "createmeta", Entry: jiracli.CmdCreateMetaRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "editmeta", Entry: jiracli.CmdEditMetaRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "subtask", Entry: jiracli.CmdSubtaskRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "dup", Entry: jiracli.CmdDupRegistry(fig, o), }, 
jiracli.CommandRegistry{ Command: "block", Entry: jiracli.CmdBlockRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "issuelink", Entry: jiracli.CmdIssueLinkRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "issuelinktypes", Entry: jiracli.CmdIssueLinkTypesRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "transition", Aliases: []string{"trans"}, Entry: jiracli.CmdTransitionRegistry(fig, o, ""), }, jiracli.CommandRegistry{ Command: "transitions", Entry: jiracli.CmdTransitionsRegistry(fig, o, "transitions"), }, jiracli.CommandRegistry{ Command: "transmeta", Entry: jiracli.CmdTransitionsRegistry(fig, o, "debug"), }, jiracli.CommandRegistry{ Command: "close", Entry: jiracli.CmdTransitionRegistry(fig, o, "close"), }, jiracli.CommandRegistry{ Command: "acknowledge", Aliases: []string{"ack"}, Entry: jiracli.CmdTransitionRegistry(fig, o, "acknowledge"), }, jiracli.CommandRegistry{ Command: "reopen", Entry: jiracli.CmdTransitionRegistry(fig, o, "reopen"), }, jiracli.CommandRegistry{ Command: "resolve", Entry: jiracli.CmdTransitionRegistry(fig, o, "resolve"), }, jiracli.CommandRegistry{ Command: "start", Entry: jiracli.CmdTransitionRegistry(fig, o, "start"), }, jiracli.CommandRegistry{ Command: "stop", Entry: jiracli.CmdTransitionRegistry(fig, o, "stop"), }, jiracli.CommandRegistry{ Command: "todo", Entry: jiracli.CmdTransitionRegistry(fig, o, "To Do"), }, jiracli.CommandRegistry{ Command: "backlog", Entry: jiracli.CmdTransitionRegistry(fig, o, "Backlog"), }, jiracli.CommandRegistry{ Command: "done", Entry: jiracli.CmdTransitionRegistry(fig, o, "Done"), }, jiracli.CommandRegistry{ Command: "in-progress", Aliases: []string{"prog", "progress"}, Entry: jiracli.CmdTransitionRegistry(fig, o, "Progress"), }, jiracli.CommandRegistry{ Command: "vote", Entry: jiracli.CmdVoteRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "rank", Entry: jiracli.CmdRankRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "watch", Entry: jiracli.CmdWatchRegistry(fig, o), }, 
jiracli.CommandRegistry{ Command: "labels add", Entry: jiracli.CmdLabelsAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "labels set", Entry: jiracli.CmdLabelsAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "labels remove", Entry: jiracli.CmdLabelsAddRegistry(fig, o), Aliases: []string{"rm"}, }, jiracli.CommandRegistry{ Command: "take", Entry: jiracli.CmdTakeRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "assign", Entry: jiracli.CmdAssignRegistry(fig, o), Aliases: []string{"give"}, }, jiracli.CommandRegistry{ Command: "unassign", Entry: jiracli.CmdUnassignRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "component add", Entry: jiracli.CmdComponentAddRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "components", Entry: jiracli.CmdComponentsRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "issuetypes", Entry: jiracli.CmdIssueTypesRegistry(fig, o), }, jiracli.CommandRegistry{ Command: "export-templates", Entry: jiracli.CmdExportTemplatesRegistry(fig), }, jiracli.CommandRegistry{ Command: "unexport-templates", Entry: jiracli.CmdUnexportTemplatesRegistry(fig), }, jiracli.CommandRegistry{ Command: "browse", Entry: jiracli.CmdBrowseRegistry(fig), Aliases: []string{"b"}, }, jiracli.CommandRegistry{ Command: "request", Entry: jiracli.CmdRequestRegistry(fig, o), Aliases: []string{"req"}, }, } jiracli.Register(app, registry) // register custom commands data := struct { CustomCommands kingpeon.DynamicCommands `yaml:"custom-commands" json":custom-commands"` }{} if err := fig.LoadAllConfigs("config.yml", &data); err != nil { log.Errorf("%s", err) panic(jiracli.Exit{Code: 1}) } if len(data.CustomCommands) > 0 { tmp := map[string]interface{}{} fig.LoadAllConfigs("config.yml", &tmp) kingpeon.RegisterDynamicCommands(app, data.CustomCommands, jiracli.TemplateProcessor()) } app.Terminate(func(status int) { for _, arg := range os.Args { if arg == "-h" || arg == "--help" || len(os.Args) == 1 { panic(jiracli.Exit{Code: 0}) } } 
panic(jiracli.Exit{Code: 1}) }) if len(os.Args) > 1 { // if first arg matches ISSUE-123 pattern then we assume it is a 'view' operation if ok, err := regexp.MatchString("^[A-Z]+-[0-9]+$", os.Args[1]); err != nil { log.Errorf("Invalid Regex: %s", err) } else if ok { // insert "view" at i=1 (2nd position) os.Args = append(os.Args[:1], append([]string{"view"}, os.Args[1:]...)...) } } if _, err := app.Parse(os.Args[1:]); err != nil { ctx, _ := app.ParseContext(os.Args[1:]) if ctx != nil { app.UsageForContext(ctx) } log.Errorf("Invalid Usage: %s", err) panic(jiracli.Exit{Code: 1}) } }
// Copyright (C) 2015,2016 Nippon Telegraph and Telephone Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "encoding/binary" "fmt" "io" "net" "sort" "time" log "github.com/Sirupsen/logrus" "github.com/armon/go-radix" api "github.com/osrg/gobgp/api" "github.com/osrg/gobgp/config" "github.com/osrg/gobgp/packet/bgp" "github.com/osrg/gobgp/packet/rtr" "github.com/osrg/gobgp/table" "gopkg.in/tomb.v2" ) func before(a, b uint32) bool { return int32(a-b) < 0 } type ipPrefix struct { Prefix net.IP Length uint8 } type roaBucket struct { Prefix *ipPrefix entries []*ROA } type ROA struct { Family int Prefix *ipPrefix MaxLen uint8 AS uint32 Src string } func NewROA(family int, prefixByte []byte, prefixLen uint8, maxLen uint8, as uint32, src string) *ROA { p := make([]byte, len(prefixByte)) copy(p, prefixByte) return &ROA{ Family: family, Prefix: &ipPrefix{ Prefix: p, Length: prefixLen, }, MaxLen: maxLen, AS: as, Src: src, } } func (r *ROA) Equal(roa *ROA) bool { if r.MaxLen == roa.MaxLen && r.Src == roa.Src && r.AS == roa.AS { return true } return false } func (r *ROA) toApiStruct() *api.ROA { host, port, _ := net.SplitHostPort(r.Src) return &api.ROA{ As: r.AS, Maxlen: uint32(r.MaxLen), Prefixlen: uint32(r.Prefix.Length), Prefix: r.Prefix.Prefix.String(), Conf: &api.RPKIConf{ Address: host, RemotePort: port, }, } } type roas []*api.ROA func (r roas) Len() int { return len(r) } func (r roas) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r 
roas) Less(i, j int) bool { r1 := r[i] r2 := r[j] if r1.Maxlen < r1.Maxlen { return true } else if r1.Maxlen > r1.Maxlen { return false } if r1.As < r2.As { return true } return false } type ROAEventType uint8 const ( CONNECTED ROAEventType = iota DISCONNECTED RTR LIFETIMEOUT ) type ROAEvent struct { EventType ROAEventType Src string Data []byte conn *net.TCPConn } type roaManager struct { AS uint32 Roas map[bgp.RouteFamily]*radix.Tree eventCh chan *ROAEvent clientMap map[string]*roaClient } func NewROAManager(as uint32) (*roaManager, error) { m := &roaManager{ AS: as, Roas: make(map[bgp.RouteFamily]*radix.Tree), } m.Roas[bgp.RF_IPv4_UC] = radix.New() m.Roas[bgp.RF_IPv6_UC] = radix.New() m.eventCh = make(chan *ROAEvent) m.clientMap = make(map[string]*roaClient) return m, nil } func (m *roaManager) SetAS(as uint32) error { if m.AS != 0 { return fmt.Errorf("AS was already configured") } m.AS = as return nil } func (m *roaManager) AddServer(host string, lifetime int64) error { if m.AS == 0 { return fmt.Errorf("AS isn't configured yet") } address, port, err := net.SplitHostPort(host) if err != nil { return err } if lifetime == 0 { lifetime = 3600 } if _, ok := m.clientMap[host]; ok { return fmt.Errorf("roa server exists %s", host) } client := NewRoaClient(address, port, m.eventCh, lifetime) m.clientMap[host] = client client.t.Go(client.tryConnect) return nil } func (m *roaManager) DeleteServer(host string) error { client, ok := m.clientMap[host] if !ok { return fmt.Errorf("roa server doesn't exists %s", host) } client.reset() delete(m.clientMap, host) return nil } func (m *roaManager) deleteAllROA(network string) { for _, tree := range m.Roas { deleteKeys := make([]string, 0, tree.Len()) tree.Walk(func(s string, v interface{}) bool { b, _ := v.(*roaBucket) newEntries := make([]*ROA, 0, len(b.entries)) for _, r := range b.entries { if r.Src != network { newEntries = append(newEntries, r) } } if len(newEntries) > 0 { b.entries = newEntries } else { deleteKeys = 
append(deleteKeys, s) } return false }) for _, key := range deleteKeys { tree.Delete(key) } } } func (m *roaManager) operate(op api.Operation, address string) error { for network, client := range m.clientMap { add, _, _ := net.SplitHostPort(network) if add == address { switch op { case api.Operation_ENABLE: client.enable(client.serialNumber) case api.Operation_DISABLE: case api.Operation_RESET: client.reset() case api.Operation_SOFTRESET: client.softReset() m.deleteAllROA(network) } return nil } } return fmt.Errorf("roa server not found %s", address) } func (c *roaManager) ReceiveROA() chan *ROAEvent { return c.eventCh } func (c *roaClient) lifetimeout() { c.eventCh <- &ROAEvent{ EventType: LIFETIMEOUT, Src: c.host, } } func (m *roaManager) HandleROAEvent(ev *ROAEvent) { client, y := m.clientMap[ev.Src] if !y { if ev.EventType == CONNECTED { ev.conn.Close() } log.Error("can't find %s roa server configuration", ev.Src) return } switch ev.EventType { case DISCONNECTED: log.Info("roa server is disconnected, ", ev.Src) client.state.Downtime = time.Now().Unix() // clear state client.endOfData = false client.pendingROAs = make([]*ROA, 0) client.state.RpkiMessages = config.RpkiMessages{} client.conn = nil client.t = tomb.Tomb{} client.t.Go(client.tryConnect) client.timer = time.AfterFunc(time.Duration(client.lifetime)*time.Second, client.lifetimeout) client.oldSessionID = client.sessionID case CONNECTED: log.Info("roa server is connected, ", ev.Src) client.conn = ev.conn client.state.Uptime = time.Now().Unix() client.t = tomb.Tomb{} client.t.Go(client.established) case RTR: m.handleRTRMsg(client, &client.state, ev.Data) case LIFETIMEOUT: // a) already reconnected but hasn't received // EndOfData -> needs to delete stale ROAs // b) not reconnected -> needs to delete stale ROAs // // c) already reconnected and received EndOfData so // all stale ROAs were deleted -> timer was cancelled // so should not be here. 
if client.oldSessionID != client.sessionID { log.Info("reconnected so ignore timeout", client.host) } else { log.Info("delete all due to timeout", client.host) m.deleteAllROA(client.host) } } } func (m *roaManager) roa2tree(roa *ROA) (*radix.Tree, string) { tree := m.Roas[bgp.RF_IPv4_UC] if roa.Family == bgp.AFI_IP6 { tree = m.Roas[bgp.RF_IPv6_UC] } return tree, table.IpToRadixkey(roa.Prefix.Prefix, roa.Prefix.Length) } func (m *roaManager) deleteROA(roa *ROA) { tree, key := m.roa2tree(roa) b, _ := tree.Get(key) if b != nil { bucket := b.(*roaBucket) newEntries := make([]*ROA, 0, len(bucket.entries)) for _, r := range bucket.entries { if !r.Equal(roa) { newEntries = append(newEntries, r) } } if len(newEntries) != len(bucket.entries) { bucket.entries = newEntries if len(newEntries) == 0 { tree.Delete(key) } return } } log.Info("can't withdraw a roa", roa.Prefix.Prefix.String(), roa.Prefix.Length, roa.AS, roa.MaxLen) } func (m *roaManager) addROA(roa *ROA) { tree, key := m.roa2tree(roa) b, _ := tree.Get(key) var bucket *roaBucket if b == nil { bucket = &roaBucket{ Prefix: roa.Prefix, entries: make([]*ROA, 0), } tree.Insert(key, bucket) } else { bucket = b.(*roaBucket) for _, r := range bucket.entries { if r.Equal(roa) { // we already have the same one return } } } bucket.entries = append(bucket.entries, roa) } func (c *roaManager) handleRTRMsg(client *roaClient, state *config.RpkiServerState, buf []byte) { received := &state.RpkiMessages.RpkiReceived m, err := rtr.ParseRTR(buf) if err == nil { switch msg := m.(type) { case *rtr.RTRSerialNotify: if before(client.serialNumber, msg.RTRCommon.SerialNumber) { client.enable(client.serialNumber) } else if client.serialNumber == msg.RTRCommon.SerialNumber { // nothing } else { // should not happen. try to get the whole ROAs. 
client.softReset() } received.SerialNotify++ case *rtr.RTRSerialQuery: case *rtr.RTRResetQuery: case *rtr.RTRCacheResponse: received.CacheResponse++ client.endOfData = false case *rtr.RTRIPPrefix: family := bgp.AFI_IP if msg.Type == rtr.RTR_IPV4_PREFIX { received.Ipv4Prefix++ } else { family = bgp.AFI_IP6 received.Ipv6Prefix++ } roa := NewROA(family, msg.Prefix, msg.PrefixLen, msg.MaxLen, msg.AS, client.host) if (msg.Flags & 1) == 1 { if client.endOfData { c.addROA(roa) } else { client.pendingROAs = append(client.pendingROAs, roa) } } else { c.deleteROA(roa) } case *rtr.RTREndOfData: received.EndOfData++ if client.sessionID != msg.RTRCommon.SessionID { // remove all ROAs related with the // previous session c.deleteAllROA(client.host) } client.sessionID = msg.RTRCommon.SessionID client.serialNumber = msg.RTRCommon.SerialNumber client.endOfData = true if client.timer != nil { client.timer.Stop() client.timer = nil } for _, roa := range client.pendingROAs { c.addROA(roa) } client.pendingROAs = make([]*ROA, 0) case *rtr.RTRCacheReset: client.softReset() received.CacheReset++ case *rtr.RTRErrorReport: received.Error++ } } else { log.Info("failed to parse a RTR message ", client.host, err) } } func (c *roaManager) handleGRPC(grpcReq *GrpcRequest) { switch grpcReq.RequestType { case REQ_RPKI: results := make([]*GrpcResponse, 0) f := func(tree *radix.Tree) (map[string]uint32, map[string]uint32) { records := make(map[string]uint32) prefixes := make(map[string]uint32) tree.Walk(func(s string, v interface{}) bool { b, _ := v.(*roaBucket) tmpRecords := make(map[string]uint32) for _, roa := range b.entries { tmpRecords[roa.Src]++ } for src, r := range tmpRecords { if r > 0 { records[src] += r prefixes[src]++ } } return false }) return records, prefixes } recordsV4, prefixesV4 := f(c.Roas[bgp.RF_IPv4_UC]) recordsV6, prefixesV6 := f(c.Roas[bgp.RF_IPv6_UC]) for _, client := range c.clientMap { state := client.state addr, port, _ := net.SplitHostPort(client.host) received := 
&state.RpkiMessages.RpkiReceived sent := client.state.RpkiMessages.RpkiSent up := true if client.conn == nil { up = false } f := func(m map[string]uint32, key string) uint32 { if r, ok := m[key]; ok { return r } return 0 } rpki := &api.RPKI{ Conf: &api.RPKIConf{ Address: addr, RemotePort: port, }, State: &api.RPKIState{ Uptime: state.Uptime, Downtime: state.Downtime, Up: up, RecordIpv4: f(recordsV4, client.host), RecordIpv6: f(recordsV6, client.host), PrefixIpv4: f(prefixesV4, client.host), PrefixIpv6: f(prefixesV6, client.host), Serial: client.serialNumber, ReceivedIpv4: received.Ipv4Prefix, ReceivedIpv6: received.Ipv6Prefix, SerialNotify: received.SerialNotify, CacheReset: received.CacheReset, CacheResponse: received.CacheResponse, EndOfData: received.EndOfData, Error: received.Error, SerialQuery: sent.SerialQuery, ResetQuery: sent.ResetQuery, }, } result := &GrpcResponse{} result.Data = rpki results = append(results, result) } go sendMultipleResponses(grpcReq, results) case REQ_ROA: if len(c.clientMap) == 0 { result := &GrpcResponse{} result.ResponseErr = fmt.Errorf("RPKI server isn't configured.") grpcReq.ResponseCh <- result break } results := make([]*GrpcResponse, 0) var rfList []bgp.RouteFamily switch grpcReq.RouteFamily { case bgp.RF_IPv4_UC: rfList = []bgp.RouteFamily{bgp.RF_IPv4_UC} case bgp.RF_IPv6_UC: rfList = []bgp.RouteFamily{bgp.RF_IPv6_UC} default: rfList = []bgp.RouteFamily{bgp.RF_IPv4_UC, bgp.RF_IPv6_UC} } for _, rf := range rfList { if tree, ok := c.Roas[rf]; ok { tree.Walk(func(s string, v interface{}) bool { b, _ := v.(*roaBucket) var roaList roas for _, r := range b.entries { roaList = append(roaList, r.toApiStruct()) } sort.Sort(roaList) for _, roa := range roaList { result := &GrpcResponse{ Data: roa, } results = append(results, result) } return false }) } } go sendMultipleResponses(grpcReq, results) } } func validatePath(ownAs uint32, tree *radix.Tree, cidr string, asPath *bgp.PathAttributeAsPath) config.RpkiValidationResultType { var as 
uint32 if asPath == nil || len(asPath.Value) == 0 { return config.RPKI_VALIDATION_RESULT_TYPE_NOT_FOUND } asParam := asPath.Value[len(asPath.Value)-1].(*bgp.As4PathParam) switch asParam.Type { case bgp.BGP_ASPATH_ATTR_TYPE_SEQ: if len(asParam.AS) == 0 { return config.RPKI_VALIDATION_RESULT_TYPE_NOT_FOUND } as = asParam.AS[len(asParam.AS)-1] case bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SET, bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SEQ: as = ownAs default: return config.RPKI_VALIDATION_RESULT_TYPE_NOT_FOUND } _, n, _ := net.ParseCIDR(cidr) ones, _ := n.Mask.Size() prefixLen := uint8(ones) _, b, _ := tree.LongestPrefix(table.IpToRadixkey(n.IP, prefixLen)) if b == nil { return config.RPKI_VALIDATION_RESULT_TYPE_NOT_FOUND } bucket, _ := b.(*roaBucket) for _, r := range bucket.entries { if prefixLen > r.MaxLen { continue } if r.AS == as { return config.RPKI_VALIDATION_RESULT_TYPE_VALID } } return config.RPKI_VALIDATION_RESULT_TYPE_INVALID } func (c *roaManager) validate(pathList []*table.Path) { for _, path := range pathList { if path.IsWithdraw || path.IsEOR() { continue } if tree, ok := c.Roas[path.GetRouteFamily()]; ok { r := validatePath(c.AS, tree, path.GetNlri().String(), path.GetAsPath()) path.SetValidation(config.RpkiValidationResultType(r)) } } } type roaClient struct { t tomb.Tomb host string conn *net.TCPConn state config.RpkiServerState eventCh chan *ROAEvent sessionID uint16 oldSessionID uint16 serialNumber uint32 timer *time.Timer lifetime int64 endOfData bool pendingROAs []*ROA } func NewRoaClient(address, port string, ch chan *ROAEvent, lifetime int64) *roaClient { return &roaClient{ host: net.JoinHostPort(address, port), eventCh: ch, lifetime: lifetime, pendingROAs: make([]*ROA, 0), } } func (c *roaClient) enable(serial uint32) error { if c.conn != nil { r := rtr.NewRTRSerialQuery(c.sessionID, serial) data, _ := r.Serialize() _, err := c.conn.Write(data) if err != nil { return err } c.state.RpkiMessages.RpkiSent.SerialQuery++ } return nil } func (c *roaClient) 
softReset() error { if c.conn != nil { r := rtr.NewRTRResetQuery() data, _ := r.Serialize() _, err := c.conn.Write(data) if err != nil { return err } c.state.RpkiMessages.RpkiSent.ResetQuery++ c.endOfData = false c.pendingROAs = make([]*ROA, 0) } return nil } func (c *roaClient) reset() { c.t.Kill(nil) if c.conn != nil { c.conn.Close() } } func (c *roaClient) tryConnect() error { for c.t.Alive() { conn, err := net.Dial("tcp", c.host) if err != nil { time.Sleep(30 * time.Second) } else { c.eventCh <- &ROAEvent{ EventType: CONNECTED, Src: c.host, conn: conn.(*net.TCPConn), } return nil } } return nil } func (c *roaClient) established() error { defer c.conn.Close() disconnected := func() { c.eventCh <- &ROAEvent{ EventType: DISCONNECTED, Src: c.host, } } err := c.softReset() if err != nil { disconnected() return nil } for { header := make([]byte, rtr.RTR_MIN_LEN) _, err := io.ReadFull(c.conn, header) if err != nil { break } totalLen := binary.BigEndian.Uint32(header[4:8]) if totalLen < rtr.RTR_MIN_LEN { break } body := make([]byte, totalLen-rtr.RTR_MIN_LEN) _, err = io.ReadFull(c.conn, body) if err != nil { break } c.eventCh <- &ROAEvent{ EventType: RTR, Src: c.host, Data: append(header, body...), } } disconnected() return nil } rpki: use the own AS number if asPath is empty Signed-off-by: FUJITA Tomonori <93dac1fe9c4b2a3957982200319981492ad4976e@lab.ntt.co.jp> // Copyright (C) 2015,2016 Nippon Telegraph and Telephone Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. 
// See the License for the specific language governing permissions and // limitations under the License. package server import ( "encoding/binary" "fmt" "io" "net" "sort" "time" log "github.com/Sirupsen/logrus" "github.com/armon/go-radix" api "github.com/osrg/gobgp/api" "github.com/osrg/gobgp/config" "github.com/osrg/gobgp/packet/bgp" "github.com/osrg/gobgp/packet/rtr" "github.com/osrg/gobgp/table" "gopkg.in/tomb.v2" ) func before(a, b uint32) bool { return int32(a-b) < 0 } type ipPrefix struct { Prefix net.IP Length uint8 } type roaBucket struct { Prefix *ipPrefix entries []*ROA } type ROA struct { Family int Prefix *ipPrefix MaxLen uint8 AS uint32 Src string } func NewROA(family int, prefixByte []byte, prefixLen uint8, maxLen uint8, as uint32, src string) *ROA { p := make([]byte, len(prefixByte)) copy(p, prefixByte) return &ROA{ Family: family, Prefix: &ipPrefix{ Prefix: p, Length: prefixLen, }, MaxLen: maxLen, AS: as, Src: src, } } func (r *ROA) Equal(roa *ROA) bool { if r.MaxLen == roa.MaxLen && r.Src == roa.Src && r.AS == roa.AS { return true } return false } func (r *ROA) toApiStruct() *api.ROA { host, port, _ := net.SplitHostPort(r.Src) return &api.ROA{ As: r.AS, Maxlen: uint32(r.MaxLen), Prefixlen: uint32(r.Prefix.Length), Prefix: r.Prefix.Prefix.String(), Conf: &api.RPKIConf{ Address: host, RemotePort: port, }, } } type roas []*api.ROA func (r roas) Len() int { return len(r) } func (r roas) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r roas) Less(i, j int) bool { r1 := r[i] r2 := r[j] if r1.Maxlen < r1.Maxlen { return true } else if r1.Maxlen > r1.Maxlen { return false } if r1.As < r2.As { return true } return false } type ROAEventType uint8 const ( CONNECTED ROAEventType = iota DISCONNECTED RTR LIFETIMEOUT ) type ROAEvent struct { EventType ROAEventType Src string Data []byte conn *net.TCPConn } type roaManager struct { AS uint32 Roas map[bgp.RouteFamily]*radix.Tree eventCh chan *ROAEvent clientMap map[string]*roaClient } func NewROAManager(as 
uint32) (*roaManager, error) { m := &roaManager{ AS: as, Roas: make(map[bgp.RouteFamily]*radix.Tree), } m.Roas[bgp.RF_IPv4_UC] = radix.New() m.Roas[bgp.RF_IPv6_UC] = radix.New() m.eventCh = make(chan *ROAEvent) m.clientMap = make(map[string]*roaClient) return m, nil } func (m *roaManager) SetAS(as uint32) error { if m.AS != 0 { return fmt.Errorf("AS was already configured") } m.AS = as return nil } func (m *roaManager) AddServer(host string, lifetime int64) error { if m.AS == 0 { return fmt.Errorf("AS isn't configured yet") } address, port, err := net.SplitHostPort(host) if err != nil { return err } if lifetime == 0 { lifetime = 3600 } if _, ok := m.clientMap[host]; ok { return fmt.Errorf("roa server exists %s", host) } client := NewRoaClient(address, port, m.eventCh, lifetime) m.clientMap[host] = client client.t.Go(client.tryConnect) return nil } func (m *roaManager) DeleteServer(host string) error { client, ok := m.clientMap[host] if !ok { return fmt.Errorf("roa server doesn't exists %s", host) } client.reset() delete(m.clientMap, host) return nil } func (m *roaManager) deleteAllROA(network string) { for _, tree := range m.Roas { deleteKeys := make([]string, 0, tree.Len()) tree.Walk(func(s string, v interface{}) bool { b, _ := v.(*roaBucket) newEntries := make([]*ROA, 0, len(b.entries)) for _, r := range b.entries { if r.Src != network { newEntries = append(newEntries, r) } } if len(newEntries) > 0 { b.entries = newEntries } else { deleteKeys = append(deleteKeys, s) } return false }) for _, key := range deleteKeys { tree.Delete(key) } } } func (m *roaManager) operate(op api.Operation, address string) error { for network, client := range m.clientMap { add, _, _ := net.SplitHostPort(network) if add == address { switch op { case api.Operation_ENABLE: client.enable(client.serialNumber) case api.Operation_DISABLE: case api.Operation_RESET: client.reset() case api.Operation_SOFTRESET: client.softReset() m.deleteAllROA(network) } return nil } } return fmt.Errorf("roa 
server not found %s", address)
}

func (c *roaManager) ReceiveROA() chan *ROAEvent {
	return c.eventCh
}

// lifetimeout is fired by the per-client timer when no EndOfData arrived
// within the configured lifetime after a disconnect.
func (c *roaClient) lifetimeout() {
	c.eventCh <- &ROAEvent{
		EventType: LIFETIMEOUT,
		Src:       c.host,
	}
}

func (m *roaManager) HandleROAEvent(ev *ROAEvent) {
	client, y := m.clientMap[ev.Src]
	if !y {
		if ev.EventType == CONNECTED {
			ev.conn.Close()
		}
		// BUG FIX: logrus's Error does not interpret format verbs, so the
		// original log.Error(...) printed the literal "%s"; Errorf
		// substitutes ev.Src as intended.
		log.Errorf("can't find %s roa server configuration", ev.Src)
		return
	}
	switch ev.EventType {
	case DISCONNECTED:
		log.Info("roa server is disconnected, ", ev.Src)
		client.state.Downtime = time.Now().Unix()
		// clear state
		client.endOfData = false
		client.pendingROAs = make([]*ROA, 0)
		client.state.RpkiMessages = config.RpkiMessages{}
		client.conn = nil
		client.t = tomb.Tomb{}
		client.t.Go(client.tryConnect)
		// stale ROAs are kept until either EndOfData arrives on the new
		// session or the lifetime timer below expires
		client.timer = time.AfterFunc(time.Duration(client.lifetime)*time.Second, client.lifetimeout)
		client.oldSessionID = client.sessionID
	case CONNECTED:
		log.Info("roa server is connected, ", ev.Src)
		client.conn = ev.conn
		client.state.Uptime = time.Now().Unix()
		client.t = tomb.Tomb{}
		client.t.Go(client.established)
	case RTR:
		m.handleRTRMsg(client, &client.state, ev.Data)
	case LIFETIMEOUT:
		// a) already reconnected but hasn't received
		// EndOfData -> needs to delete stale ROAs
		// b) not reconnected -> needs to delete stale ROAs
		//
		// c) already reconnected and received EndOfData so
		// all stale ROAs were deleted -> timer was cancelled
		// so should not be here.
if client.oldSessionID != client.sessionID { log.Info("reconnected so ignore timeout", client.host) } else { log.Info("delete all due to timeout", client.host) m.deleteAllROA(client.host) } } } func (m *roaManager) roa2tree(roa *ROA) (*radix.Tree, string) { tree := m.Roas[bgp.RF_IPv4_UC] if roa.Family == bgp.AFI_IP6 { tree = m.Roas[bgp.RF_IPv6_UC] } return tree, table.IpToRadixkey(roa.Prefix.Prefix, roa.Prefix.Length) } func (m *roaManager) deleteROA(roa *ROA) { tree, key := m.roa2tree(roa) b, _ := tree.Get(key) if b != nil { bucket := b.(*roaBucket) newEntries := make([]*ROA, 0, len(bucket.entries)) for _, r := range bucket.entries { if !r.Equal(roa) { newEntries = append(newEntries, r) } } if len(newEntries) != len(bucket.entries) { bucket.entries = newEntries if len(newEntries) == 0 { tree.Delete(key) } return } } log.Info("can't withdraw a roa", roa.Prefix.Prefix.String(), roa.Prefix.Length, roa.AS, roa.MaxLen) } func (m *roaManager) addROA(roa *ROA) { tree, key := m.roa2tree(roa) b, _ := tree.Get(key) var bucket *roaBucket if b == nil { bucket = &roaBucket{ Prefix: roa.Prefix, entries: make([]*ROA, 0), } tree.Insert(key, bucket) } else { bucket = b.(*roaBucket) for _, r := range bucket.entries { if r.Equal(roa) { // we already have the same one return } } } bucket.entries = append(bucket.entries, roa) } func (c *roaManager) handleRTRMsg(client *roaClient, state *config.RpkiServerState, buf []byte) { received := &state.RpkiMessages.RpkiReceived m, err := rtr.ParseRTR(buf) if err == nil { switch msg := m.(type) { case *rtr.RTRSerialNotify: if before(client.serialNumber, msg.RTRCommon.SerialNumber) { client.enable(client.serialNumber) } else if client.serialNumber == msg.RTRCommon.SerialNumber { // nothing } else { // should not happen. try to get the whole ROAs. 
// NOTE(review): this chunk begins mid-way through the RTR message-dispatch
// switch of a function whose header is outside this view; the leading
// indentation below is reconstructed — confirm against the full file.
			client.softReset()
		}
		received.SerialNotify++
	case *rtr.RTRSerialQuery:
	case *rtr.RTRResetQuery:
	case *rtr.RTRCacheResponse:
		received.CacheResponse++
		client.endOfData = false
	case *rtr.RTRIPPrefix:
		family := bgp.AFI_IP
		if msg.Type == rtr.RTR_IPV4_PREFIX {
			received.Ipv4Prefix++
		} else {
			family = bgp.AFI_IP6
			received.Ipv6Prefix++
		}
		roa := NewROA(family, msg.Prefix, msg.PrefixLen, msg.MaxLen, msg.AS, client.host)
		// Flags bit 0 set means "announce"; clear means "withdraw".
		if (msg.Flags & 1) == 1 {
			// Buffer announcements until End Of Data so a partial
			// transfer never updates the table.
			if client.endOfData {
				c.addROA(roa)
			} else {
				client.pendingROAs = append(client.pendingROAs, roa)
			}
		} else {
			c.deleteROA(roa)
		}
	case *rtr.RTREndOfData:
		received.EndOfData++
		if client.sessionID != msg.RTRCommon.SessionID {
			// remove all ROAs related with the
			// previous session
			c.deleteAllROA(client.host)
		}
		client.sessionID = msg.RTRCommon.SessionID
		client.serialNumber = msg.RTRCommon.SerialNumber
		client.endOfData = true
		if client.timer != nil {
			client.timer.Stop()
			client.timer = nil
		}
		// Commit everything buffered since the session (re)started.
		for _, roa := range client.pendingROAs {
			c.addROA(roa)
		}
		client.pendingROAs = make([]*ROA, 0)
	case *rtr.RTRCacheReset:
		client.softReset()
		received.CacheReset++
	case *rtr.RTRErrorReport:
		received.Error++
	}
	} else {
		log.Info("failed to parse a RTR message ", client.host, err)
	}
}

// handleGRPC answers gRPC requests about RPKI state:
// REQ_RPKI returns per-server statistics, REQ_ROA dumps the ROA table.
func (c *roaManager) handleGRPC(grpcReq *GrpcRequest) {
	switch grpcReq.RequestType {
	case REQ_RPKI:
		results := make([]*GrpcResponse, 0)
		// f counts, per source server, the number of ROA records and
		// the number of distinct prefixes in one radix tree.
		f := func(tree *radix.Tree) (map[string]uint32, map[string]uint32) {
			records := make(map[string]uint32)
			prefixes := make(map[string]uint32)
			tree.Walk(func(s string, v interface{}) bool {
				b, _ := v.(*roaBucket)
				tmpRecords := make(map[string]uint32)
				for _, roa := range b.entries {
					tmpRecords[roa.Src]++
				}
				for src, r := range tmpRecords {
					if r > 0 {
						records[src] += r
						prefixes[src]++
					}
				}
				return false
			})
			return records, prefixes
		}
		recordsV4, prefixesV4 := f(c.Roas[bgp.RF_IPv4_UC])
		recordsV6, prefixesV6 := f(c.Roas[bgp.RF_IPv6_UC])
		for _, client := range c.clientMap {
			state := client.state
			addr, port, _ := net.SplitHostPort(client.host)
			received := &state.RpkiMessages.RpkiReceived
			sent := client.state.RpkiMessages.RpkiSent
			up := true
			if client.conn == nil {
				up = false
			}
			// Map lookup with a zero default.
			f := func(m map[string]uint32, key string) uint32 {
				if r, ok := m[key]; ok {
					return r
				}
				return 0
			}
			rpki := &api.RPKI{
				Conf: &api.RPKIConf{
					Address:    addr,
					RemotePort: port,
				},
				State: &api.RPKIState{
					Uptime:        state.Uptime,
					Downtime:      state.Downtime,
					Up:            up,
					RecordIpv4:    f(recordsV4, client.host),
					RecordIpv6:    f(recordsV6, client.host),
					PrefixIpv4:    f(prefixesV4, client.host),
					PrefixIpv6:    f(prefixesV6, client.host),
					Serial:        client.serialNumber,
					ReceivedIpv4:  received.Ipv4Prefix,
					ReceivedIpv6:  received.Ipv6Prefix,
					SerialNotify:  received.SerialNotify,
					CacheReset:    received.CacheReset,
					CacheResponse: received.CacheResponse,
					EndOfData:     received.EndOfData,
					Error:         received.Error,
					SerialQuery:   sent.SerialQuery,
					ResetQuery:    sent.ResetQuery,
				},
			}
			result := &GrpcResponse{}
			result.Data = rpki
			results = append(results, result)
		}
		go sendMultipleResponses(grpcReq, results)

	case REQ_ROA:
		if len(c.clientMap) == 0 {
			result := &GrpcResponse{}
			result.ResponseErr = fmt.Errorf("RPKI server isn't configured.")
			grpcReq.ResponseCh <- result
			break
		}
		results := make([]*GrpcResponse, 0)
		var rfList []bgp.RouteFamily
		switch grpcReq.RouteFamily {
		case bgp.RF_IPv4_UC:
			rfList = []bgp.RouteFamily{bgp.RF_IPv4_UC}
		case bgp.RF_IPv6_UC:
			rfList = []bgp.RouteFamily{bgp.RF_IPv6_UC}
		default:
			// Unspecified family: dump both v4 and v6 tables.
			rfList = []bgp.RouteFamily{bgp.RF_IPv4_UC, bgp.RF_IPv6_UC}
		}
		for _, rf := range rfList {
			if tree, ok := c.Roas[rf]; ok {
				tree.Walk(func(s string, v interface{}) bool {
					b, _ := v.(*roaBucket)
					var roaList roas
					for _, r := range b.entries {
						roaList = append(roaList, r.toApiStruct())
					}
					sort.Sort(roaList)
					for _, roa := range roaList {
						result := &GrpcResponse{
							Data: roa,
						}
						results = append(results, result)
					}
					return false
				})
			}
		}
		go sendMultipleResponses(grpcReq, results)
	}
}

// validatePath performs RFC 6483-style origin validation of one prefix:
// it resolves the origin AS from the AS_PATH (falling back to ownAs for
// empty or confederation paths), finds the longest covering ROA entry in
// the radix tree, and returns VALID / INVALID / NOT_FOUND.
func validatePath(ownAs uint32, tree *radix.Tree, cidr string, asPath *bgp.PathAttributeAsPath) config.RpkiValidationResultType {
	var as uint32
	if len(asPath.Value) == 0 {
		as = ownAs
	} else {
		asParam := asPath.Value[len(asPath.Value)-1].(*bgp.As4PathParam)
		switch asParam.Type {
		case bgp.BGP_ASPATH_ATTR_TYPE_SEQ:
			if len(asParam.AS) == 0 {
				as = ownAs
			} else {
				as = asParam.AS[len(asParam.AS)-1]
			}
		case bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SET, bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SEQ:
			as = ownAs
		default:
			return config.RPKI_VALIDATION_RESULT_TYPE_NOT_FOUND
		}
	}
	_, n, _ := net.ParseCIDR(cidr)
	ones, _ := n.Mask.Size()
	prefixLen := uint8(ones)
	_, b, _ := tree.LongestPrefix(table.IpToRadixkey(n.IP, prefixLen))
	if b == nil {
		// No covering ROA at all.
		return config.RPKI_VALIDATION_RESULT_TYPE_NOT_FOUND
	}

	bucket, _ := b.(*roaBucket)
	for _, r := range bucket.entries {
		if prefixLen > r.MaxLen {
			// Announcement is more specific than the ROA allows.
			continue
		}
		if r.AS == as {
			return config.RPKI_VALIDATION_RESULT_TYPE_VALID
		}
	}
	// Covered by a ROA but no entry matched the origin AS.
	return config.RPKI_VALIDATION_RESULT_TYPE_INVALID
}

// validate stamps each non-withdraw, non-EOR path with its RPKI
// validation result.
func (c *roaManager) validate(pathList []*table.Path) {
	for _, path := range pathList {
		if path.IsWithdraw || path.IsEOR() {
			continue
		}
		if tree, ok := c.Roas[path.GetRouteFamily()]; ok {
			r := validatePath(c.AS, tree, path.GetNlri().String(), path.GetAsPath())
			path.SetValidation(config.RpkiValidationResultType(r))
		}
	}
}

// roaClient maintains one RTR session to an RPKI cache server.
type roaClient struct {
	t            tomb.Tomb
	host         string
	conn         *net.TCPConn
	state        config.RpkiServerState
	eventCh      chan *ROAEvent
	sessionID    uint16
	oldSessionID uint16
	serialNumber uint32
	timer        *time.Timer
	lifetime     int64
	endOfData    bool
	pendingROAs  []*ROA
}

// NewRoaClient returns a client for the RTR server at address:port.
func NewRoaClient(address, port string, ch chan *ROAEvent, lifetime int64) *roaClient {
	return &roaClient{
		host:        net.JoinHostPort(address, port),
		eventCh:     ch,
		lifetime:    lifetime,
		pendingROAs: make([]*ROA, 0),
	}
}

// enable sends a Serial Query for the given serial on the open connection.
func (c *roaClient) enable(serial uint32) error {
	if c.conn != nil {
		r := rtr.NewRTRSerialQuery(c.sessionID, serial)
		data, _ := r.Serialize()
		_, err := c.conn.Write(data)
		if err != nil {
			return err
		}
		c.state.RpkiMessages.RpkiSent.SerialQuery++
	}
	return nil
}

// softReset sends a Reset Query and discards any buffered pending ROAs.
func (c *roaClient) softReset() error {
	if c.conn != nil {
		r := rtr.NewRTRResetQuery()
		data, _ := r.Serialize()
		_, err := c.conn.Write(data)
		if err != nil {
			return err
		}
		c.state.RpkiMessages.RpkiSent.ResetQuery++
		c.endOfData = false
		c.pendingROAs = make([]*ROA, 0)
	}
	return nil
}

// reset kills the client goroutine and closes its connection.
func (c *roaClient) reset() {
	c.t.Kill(nil)
	if c.conn != nil {
		c.conn.Close()
	}
}

// tryConnect dials the server, retrying every 30 seconds while the tomb
// is alive; on success it emits a CONNECTED event and returns.
func (c *roaClient) tryConnect() error {
	for c.t.Alive() {
		conn, err := net.Dial("tcp", c.host)
		if err != nil {
			time.Sleep(30 * time.Second)
		} else {
			c.eventCh <- &ROAEvent{
				EventType: CONNECTED,
				Src:       c.host,
				conn:      conn.(*net.TCPConn),
			}
			return nil
		}
	}
	return nil
}

// established reads RTR PDUs (fixed header, then length-prefixed body)
// off the connection and forwards each as an RTR event until the
// connection breaks, then emits DISCONNECTED.
func (c *roaClient) established() error {
	defer c.conn.Close()

	disconnected := func() {
		c.eventCh <- &ROAEvent{
			EventType: DISCONNECTED,
			Src:       c.host,
		}
	}

	err := c.softReset()
	if err != nil {
		disconnected()
		return nil
	}

	for {
		header := make([]byte, rtr.RTR_MIN_LEN)
		_, err := io.ReadFull(c.conn, header)
		if err != nil {
			break
		}
		// Bytes 4..8 of the header carry the total PDU length.
		totalLen := binary.BigEndian.Uint32(header[4:8])
		if totalLen < rtr.RTR_MIN_LEN {
			break
		}

		body := make([]byte, totalLen-rtr.RTR_MIN_LEN)
		_, err = io.ReadFull(c.conn, body)
		if err != nil {
			break
		}

		c.eventCh <- &ROAEvent{
			EventType: RTR,
			Src:       c.host,
			Data:      append(header, body...),
		}
	}
	disconnected()
	return nil
}
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"bytes"
	"context"
	goflag "flag"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	"k8s.io/klog/v2"
	"k8s.io/kops/cmd/kops/util"
	kopsapi "k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/client/simple"
	"k8s.io/kops/pkg/commands"
	"k8s.io/kops/pkg/commands/commandutils"
	"k8s.io/kubectl/pkg/util/i18n"
	"k8s.io/kubectl/pkg/util/templates"
)

const (
	// validResources lists the resource kinds accepted by commands that
	// take a resource argument.
	// NOTE(review): interior line breaks of this raw string are
	// reconstructed — confirm against the original file.
	validResources = `

	* cluster
	* instancegroup
	* secret

	`
)

var (
	// NOTE(review): interior line breaks of this raw string are
	// reconstructed — confirm against the original file.
	rootLong = templates.LongDesc(i18n.T(`
	kOps is Kubernetes Operations.

	kOps is the easiest way to get a production grade Kubernetes cluster up and running.
	We like to think of it as kubectl for clusters.

	kOps helps you create, destroy, upgrade and maintain production-grade, highly
	available, Kubernetes clusters from the command line. AWS (Amazon Web Services)
	is currently officially supported, with GCE and OpenStack in beta support.
	`))

	rootShort = i18n.T(`kOps is Kubernetes Operations.`)
)

// RootCmd holds top-level command state: the factory used to build
// clients, the --config/--name flag values, and the cobra command itself.
type RootCmd struct {
	util.FactoryOptions

	factory *util.Factory

	configFile string

	clusterName string

	cobraCommand *cobra.Command
}

var _ commandutils.Factory = &RootCmd{}

var rootCommand = RootCmd{
	cobraCommand: &cobra.Command{
		Use:   "kops",
		Short: rootShort,
		Long:  rootLong,
	},
}

// Execute runs the root command and exits non-zero on error.
func Execute() {
	goflag.Set("logtostderr", "true")
	goflag.CommandLine.Parse([]string{})
	if err := rootCommand.cobraCommand.Execute(); err != nil {
		exitWithError(err)
	}
}

func init() {
	cobra.OnInitialize(initConfig)

	klog.InitFlags(nil)

	factory := util.NewFactory(&rootCommand.FactoryOptions)
	rootCommand.factory = factory

	NewCmdRoot(factory, os.Stdout)
}

// NewCmdRoot wires the global flags (--config, --state, --name) and all
// subcommands onto the root command, and returns it.
func NewCmdRoot(f *util.Factory, out io.Writer) *cobra.Command {
	cmd := rootCommand.cobraCommand

	//cmd.PersistentFlags().AddGoFlagSet(goflag.CommandLine)
	goflag.CommandLine.VisitAll(func(goflag *goflag.Flag) {
		switch goflag.Name {
		case "cloud-provider-gce-lb-src-cidrs":
		case "cloud-provider-gce-l7lb-src-cidrs":
			// Skip; these are dragged in by the google cloudprovider dependency
		default:
			cmd.PersistentFlags().AddGoFlag(goflag)
		}
	})

	cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "yaml config file (default is $HOME/.kops.yaml)")
	viper.BindPFlag("config", cmd.PersistentFlags().Lookup("config"))
	viper.SetDefault("config", "$HOME/.kops.yaml")
	cmd.RegisterFlagCompletionFunc("config", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"yaml", "json"}, cobra.ShellCompDirectiveFilterFileExt
	})

	cmd.PersistentFlags().StringVar(&rootCommand.RegistryPath, "state", "", "Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable")
	viper.BindPFlag("KOPS_STATE_STORE", cmd.PersistentFlags().Lookup("state"))
	viper.BindEnv("KOPS_STATE_STORE")
	// TODO implement completion against VFS

	defaultClusterName := os.Getenv("KOPS_CLUSTER_NAME")
	cmd.PersistentFlags().StringVarP(&rootCommand.clusterName, "name", "", defaultClusterName, "Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable")
	cmd.RegisterFlagCompletionFunc("name", commandutils.CompleteClusterName(&rootCommand, false))

	// create subcommands
	cmd.AddCommand(NewCmdCreate(f, out))
	cmd.AddCommand(NewCmdDelete(f, out))
	cmd.AddCommand(NewCmdDistrust(f, out))
	cmd.AddCommand(NewCmdEdit(f, out))
	cmd.AddCommand(NewCmdExport(f, out))
	cmd.AddCommand(NewCmdGet(f, out))
	cmd.AddCommand(commands.NewCmdHelpers(f, out))
	cmd.AddCommand(NewCmdPromote(f, out))
	cmd.AddCommand(NewCmdUpdate(f, out))
	cmd.AddCommand(NewCmdReplace(f, out))
	cmd.AddCommand(NewCmdRollingUpdate(f, out))
	cmd.AddCommand(NewCmdSet(f, out))
	cmd.AddCommand(NewCmdToolbox(f, out))
	cmd.AddCommand(NewCmdUnset(f, out))
	cmd.AddCommand(NewCmdUpgrade(f, out))
	cmd.AddCommand(NewCmdValidate(f, out))
	cmd.AddCommand(NewCmdVersion(f, out))

	return cmd
}

// initConfig reads in config file and ENV variables if set.
// initConfig reads the config file (from --config, else $HOME/.kops.yaml,
// else $HOME/.kops/config) and environment variables if set, then derives
// the state-store registry path.
func initConfig() {
	// Config file precedence: --config flag, ${HOME}/.kops.yaml ${HOME}/.kops/config
	configFile := rootCommand.configFile
	if configFile == "" {
		home := homedir.HomeDir()
		configPaths := []string{
			filepath.Join(home, ".kops.yaml"),
			filepath.Join(home, ".kops", "config"),
		}
		for _, p := range configPaths {
			_, err := os.Stat(p)
			if err == nil {
				configFile = p
				break
			} else if !os.IsNotExist(err) {
				klog.V(2).Infof("error checking for file %s: %v", p, err)
			}
		}
	}
	if configFile != "" {
		viper.SetConfigFile(configFile)
		viper.SetConfigType("yaml")

		// A bad config file is non-fatal; warn and continue with defaults.
		if err := viper.ReadInConfig(); err != nil {
			klog.Warningf("error reading config: %v", err)
		}
	}

	rootCommand.RegistryPath = viper.GetString("KOPS_STATE_STORE")

	// Tolerate multiple slashes at end
	rootCommand.RegistryPath = strings.TrimSuffix(rootCommand.RegistryPath, "/")
}

// AddCommand attaches a subcommand to the root command.
func (c *RootCmd) AddCommand(cmd *cobra.Command) {
	c.cobraCommand.AddCommand(cmd)
}

// clusterNameArgs returns a cobra args handler that resolves the cluster
// name from positional args / flags / kubeconfig into *clusterName.
func (c *RootCmd) clusterNameArgs(clusterName *string) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		if err := c.ProcessArgs(args); err != nil {
			return err
		}

		*clusterName = c.ClusterName(true)
		if *clusterName == "" {
			return fmt.Errorf("--name is required")
		}

		return nil
	}
}

// clusterNameArgsNoKubeconfig is like clusterNameArgs but never falls
// back to the kubeconfig current context.
func (c *RootCmd) clusterNameArgsNoKubeconfig(clusterName *string) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		if err := c.ProcessArgs(args); err != nil {
			return err
		}

		*clusterName = c.clusterName
		if *clusterName == "" {
			return fmt.Errorf("--name is required")
		}

		return nil
	}
}

// ProcessArgs will parse the positional args. It assumes one of these formats:
// * <no arguments at all>
// * <clustername> (and --name not specified)
// Everything else is an error.
func (c *RootCmd) ProcessArgs(args []string) error {
	if len(args) == 0 {
		return nil
	}

	if len(args) == 1 {
		// Assume <clustername>
		if c.clusterName == "" {
			c.clusterName = args[0]
			return nil
		}
	}

	fmt.Printf("\nFound multiple arguments which look like a cluster name\n")
	if c.clusterName != "" {
		fmt.Printf("\t%q (via flag)\n", c.clusterName)
	}
	for _, arg := range args {
		fmt.Printf("\t%q (as argument)\n", arg)
	}
	fmt.Printf("\n")
	fmt.Printf("This often happens if you specify an argument to a boolean flag without using =\n")
	fmt.Printf("For example: use `--bastion=true` or `--bastion`, not `--bastion true`\n\n")

	if len(args) == 1 {
		return fmt.Errorf("cannot specify cluster via --name and positional argument")
	}
	return fmt.Errorf("expected a single <clustername> to be passed as an argument")
}

// ClusterName returns the cluster name: from flag/positional arg if set,
// otherwise from the kubeconfig current context (cached on success).
func (c *RootCmd) ClusterName(verbose bool) string {
	if c.clusterName != "" {
		return c.clusterName
	}

	// Read from kubeconfig
	pathOptions := clientcmd.NewDefaultPathOptions()

	config, err := pathOptions.GetStartingConfig()
	if err != nil {
		klog.Warningf("error reading kubecfg: %v", err)
	} else if config.CurrentContext == "" {
		klog.Warningf("no context set in kubecfg")
	} else {
		context := config.Contexts[config.CurrentContext]
		if context == nil {
			klog.Warningf("context %q in kubecfg not found", config.CurrentContext)
		} else if context.Cluster == "" {
			klog.Warningf("context %q in kubecfg did not have a cluster", config.CurrentContext)
		} else {
			if verbose {
				fmt.Fprintf(os.Stderr, "Using cluster from kubectl context: %s\n\n", context.Cluster)
			}
			c.clusterName = context.Cluster
		}
	}

	return c.clusterName
}

// Clientset returns a kops clientset from the factory.
func (c *RootCmd) Clientset() (simple.Clientset, error) {
	return c.factory.Clientset()
}

// Cluster reads the currently selected cluster, requiring a cluster name.
func (c *RootCmd) Cluster(ctx context.Context) (*kopsapi.Cluster, error) {
	clusterName := c.ClusterName(true)
	if clusterName == "" {
		return nil, fmt.Errorf("--name is required")
	}

	return GetCluster(ctx, c.factory, clusterName)
}

// GetCluster reads the named cluster configuration and verifies that the
// stored name matches the requested one.
func GetCluster(ctx context.Context, factory commandutils.Factory, clusterName string) (*kopsapi.Cluster, error) {
	if clusterName == "" {
		return nil, field.Required(field.NewPath("clusterName"), "Cluster name is required")
	}

	clientset, err := factory.Clientset()
	if err != nil {
		return nil, err
	}

	cluster, err := clientset.GetCluster(ctx, clusterName)
	if err != nil {
		return nil, fmt.Errorf("error reading cluster configuration: %v", err)
	}
	if cluster == nil {
		return nil, fmt.Errorf("cluster %q not found", clusterName)
	}

	if clusterName != cluster.ObjectMeta.Name {
		return nil, fmt.Errorf("cluster name did not match expected name: %v vs %v", clusterName, cluster.ObjectMeta.Name)
	}
	return cluster, nil
}

// GetClusterNameForCompletionNoKubeconfig returns the cluster name for
// shell completion without consulting the kubeconfig.
func GetClusterNameForCompletionNoKubeconfig(clusterArgs []string) (clusterName string, completions []string, directive cobra.ShellCompDirective) {
	if len(clusterArgs) > 0 {
		return clusterArgs[0], nil, 0
	}

	if rootCommand.clusterName != "" {
		return rootCommand.clusterName, nil, 0
	}

	return "", []string{"--name"}, cobra.ShellCompDirectiveNoFileComp
}

// GetClusterForCompletion resolves the cluster and a clientset for shell
// completion, returning completion hints on failure.
func GetClusterForCompletion(ctx context.Context, factory commandutils.Factory, clusterArgs []string) (cluster *kopsapi.Cluster, clientSet simple.Clientset, completions []string, directive cobra.ShellCompDirective) {
	clusterName := ""

	if len(clusterArgs) > 0 {
		clusterName = clusterArgs[0]
	} else {
		clusterName = rootCommand.ClusterName(false)
	}

	if clusterName == "" {
		return nil, nil, []string{"--name"}, cobra.ShellCompDirectiveNoFileComp
	}

	cluster, err := GetCluster(ctx, &rootCommand, clusterName)
	if err != nil {
		completions, directive := commandutils.CompletionError("getting cluster", err)
		return nil, nil, completions, directive
	}

	clientSet, err = rootCommand.Clientset()
	if err != nil {
		completions, directive := commandutils.CompletionError("getting clientset", err)
		return nil, nil, completions, directive
	}

	return cluster, clientSet, nil, 0
}

// ConsumeStdin reads all the bytes available from stdin
func ConsumeStdin() ([]byte, error) {
	file := os.Stdin
	buf := new(bytes.Buffer)
	_, err := buf.ReadFrom(file)
	if err != nil {
		return nil, fmt.Errorf("error reading stdin: %v", err)
	}
	return buf.Bytes(), nil
}

// NOTE(review): the line below is commit-message residue separating two
// revisions of this file in a concatenated diff-pair dump; it is not Go code.
Suppress usage for errors returned from RunE

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"bytes"
	"context"
	goflag "flag"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	"k8s.io/klog/v2"
	"k8s.io/kops/cmd/kops/util"
	kopsapi "k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/client/simple"
	"k8s.io/kops/pkg/commands"
	"k8s.io/kops/pkg/commands/commandutils"
	"k8s.io/kubectl/pkg/util/i18n"
	"k8s.io/kubectl/pkg/util/templates"
)

const (
	// validResources lists the resource kinds accepted by commands that
	// take a resource argument.
	// NOTE(review): interior line breaks of this raw string are
	// reconstructed — confirm against the original file.
	validResources = `

	* cluster
	* instancegroup
	* secret

	`
)

var (
	// NOTE(review): interior line breaks of this raw string are
	// reconstructed — confirm against the original file.
	rootLong = templates.LongDesc(i18n.T(`
	kOps is Kubernetes Operations.

	kOps is the easiest way to get a production grade Kubernetes cluster up and running.
	We like to think of it as kubectl for clusters.

	kOps helps you create, destroy, upgrade and maintain production-grade, highly
	available, Kubernetes clusters from the command line. AWS (Amazon Web Services)
	is currently officially supported, with GCE and OpenStack in beta support.
	`))

	rootShort = i18n.T(`kOps is Kubernetes Operations.`)
)

// RootCmd holds top-level command state: the factory used to build
// clients, the --config/--name flag values, and the cobra command itself.
type RootCmd struct {
	util.FactoryOptions

	factory *util.Factory

	configFile string

	clusterName string

	cobraCommand *cobra.Command
}

var _ commandutils.Factory = &RootCmd{}

var rootCommand = RootCmd{
	cobraCommand: &cobra.Command{
		Use:   "kops",
		Short: rootShort,
		Long:  rootLong,
		// Suppress cobra's usage text for errors returned from RunE;
		// usage is still shown for flag-parse errors.
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			cmd.SilenceUsage = true
		},
	},
}

// Execute runs the root command and exits non-zero on error.
func Execute() {
	goflag.Set("logtostderr", "true")
	goflag.CommandLine.Parse([]string{})
	if err := rootCommand.cobraCommand.Execute(); err != nil {
		exitWithError(err)
	}
}

func init() {
	cobra.OnInitialize(initConfig)

	klog.InitFlags(nil)

	factory := util.NewFactory(&rootCommand.FactoryOptions)
	rootCommand.factory = factory

	NewCmdRoot(factory, os.Stdout)
}

// NewCmdRoot wires the global flags (--config, --state, --name) and all
// subcommands onto the root command, and returns it.
func NewCmdRoot(f *util.Factory, out io.Writer) *cobra.Command {
	cmd := rootCommand.cobraCommand

	//cmd.PersistentFlags().AddGoFlagSet(goflag.CommandLine)
	goflag.CommandLine.VisitAll(func(goflag *goflag.Flag) {
		switch goflag.Name {
		case "cloud-provider-gce-lb-src-cidrs":
		case "cloud-provider-gce-l7lb-src-cidrs":
			// Skip; these are dragged in by the google cloudprovider dependency
		default:
			cmd.PersistentFlags().AddGoFlag(goflag)
		}
	})

	cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "yaml config file (default is $HOME/.kops.yaml)")
	viper.BindPFlag("config", cmd.PersistentFlags().Lookup("config"))
	viper.SetDefault("config", "$HOME/.kops.yaml")
	cmd.RegisterFlagCompletionFunc("config", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"yaml", "json"}, cobra.ShellCompDirectiveFilterFileExt
	})

	cmd.PersistentFlags().StringVar(&rootCommand.RegistryPath, "state", "", "Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable")
	viper.BindPFlag("KOPS_STATE_STORE", cmd.PersistentFlags().Lookup("state"))
	viper.BindEnv("KOPS_STATE_STORE")
	// TODO implement completion against VFS

	defaultClusterName := os.Getenv("KOPS_CLUSTER_NAME")
	cmd.PersistentFlags().StringVarP(&rootCommand.clusterName, "name", "", defaultClusterName, "Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable")
	cmd.RegisterFlagCompletionFunc("name", commandutils.CompleteClusterName(&rootCommand, false))

	// create subcommands
	cmd.AddCommand(NewCmdCreate(f, out))
	cmd.AddCommand(NewCmdDelete(f, out))
	cmd.AddCommand(NewCmdDistrust(f, out))
	cmd.AddCommand(NewCmdEdit(f, out))
	cmd.AddCommand(NewCmdExport(f, out))
	cmd.AddCommand(NewCmdGet(f, out))
	cmd.AddCommand(commands.NewCmdHelpers(f, out))
	cmd.AddCommand(NewCmdPromote(f, out))
	cmd.AddCommand(NewCmdUpdate(f, out))
	cmd.AddCommand(NewCmdReplace(f, out))
	cmd.AddCommand(NewCmdRollingUpdate(f, out))
	cmd.AddCommand(NewCmdSet(f, out))
	cmd.AddCommand(NewCmdToolbox(f, out))
	cmd.AddCommand(NewCmdUnset(f, out))
	cmd.AddCommand(NewCmdUpgrade(f, out))
	cmd.AddCommand(NewCmdValidate(f, out))
	cmd.AddCommand(NewCmdVersion(f, out))

	return cmd
}

// initConfig reads in config file and ENV variables if set.
// initConfig reads the config file (from --config, else $HOME/.kops.yaml,
// else $HOME/.kops/config) and environment variables if set, then derives
// the state-store registry path.
func initConfig() {
	// Config file precedence: --config flag, ${HOME}/.kops.yaml ${HOME}/.kops/config
	configFile := rootCommand.configFile
	if configFile == "" {
		home := homedir.HomeDir()
		configPaths := []string{
			filepath.Join(home, ".kops.yaml"),
			filepath.Join(home, ".kops", "config"),
		}
		for _, p := range configPaths {
			_, err := os.Stat(p)
			if err == nil {
				configFile = p
				break
			} else if !os.IsNotExist(err) {
				klog.V(2).Infof("error checking for file %s: %v", p, err)
			}
		}
	}
	if configFile != "" {
		viper.SetConfigFile(configFile)
		viper.SetConfigType("yaml")

		// A bad config file is non-fatal; warn and continue with defaults.
		if err := viper.ReadInConfig(); err != nil {
			klog.Warningf("error reading config: %v", err)
		}
	}

	rootCommand.RegistryPath = viper.GetString("KOPS_STATE_STORE")

	// Tolerate multiple slashes at end
	rootCommand.RegistryPath = strings.TrimSuffix(rootCommand.RegistryPath, "/")
}

// AddCommand attaches a subcommand to the root command.
func (c *RootCmd) AddCommand(cmd *cobra.Command) {
	c.cobraCommand.AddCommand(cmd)
}

// clusterNameArgs returns a cobra args handler that resolves the cluster
// name from positional args / flags / kubeconfig into *clusterName.
func (c *RootCmd) clusterNameArgs(clusterName *string) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		if err := c.ProcessArgs(args); err != nil {
			return err
		}

		*clusterName = c.ClusterName(true)
		if *clusterName == "" {
			return fmt.Errorf("--name is required")
		}

		return nil
	}
}

// clusterNameArgsNoKubeconfig is like clusterNameArgs but never falls
// back to the kubeconfig current context.
func (c *RootCmd) clusterNameArgsNoKubeconfig(clusterName *string) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		if err := c.ProcessArgs(args); err != nil {
			return err
		}

		*clusterName = c.clusterName
		if *clusterName == "" {
			return fmt.Errorf("--name is required")
		}

		return nil
	}
}

// ProcessArgs will parse the positional args. It assumes one of these formats:
// * <no arguments at all>
// * <clustername> (and --name not specified)
// Everything else is an error.
func (c *RootCmd) ProcessArgs(args []string) error {
	if len(args) == 0 {
		return nil
	}

	if len(args) == 1 {
		// Assume <clustername>
		if c.clusterName == "" {
			c.clusterName = args[0]
			return nil
		}
	}

	fmt.Printf("\nFound multiple arguments which look like a cluster name\n")
	if c.clusterName != "" {
		fmt.Printf("\t%q (via flag)\n", c.clusterName)
	}
	for _, arg := range args {
		fmt.Printf("\t%q (as argument)\n", arg)
	}
	fmt.Printf("\n")
	fmt.Printf("This often happens if you specify an argument to a boolean flag without using =\n")
	fmt.Printf("For example: use `--bastion=true` or `--bastion`, not `--bastion true`\n\n")

	if len(args) == 1 {
		return fmt.Errorf("cannot specify cluster via --name and positional argument")
	}
	return fmt.Errorf("expected a single <clustername> to be passed as an argument")
}

// ClusterName returns the cluster name: from flag/positional arg if set,
// otherwise from the kubeconfig current context (cached on success).
func (c *RootCmd) ClusterName(verbose bool) string {
	if c.clusterName != "" {
		return c.clusterName
	}

	// Read from kubeconfig
	pathOptions := clientcmd.NewDefaultPathOptions()

	config, err := pathOptions.GetStartingConfig()
	if err != nil {
		klog.Warningf("error reading kubecfg: %v", err)
	} else if config.CurrentContext == "" {
		klog.Warningf("no context set in kubecfg")
	} else {
		context := config.Contexts[config.CurrentContext]
		if context == nil {
			klog.Warningf("context %q in kubecfg not found", config.CurrentContext)
		} else if context.Cluster == "" {
			klog.Warningf("context %q in kubecfg did not have a cluster", config.CurrentContext)
		} else {
			if verbose {
				fmt.Fprintf(os.Stderr, "Using cluster from kubectl context: %s\n\n", context.Cluster)
			}
			c.clusterName = context.Cluster
		}
	}

	return c.clusterName
}

// Clientset returns a kops clientset from the factory.
func (c *RootCmd) Clientset() (simple.Clientset, error) {
	return c.factory.Clientset()
}

// Cluster reads the currently selected cluster, requiring a cluster name.
func (c *RootCmd) Cluster(ctx context.Context) (*kopsapi.Cluster, error) {
	clusterName := c.ClusterName(true)
	if clusterName == "" {
		return nil, fmt.Errorf("--name is required")
	}

	return GetCluster(ctx, c.factory, clusterName)
}

// GetCluster reads the named cluster configuration and verifies that the
// stored name matches the requested one.
func GetCluster(ctx context.Context, factory commandutils.Factory, clusterName string) (*kopsapi.Cluster, error) {
	if clusterName == "" {
		return nil, field.Required(field.NewPath("clusterName"), "Cluster name is required")
	}

	clientset, err := factory.Clientset()
	if err != nil {
		return nil, err
	}

	cluster, err := clientset.GetCluster(ctx, clusterName)
	if err != nil {
		return nil, fmt.Errorf("error reading cluster configuration: %v", err)
	}
	if cluster == nil {
		return nil, fmt.Errorf("cluster %q not found", clusterName)
	}

	if clusterName != cluster.ObjectMeta.Name {
		return nil, fmt.Errorf("cluster name did not match expected name: %v vs %v", clusterName, cluster.ObjectMeta.Name)
	}
	return cluster, nil
}

// GetClusterNameForCompletionNoKubeconfig returns the cluster name for
// shell completion without consulting the kubeconfig.
func GetClusterNameForCompletionNoKubeconfig(clusterArgs []string) (clusterName string, completions []string, directive cobra.ShellCompDirective) {
	if len(clusterArgs) > 0 {
		return clusterArgs[0], nil, 0
	}

	if rootCommand.clusterName != "" {
		return rootCommand.clusterName, nil, 0
	}

	return "", []string{"--name"}, cobra.ShellCompDirectiveNoFileComp
}

// GetClusterForCompletion resolves the cluster and a clientset for shell
// completion, returning completion hints on failure.
func GetClusterForCompletion(ctx context.Context, factory commandutils.Factory, clusterArgs []string) (cluster *kopsapi.Cluster, clientSet simple.Clientset, completions []string, directive cobra.ShellCompDirective) {
	clusterName := ""

	if len(clusterArgs) > 0 {
		clusterName = clusterArgs[0]
	} else {
		clusterName = rootCommand.ClusterName(false)
	}

	if clusterName == "" {
		return nil, nil, []string{"--name"}, cobra.ShellCompDirectiveNoFileComp
	}

	cluster, err := GetCluster(ctx, &rootCommand, clusterName)
	if err != nil {
		completions, directive := commandutils.CompletionError("getting cluster", err)
		return nil, nil, completions, directive
	}

	clientSet, err = rootCommand.Clientset()
	if err != nil {
		completions, directive := commandutils.CompletionError("getting clientset", err)
		return nil, nil, completions, directive
	}

	return cluster, clientSet, nil, 0
}

// ConsumeStdin reads all the bytes available from stdin
func ConsumeStdin() ([]byte, error) {
	file := os.Stdin
	buf := new(bytes.Buffer)
	_, err := buf.ReadFrom(file)
	if err != nil {
		return nil, fmt.Errorf("error reading stdin: %v", err)
	}
	return buf.Bytes(), nil
}
// +build linux darwin freebsd

package mount

import (
	"os"
	"time"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/ncw/rclone/cmd/mountlib"
	"github.com/ncw/rclone/fs/log"
	"github.com/ncw/rclone/vfs"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

// Dir represents a directory entry
type Dir struct {
	*vfs.Dir
}

// Check interface satisfied
var _ fusefs.Node = (*Dir)(nil)

// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
	defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
	a.Valid = mountlib.AttrTimeout
	a.Gid = d.VFS().Opt.GID
	a.Uid = d.VFS().Opt.UID
	a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
	// All timestamps are reported as the directory's modification time.
	modTime := d.ModTime()
	a.Atime = modTime
	a.Mtime = modTime
	a.Ctime = modTime
	a.Crtime = modTime
	// FIXME include Valid so get some caching?
	// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
	return nil
}

// Check interface satisfied
var _ fusefs.NodeSetattrer = (*Dir)(nil)

// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
	defer log.Trace(d, "stat=%+v", req)("err=%v", &err)
	if d.VFS().Opt.NoModTime {
		// Modification times are disabled for this VFS; silently accept.
		return nil
	}

	if req.Valid.MtimeNow() {
		err = d.SetModTime(time.Now())
	} else if req.Valid.Mtime() {
		err = d.SetModTime(req.Mtime)
	}

	return translateError(err)
}

// Check interface satisfied
var _ fusefs.NodeRequestLookuper = (*Dir)(nil)

// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry. If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not to handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
	defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
	mnode, err := d.Dir.Stat(req.Name)
	if err != nil {
		return nil, translateError(err)
	}
	resp.EntryValid = mountlib.AttrTimeout
	// Wrap the VFS node in the corresponding FUSE node type.
	switch x := mnode.(type) {
	case *vfs.File:
		return &File{x}, nil
	case *vfs.Dir:
		return &Dir{x}, nil
	}
	panic("bad type")
}

// Check interface satisfied
var _ fusefs.HandleReadDirAller = (*Dir)(nil)

// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
	itemsRead := -1
	defer log.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
	items, err := d.Dir.ReadDirAll()
	if err != nil {
		return nil, translateError(err)
	}
	for _, node := range items {
		var dirent = fuse.Dirent{
			// Inode FIXME ???
			Type: fuse.DT_File,
			Name: node.Name(),
		}
		if node.IsDir() {
			dirent.Type = fuse.DT_Dir
		}
		dirents = append(dirents, dirent)
	}
	itemsRead = len(dirents)
	return dirents, nil
}

var _ fusefs.NodeCreater = (*Dir)(nil)

// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
	defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
	file, err := d.Dir.Create(req.Name, int(req.Flags))
	if err != nil {
		return nil, nil, translateError(err)
	}
	fh, err := file.Open(int(req.Flags) | os.O_CREATE)
	if err != nil {
		return nil, nil, translateError(err)
	}
	return &File{file}, &FileHandle{fh}, err
}

var _ fusefs.NodeMkdirer = (*Dir)(nil)

// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
	defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
	dir, err := d.Dir.Mkdir(req.Name)
	if err != nil {
		return nil, translateError(err)
	}
	return &Dir{dir}, nil
}

var _ fusefs.NodeRemover = (*Dir)(nil)

// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
	defer log.Trace(d, "name=%q", req.Name)("err=%v", &err)
	err = d.Dir.RemoveName(req.Name)
	if err != nil {
		return translateError(err)
	}
	return nil
}

// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)

// Rename the file
func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
	defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
	destDir, ok := newDir.(*Dir)
	if !ok {
		return errors.Errorf("Unknown Dir type %T", newDir)
	}

	err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
	if err != nil {
		return translateError(err)
	}

	return nil
}

// Check interface satisfied
var _ fusefs.NodeFsyncer = (*Dir)(nil)

// Fsync the directory
func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
	defer log.Trace(d, "")("err=%v", &err)
	err = d.Dir.Sync()
	if err != nil {
		return translateError(err)
	}
	return nil
}

// NOTE(review): the lines below are commit-message residue separating two
// revisions of this file in a concatenated diff-pair dump; they are not Go code.
mount: return ENOSYS rather than EIO on attempted link

This fixes FileZilla accessing an rclone mount served over sftp.
See: https://forum.rclone.org/t/moving-files-on-rclone-mount-with-filezilla/5029

// +build linux darwin freebsd

package mount

import (
	"os"
	"time"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/ncw/rclone/cmd/mountlib"
	"github.com/ncw/rclone/fs/log"
	"github.com/ncw/rclone/vfs"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

// Dir represents a directory entry
type Dir struct {
	*vfs.Dir
}

// Check interface satisfied
var _ fusefs.Node = (*Dir)(nil)

// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
	defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
	a.Valid = mountlib.AttrTimeout
	a.Gid = d.VFS().Opt.GID
	a.Uid = d.VFS().Opt.UID
	a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
	// All timestamps are reported as the directory's modification time.
	modTime := d.ModTime()
	a.Atime = modTime
	a.Mtime = modTime
	a.Ctime = modTime
	a.Crtime = modTime
	// FIXME include Valid so get some caching?
	// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
	return nil
}

// Check interface satisfied
var _ fusefs.NodeSetattrer = (*Dir)(nil)

// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
	defer log.Trace(d, "stat=%+v", req)("err=%v", &err)
	if d.VFS().Opt.NoModTime {
		// Modification times are disabled for this VFS; silently accept.
		return nil
	}

	if req.Valid.MtimeNow() {
		err = d.SetModTime(time.Now())
	} else if req.Valid.Mtime() {
		err = d.SetModTime(req.Mtime)
	}

	return translateError(err)
}

// Check interface satisfied
var _ fusefs.NodeRequestLookuper = (*Dir)(nil)

// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry. If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not to handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
	defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
	mnode, err := d.Dir.Stat(req.Name)
	if err != nil {
		return nil, translateError(err)
	}
	resp.EntryValid = mountlib.AttrTimeout
	// Wrap the VFS node in the corresponding fuse node type.
	switch x := mnode.(type) {
	case *vfs.File:
		return &File{x}, nil
	case *vfs.Dir:
		return &Dir{x}, nil
	}
	// Stat only returns *vfs.File or *vfs.Dir, so this is unreachable.
	panic("bad type")
}

// Check interface satisfied
var _ fusefs.HandleReadDirAller = (*Dir)(nil)

// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
	// itemsRead is traced by reference so the deferred logger reports
	// the final count; -1 means the listing failed.
	itemsRead := -1
	defer log.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
	items, err := d.Dir.ReadDirAll()
	if err != nil {
		return nil, translateError(err)
	}
	for _, node := range items {
		var dirent = fuse.Dirent{
			// Inode FIXME ???
			Type: fuse.DT_File,
			Name: node.Name(),
		}
		if node.IsDir() {
			dirent.Type = fuse.DT_Dir
		}
		dirents = append(dirents, dirent)
	}
	itemsRead = len(dirents)
	return dirents, nil
}

var _ fusefs.NodeCreater = (*Dir)(nil)

// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
	defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
	file, err := d.Dir.Create(req.Name, int(req.Flags))
	if err != nil {
		return nil, nil, translateError(err)
	}
	// Open with O_CREATE as well so the handle matches create semantics.
	fh, err := file.Open(int(req.Flags) | os.O_CREATE)
	if err != nil {
		return nil, nil, translateError(err)
	}
	return &File{file}, &FileHandle{fh}, err
}

var _ fusefs.NodeMkdirer = (*Dir)(nil)

// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
	defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
	dir, err := d.Dir.Mkdir(req.Name)
	if err != nil {
		return nil, translateError(err)
	}
	return &Dir{dir}, nil
}

var _ fusefs.NodeRemover = (*Dir)(nil)

// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
	defer log.Trace(d, "name=%q", req.Name)("err=%v", &err)
	err = d.Dir.RemoveName(req.Name)
	if err != nil {
		return translateError(err)
	}
	return nil
}

// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)

// Rename the file
func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
	defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
	// The destination must be one of our own Dir nodes so that we can
	// reach the underlying *vfs.Dir for the rename.
	destDir, ok := newDir.(*Dir)
	if !ok {
		return errors.Errorf("Unknown Dir type %T", newDir)
	}

	err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
	if err != nil {
		return translateError(err)
	}

	return nil
}

// Check interface satisfied
var _ fusefs.NodeFsyncer = (*Dir)(nil)

// Fsync the directory
func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
	defer log.Trace(d, "")("err=%v", &err)
	err = d.Dir.Sync()
	if err != nil {
		return translateError(err)
	}
	return nil
}

// Check interface satisfied
var _ fusefs.NodeLinker = (*Dir)(nil)

// Link creates a new directory entry in the receiver based on an
// existing Node. Receiver must be a directory.
func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node) (new fusefs.Node, err error) {
	defer log.Trace(d, "req=%v, old=%v", req, old)("new=%v, err=%v", &new, &err)
	// Hard links are not supported; return ENOSYS (not EIO) so that
	// clients such as sftp/FileZilla degrade gracefully.
	return nil, fuse.ENOSYS
}
//
// Last.Backend LLC CONFIDENTIAL
// __________________
//
// [2014] - [2019] Last.Backend LLC
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of Last.Backend LLC and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to Last.Backend LLC
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from Last.Backend LLC.
//
// Last.Backend Open-source API
//
// Open-source system for automating deployment, scaling, and management of containerized applications.
//
// Terms Of Service:
//
// https://lastbackend.com/legal/terms/
//
// Schemes: https
// Host: api.lastbackend.com
// BasePath: /
// Version: 0.9.4
// License: MIT http://opensource.org/licenses/MIT
// Contact: Last.Backend Teams <team@lastbackend.com> https://lastbackend.com
//
// Consumes:
// - application/json
//
// Produces:
// - application/json
//
// Security:
// - bearerToken:
//
// SecurityDefinitions:
// bearerToken:
// description: Bearer Token authentication
// type: apiKey
// name: authorization
// in: header
//
// Extensions:
// x-meta-value: value
// x-meta-array:
// - value1
// - value2
// x-meta-array-obj:
// - name: obj
// value: field
//
// swagger:meta
package main

import (
	"fmt"
	"github.com/lastbackend/lastbackend/pkg/node"
	flag "github.com/spf13/pflag"
	"github.com/spf13/viper"
	"strings"
)

// Environment-variable prefix and config-file defaults for viper.
const default_env_prefix = "LB"
const default_config_type = "yaml"
const default_config_name = "config"

var (
	// flags declares every CLI flag of the node daemon together with the
	// viper key it is bound to; main registers, parses and binds them.
	flags = []struct {
		// flag name
		Name string
		// flag short name
		Short string
		// flag value
		Value interface{}
		// flag description
		Desc string
		// viper name for binding from flag
		Bind string
	}{
		{Name: "access-token", Short: "", Value: "", Desc: "Access token to API server", Bind: "token"},
		{Name: "workdir", Short: "", Value: "", Desc: "Node workdir for runtime", Bind: "workdir"},
		{Name: "manifest-path", Short: "", Value: "", Desc: "Node manifest(s) path", Bind: "manifest.dir"},
		{Name: "bind-interface", Short: "", Value: "eth0", Desc: "Exporter bind network interface", Bind: "network.interface"},
		{Name: "network-proxy", Short: "", Value: "ipvs", Desc: "Network proxy driver (ipvs by default)", Bind: "network.cpi.type"},
		{Name: "network-proxy-iface-internal", Short: "", Value: "docker0", Desc: "Network proxy internal interface binding", Bind: "network.cpi.interface.internal"},
		{Name: "network-proxy-iface-external", Short: "", Value: "eth0", Desc: "Network proxy external interface binding", Bind: "network.cpi.interface.external"},
		{Name: "network-driver", Short: "", Value: "vxlan", Desc: "Network driver (vxlan by default)", Bind: "network.cni.type"},
		{Name: "network-driver-iface-external", Short: "", Value: "eth0", Desc: "Container overlay network external interface for host communication", Bind: "network.cni.interface.external"},
		{Name: "network-driver-iface-internal", Short: "", Value: "docker0", Desc: "Container overlay network internal bridge interface for container intercommunications", Bind: "network.cni.interface.internal"},
		{Name: "container-runtime", Short: "", Value: "docker", Desc: "Node container runtime", Bind: "container.cri.type"},
		{Name: "container-runtime-docker-version", Short: "", Value: "", Desc: "Set docker version for docker container runtime", Bind: "container.cri.docker.version"},
		{Name: "container-storage-root", Short: "", Value: "/var/run/lastbackend", Desc: "Node container storage root", Bind: "container.csi.dir.root"},
		// NOTE(review): "container-runtime" and "container-runtime-docker-version"
		// are registered a second time below with different Bind keys; pflag
		// panics ("flag redefined") on duplicate registration, so this version
		// cannot start. The IRI entries presumably need distinct flag names
		// (e.g. "container-image-runtime") — confirm against the fixed revision.
		{Name: "container-runtime", Short: "", Value: "docker", Desc: "Node container images runtime", Bind: "container.iri.type"},
		{Name: "container-runtime-docker-version", Short: "", Value: "", Desc: "Set docker version for docker container runtime", Bind: "container.iri.docker.version"},
		{Name: "bind-address", Short: "", Value: "0.0.0.0", Desc: "Node bind address", Bind: "server.host"},
		{Name: "bind-port", Short: "", Value: 2965, Desc: "Node listening port binding", Bind: "server.port"},
		{Name: "tls-cert-file", Short: "", Value: "", Desc: "Node cert file path", Bind: "server.tls.cert"},
		{Name: "tls-private-key-file", Short: "", Value: "", Desc: "Node private key file path", Bind: "server.tls.key"},
		{Name: "tls-ca-file", Short: "", Value: "", Desc: "Node certificate authority file path", Bind: "server.tls.ca"},
		{Name: "api-uri", Short: "", Value: "", Desc: "REST API endpoint", Bind: "api.uri"},
		{Name: "api-tls-cert-file", Short: "", Value: "", Desc: "REST API TLS certificate file path", Bind: "api.tls.cert"},
		{Name: "api-tls-private-key-file", Short: "", Value: "", Desc: "REST API TLS private key file path", Bind: "api.tls.key"},
		{Name: "api-tls-ca-file", Short: "", Value: "", Desc: "REST API TSL certificate authority file path", Bind: "api.tls.ca"},
		{Name: "verbose", Short: "v", Value: 0, Desc: "Set log level from 0 to 7", Bind: "verbose"},
		{Name: "config", Short: "c", Value: "", Desc: "Path for the configuration file", Bind: "config"},
	}
)

// main registers all flags, binds them (plus LB_* environment variables)
// into viper, optionally reads a config file, and starts the node daemon.
func main() {

	// Register each flag with the pflag type matching its default value.
	for _, item := range flags {
		switch item.Value.(type) {
		case string:
			flag.StringP(item.Name, item.Short, item.Value.(string), item.Desc)
		case int:
			flag.IntP(item.Name, item.Short, item.Value.(int), item.Desc)
		case []string:
			flag.StringSliceP(item.Name, item.Short, item.Value.([]string), item.Desc)
		default:
			panic(fmt.Sprintf("bad %s argument value", item.Name))
		}
	}

	flag.Parse()

	v := viper.New()
	v.AutomaticEnv()
	v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
	v.SetEnvPrefix(default_env_prefix)

	// Bind every viper key to its flag and to an LB_<NAME> env variable.
	for _, item := range flags {
		if err := v.BindPFlag(item.Bind, flag.Lookup(item.Name)); err != nil {
			panic(err)
		}
		name := strings.Replace(strings.ToUpper(item.Name), "-", "_", -1)
		name = strings.Join([]string{default_env_prefix, name}, "_")
		if err := v.BindEnv(item.Bind, name); err != nil {
			panic(err)
		}
	}

	v.SetConfigType(default_config_type)
	v.SetConfigFile(v.GetString(default_config_name))

	// The config file is optional; only read it when --config was given.
	if len(v.GetString("config")) != 0 {
		if err := v.ReadInConfig(); err != nil {
			panic(fmt.Sprintf("Read config err: %v", err))
		}
	}

	node.Daemon(v)
}

update config

//
// Last.Backend LLC CONFIDENTIAL
// __________________
//
// [2014] - [2019] Last.Backend LLC
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of Last.Backend LLC and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to Last.Backend LLC
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from Last.Backend LLC.
//
// Last.Backend Open-source API
//
// Open-source system for automating deployment, scaling, and management of containerized applications.
// // Terms Of Service: // // https://lastbackend.com/legal/terms/ // // Schemes: https // Host: api.lastbackend.com // BasePath: / // Version: 0.9.4 // License: MIT http://opensource.org/licenses/MIT // Contact: Last.Backend Teams <team@lastbackend.com> https://lastbackend.com // // Consumes: // - application/json // // Produces: // - application/json // // Security: // - bearerToken: // // SecurityDefinitions: // bearerToken: // description: Bearer Token authentication // type: apiKey // name: authorization // in: header // // Extensions: // x-meta-value: value // x-meta-array: // - value1 // - value2 // x-meta-array-obj: // - name: obj // value: field // // swagger:meta package main import ( "fmt" "github.com/lastbackend/lastbackend/pkg/node" flag "github.com/spf13/pflag" "github.com/spf13/viper" "strings" ) const default_env_prefix = "LB" const default_config_type = "yaml" const default_config_name = "config" var ( flags = []struct { // flag name Name string // flag short name Short string // flag value Value interface{} // flag description Desc string // viper name for binding from flag Bind string }{ {Name: "access-token", Short: "", Value: "", Desc: "Access token to API server", Bind: "token"}, {Name: "workdir", Short: "", Value: "", Desc: "Node workdir for runtime", Bind: "workdir"}, {Name: "manifest-path", Short: "", Value: "", Desc: "Node manifest(s) path", Bind: "manifest.dir"}, {Name: "bind-interface", Short: "", Value: "eth0", Desc: "Exporter bind network interface", Bind: "network.interface"}, {Name: "network-proxy", Short: "", Value: "ipvs", Desc: "Network proxy driver (ipvs by default)", Bind: "network.cpi.type"}, {Name: "network-proxy-iface-internal", Short: "", Value: "docker0", Desc: "Network proxy internal interface binding", Bind: "network.cpi.interface.internal"}, {Name: "network-proxy-iface-external", Short: "", Value: "eth0", Desc: "Network proxy external interface binding", Bind: "network.cpi.interface.external"}, {Name: "network-driver", 
Short: "", Value: "vxlan", Desc: "Network driver (vxlan by default)", Bind: "network.cni.type"}, {Name: "network-driver-iface-external", Short: "", Value: "eth0", Desc: "Container overlay network external interface for host communication", Bind: "network.cni.interface.external"}, {Name: "network-driver-iface-internal", Short: "", Value: "docker0", Desc: "Container overlay network internal bridge interface for container intercommunications", Bind: "network.cni.interface.internal"}, {Name: "container-runtime", Short: "", Value: "docker", Desc: "Node container runtime", Bind: "container.cri.type"}, {Name: "container-runtime-docker-version", Short: "", Value: "", Desc: "Set docker version for docker container runtime", Bind: "container.cri.docker.version"}, {Name: "container-storage-root", Short: "", Value: "/var/run/lastbackend", Desc: "Node container storage root", Bind: "container.csi.dir.root"}, {Name: "container-image-runtime", Short: "", Value: "docker", Desc: "Node container images runtime", Bind: "container.iri.type"}, {Name: "container-runtime-docker-version", Short: "", Value: "", Desc: "Set docker version for docker container runtime", Bind: "container.iri.docker.version"}, {Name: "bind-address", Short: "", Value: "0.0.0.0", Desc: "Node bind address", Bind: "server.host"}, {Name: "bind-port", Short: "", Value: 2965, Desc: "Node listening port binding", Bind: "server.port"}, {Name: "tls-cert-file", Short: "", Value: "", Desc: "Node cert file path", Bind: "server.tls.cert"}, {Name: "tls-private-key-file", Short: "", Value: "", Desc: "Node private key file path", Bind: "server.tls.key"}, {Name: "tls-ca-file", Short: "", Value: "", Desc: "Node certificate authority file path", Bind: "server.tls.ca"}, {Name: "api-uri", Short: "", Value: "", Desc: "REST API endpoint", Bind: "api.uri"}, {Name: "api-tls-cert-file", Short: "", Value: "", Desc: "REST API TLS certificate file path", Bind: "api.tls.cert"}, {Name: "api-tls-private-key-file", Short: "", Value: "", Desc: 
"REST API TLS private key file path", Bind: "api.tls.key"}, {Name: "api-tls-ca-file", Short: "", Value: "", Desc: "REST API TSL certificate authority file path", Bind: "api.tls.ca"}, {Name: "verbose", Short: "v", Value: 0, Desc: "Set log level from 0 to 7", Bind: "verbose"}, {Name: "config", Short: "c", Value: "", Desc: "Path for the configuration file", Bind: "config"}, } ) func main() { for _, item := range flags { switch item.Value.(type) { case string: flag.StringP(item.Name, item.Short, item.Value.(string), item.Desc) case int: flag.IntP(item.Name, item.Short, item.Value.(int), item.Desc) case []string: flag.StringSliceP(item.Name, item.Short, item.Value.([]string), item.Desc) default: panic(fmt.Sprintf("bad %s argument value", item.Name)) } } flag.Parse() v := viper.New() v.AutomaticEnv() v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) v.SetEnvPrefix(default_env_prefix) for _, item := range flags { if err := v.BindPFlag(item.Bind, flag.Lookup(item.Name)); err != nil { panic(err) } name := strings.Replace(strings.ToUpper(item.Name), "-", "_", -1) name = strings.Join([]string{default_env_prefix, name}, "_") if err := v.BindEnv(item.Bind, name); err != nil { panic(err) } } v.SetConfigType(default_config_type) v.SetConfigFile(v.GetString(default_config_name)) if len(v.GetString("config")) != 0 { if err := v.ReadInConfig(); err != nil { panic(fmt.Sprintf("Read config err: %v", err)) } } node.Daemon(v) }
// Copyright © 2017 uMov.me Team <devteam-umovme@googlegroups.com> // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
package cmd

import (
	"fmt"
	"time"

	"github.com/apex/log"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/umovme/dbview/setup"
)

const (
	// daemonInterval is both the default and the minimum allowed
	// refresh interval for daemon mode.
	daemonInterval = 30 * time.Second
)

// replicateCmd represents the replicate command
var replicateCmd = &cobra.Command{
	Use:   "replicate",
	Short: "Runs the replication functions",
	Long:  `Runs the replication functions and updates the target database at the latest version`,
	Run: func(cmd *cobra.Command, args []string) {
		logInfoBold("Starting dbview replication")
		if viper.GetBool("daemon") {
			dur := viper.GetDuration("refresh-interval")
			if dur < daemonInterval {
				log.Fatalf("Refresh interval (%s) must greater or equals than %s.\n", dur, daemonInterval)
			}
			logInfoBold("Starting in daemon mode")
			ticker := time.NewTicker(dur)
			// Runs once immediately, then once per tick; this loop never
			// returns, so the trailing one-shot call below is only reached
			// when daemon mode is off.
			for ; true; <-ticker.C {
				runReplicate()
			}
		}
		runReplicate()
		log.Info("Done.")
	},
}

// runReplicate builds local and remote connection details from the
// configuration and runs do_replication_log on the local database.
func runReplicate() {

	localConn := setup.ConnectionDetails{
		Username: viper.GetString("local-database.username"),
		Host:     viper.GetString("local-database.host"),
		Port:     viper.GetInt("local-database.port"),
		Database: viper.GetString("local-database.target_database"),
		SslMode:  viper.GetString("local-database.ssl"),
		Password: viper.GetString("local-database.password"),
	}

	log.Debugf("Using local connection with '%s'", localConn.ToString())

	remoteConn := setup.ConnectionDetails{
		Username: viper.GetString("remote-database.username"),
		Host:     viper.GetString("remote-database.host"),
		Port:     viper.GetInt("remote-database.port"),
		Database: viper.GetString("remote-database.database"),
		SslMode:  viper.GetString("remote-database.ssl"),
		Password: viper.GetString("remote-database.password"),
	}

	log.Debugf("Using remote connection with '%s'", remoteConn.ToString())
	log.Debugf("Remember to use a remote user with '%s' in their search_path variable!", customerUser)

	// do_replication_log pulls the data from the remote customer schema.
	newQuery := fmt.Sprintf(
		"SELECT do_replication_log('%s', '%s');",
		remoteConn.ToString(),
		fmt.Sprintf("u%s", viper.GetString("customer")))

	log.Debugf("QUERY: %s", newQuery)

	log.Info("Updating Replication Data...")
	if err := setup.ExecuteQuery(localConn, newQuery); err != nil {
		log.WithError(err).Error("fail to replicate the data")
	}
}

// init registers the replicate command and its flags, binding each flag
// into viper. NOTE(review): the BindPFlag errors are ignored throughout.
func init() {
	RootCmd.AddCommand(replicateCmd)

	// daemon mode related
	replicateCmd.PersistentFlags().Bool("daemon", false, "Run as daemon ")
	viper.BindPFlag("daemon", replicateCmd.PersistentFlags().Lookup("daemon"))

	replicateCmd.PersistentFlags().Duration("refresh-interval", daemonInterval, "Refresh interval for daemon mode")
	viper.BindPFlag("refresh-interval", replicateCmd.PersistentFlags().Lookup("refresh-interval"))

	replicateCmd.PersistentFlags().String("remote-database.ssl", "disable", fmt.Sprintf("Remote %s", sslConnectionLabel))
	viper.BindPFlag("remote-database.ssl", replicateCmd.PersistentFlags().Lookup("remote-database.ssl"))

	replicateCmd.PersistentFlags().StringP("remote-database.username", "", "postgres", fmt.Sprintf("Remote %s", dbUserLabel))
	viper.BindPFlag("remote-database.username", replicateCmd.PersistentFlags().Lookup("remote-database.username"))

	replicateCmd.PersistentFlags().StringP("remote-database.port", "", "9999", fmt.Sprintf("Remote %s", dbPortLabel))
	viper.BindPFlag("remote-database.port", replicateCmd.PersistentFlags().Lookup("remote-database.port"))

	replicateCmd.PersistentFlags().StringP("remote-database.password", "", "", fmt.Sprintf("Remote %s", dbUserPasswordLabel))
	viper.BindPFlag("remote-database.password", replicateCmd.PersistentFlags().Lookup("remote-database.password"))

	replicateCmd.PersistentFlags().StringP("remote-database.host", "", "dbview.umov.me", fmt.Sprintf("Remote %s", dbHostLabel))
	viper.BindPFlag("remote-database.host", replicateCmd.PersistentFlags().Lookup("remote-database.host"))

	replicateCmd.PersistentFlags().StringP("remote-database.database", "", "prod_umov_dbview", "Remote Database name")
	// NOTE(review): this binds key "local-database.database" to a lookup of
	// a flag named "local-database.database", but the flag defined just
	// above is "remote-database.database". Lookup returns nil, BindPFlag
	// fails, and the error is discarded — so the remote database flag is
	// never bound. Presumably both strings should be
	// "remote-database.database"; confirm against the fixed revision.
	viper.BindPFlag("local-database.database", replicateCmd.PersistentFlags().Lookup("local-database.database"))
}

force update function when replicate is called

//
Copyright © 2017 uMov.me Team <devteam-umovme@googlegroups.com> // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
package cmd

import (
	"fmt"
	"time"

	"github.com/apex/log"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/umovme/dbview/setup"
)

const (
	// daemonInterval is both the default and the minimum allowed
	// refresh interval for daemon mode.
	daemonInterval = 30 * time.Second
)

// replicateCmd represents the replicate command
var replicateCmd = &cobra.Command{
	Use:   "replicate",
	Short: "Runs the replication functions",
	Long:  `Runs the replication functions and updates the target database at the latest version`,
	Run: func(cmd *cobra.Command, args []string) {
		logInfoBold("Starting dbview replication")
		if viper.GetBool("daemon") {
			dur := viper.GetDuration("refresh-interval")
			if dur < daemonInterval {
				log.Fatalf("Refresh interval (%s) must greater or equals than %s.\n", dur, daemonInterval)
			}
			logInfoBold("Starting in daemon mode")
			ticker := time.NewTicker(dur)
			// Runs once immediately, then once per tick; this loop never
			// returns, so the trailing one-shot call below is only reached
			// when daemon mode is off.
			for ; true; <-ticker.C {
				runReplicate()
			}
		}
		runReplicate()
		log.Info("Done.")
	},
}

// runReplicate builds local and remote connection details from the
// configuration, refreshes the replication function on the local
// database, and runs do_replication_log against it.
func runReplicate() {

	localConn := setup.ConnectionDetails{
		Username: viper.GetString("local-database.username"),
		Host:     viper.GetString("local-database.host"),
		Port:     viper.GetInt("local-database.port"),
		Database: viper.GetString("local-database.target_database"),
		SslMode:  viper.GetString("local-database.ssl"),
		Password: viper.GetString("local-database.password"),
	}

	log.Debugf("Using local connection with '%s'", localConn.ToString())

	remoteConn := setup.ConnectionDetails{
		Username: viper.GetString("remote-database.username"),
		Host:     viper.GetString("remote-database.host"),
		Port:     viper.GetInt("remote-database.port"),
		Database: viper.GetString("remote-database.database"),
		SslMode:  viper.GetString("remote-database.ssl"),
		Password: viper.GetString("remote-database.password"),
	}

	log.Debugf("Using remote connection with '%s'", remoteConn.ToString())
	log.Debugf("Remember to use a remote user with '%s' in their search_path variable!", customerUser)

	// Force-update the replication function so the query below always
	// runs against the latest definition.
	// Fix: the error was previously discarded silently.
	log.Info("Updating the database functions")
	if err := setup.ExecuteQuery(localConn, setup.ReplicationLogFunction); err != nil {
		log.WithError(err).Error("fail to update the replication function")
	}

	// do_replication_log pulls the data from the remote customer schema.
	newQuery := fmt.Sprintf(
		"SELECT do_replication_log('%s', '%s');",
		remoteConn.ToString(),
		fmt.Sprintf("u%s", viper.GetString("customer")))

	log.Debugf("QUERY: %s", newQuery)

	log.Info("Updating Replication Data...")
	if err := setup.ExecuteQuery(localConn, newQuery); err != nil {
		log.WithError(err).Error("fail to replicate the data")
	}
}

// init registers the replicate command and its flags, binding each flag
// into viper.
func init() {
	RootCmd.AddCommand(replicateCmd)

	// daemon mode related
	replicateCmd.PersistentFlags().Bool("daemon", false, "Run as daemon ")
	viper.BindPFlag("daemon", replicateCmd.PersistentFlags().Lookup("daemon"))

	replicateCmd.PersistentFlags().Duration("refresh-interval", daemonInterval, "Refresh interval for daemon mode")
	viper.BindPFlag("refresh-interval", replicateCmd.PersistentFlags().Lookup("refresh-interval"))

	replicateCmd.PersistentFlags().String("remote-database.ssl", "disable", fmt.Sprintf("Remote %s", sslConnectionLabel))
	viper.BindPFlag("remote-database.ssl", replicateCmd.PersistentFlags().Lookup("remote-database.ssl"))

	replicateCmd.PersistentFlags().StringP("remote-database.username", "", "postgres", fmt.Sprintf("Remote %s", dbUserLabel))
	viper.BindPFlag("remote-database.username", replicateCmd.PersistentFlags().Lookup("remote-database.username"))

	replicateCmd.PersistentFlags().StringP("remote-database.port", "", "9999", fmt.Sprintf("Remote %s", dbPortLabel))
	viper.BindPFlag("remote-database.port", replicateCmd.PersistentFlags().Lookup("remote-database.port"))

	replicateCmd.PersistentFlags().StringP("remote-database.password", "", "", fmt.Sprintf("Remote %s", dbUserPasswordLabel))
	viper.BindPFlag("remote-database.password", replicateCmd.PersistentFlags().Lookup("remote-database.password"))

	replicateCmd.PersistentFlags().StringP("remote-database.host", "", "dbview.umov.me", fmt.Sprintf("Remote %s", dbHostLabel))
	viper.BindPFlag("remote-database.host", replicateCmd.PersistentFlags().Lookup("remote-database.host"))

	replicateCmd.PersistentFlags().StringP("remote-database.database", "", "prod_umov_dbview", "Remote Database name")
	// Fix: this previously bound key "local-database.database" to a lookup
	// of a flag named "local-database.database", which was never defined —
	// Lookup returned nil, BindPFlag failed silently, and the
	// "remote-database.database" key read by runReplicate was never bound
	// to the flag defined just above.
	viper.BindPFlag("remote-database.database", replicateCmd.PersistentFlags().Lookup("remote-database.database"))
}
package cmd

import (
	"log"
	"math/rand"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/zaquestion/lab/internal/git"
)

// TestMain builds a coverage-instrumented lab binary, runs the suite from
// the testdata directory, then removes the binary and any copied repos.
func TestMain(m *testing.M) {
	wd, err := git.WorkingDir()
	if err != nil {
		log.Fatal(err)
	}
	os.Chdir(wd)
	// Build the binary the tests shell out to.
	err = exec.Command("go", "test", "-c", "-coverpkg", "./...", "-covermode", "count", "-o", "lab_bin").Run()
	if err != nil {
		log.Fatal(err)
	}
	rand.Seed(time.Now().UnixNano())
	os.Chdir(path.Join(wd, "testdata"))
	code := m.Run()
	os.Chdir(wd)
	os.Remove("lab_bin")
	testdirs, err := filepath.Glob("testdata-*")
	if err != nil {
		log.Fatal(err)
	}
	for _, dir := range testdirs {
		err := os.RemoveAll(dir)
		if err != nil {
			log.Fatal(err)
		}
	}
	os.Exit(code)
}

// copyTestRepo copies the testdata fixture into a uniquely named sibling
// directory and returns its absolute path.
func copyTestRepo(t *testing.T) string {
	dir := "../testdata-" + strconv.Itoa(int(rand.Uint64()))
	t.Log(dir)
	err := exec.Command("cp", "-r", "../testdata", dir).Run()
	if err != nil {
		t.Fatal(err)
	}
	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	dir = path.Join(wd, dir)
	return dir
}

func TestRootCloneNoArg(t *testing.T) {
	cmd := exec.Command("../lab_bin", "clone")
	b, _ := cmd.CombinedOutput()
	require.Contains(t, string(b), "You must specify a repository to clone.")
}

(tests) test passthrough to git and combined help text on `lab`

package cmd

import (
	"log"
	"math/rand"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zaquestion/lab/internal/git"
)

// TestMain builds a coverage-instrumented lab binary, runs the suite from
// the testdata directory, then removes the binary and any copied repos.
func TestMain(m *testing.M) {
	wd, err := git.WorkingDir()
	if err != nil {
		log.Fatal(err)
	}
	os.Chdir(wd)
	// Build the binary the tests shell out to.
	err = exec.Command("go", "test", "-c", "-coverpkg", "./...", "-covermode", "count", "-o", "lab_bin").Run()
	if err != nil {
		log.Fatal(err)
	}
	rand.Seed(time.Now().UnixNano())
	os.Chdir(path.Join(wd, "testdata"))
	code := m.Run()
	os.Chdir(wd)
	os.Remove("lab_bin")
	testdirs, err := filepath.Glob("testdata-*")
	if err != nil {
		log.Fatal(err)
	}
	for _, dir := range testdirs {
		err := os.RemoveAll(dir)
		if err != nil {
			log.Fatal(err)
		}
	}
	os.Exit(code)
}

// copyTestRepo copies the testdata fixture into a uniquely named sibling
// directory and returns its absolute path.
func copyTestRepo(t *testing.T) string {
	dir := "../testdata-" + strconv.Itoa(int(rand.Uint64()))
	t.Log(dir)
	err := exec.Command("cp", "-r", "../testdata", dir).Run()
	if err != nil {
		t.Fatal(err)
	}
	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	dir = path.Join(wd, dir)
	return dir
}

func TestRootCloneNoArg(t *testing.T) {
	cmd := exec.Command("../lab_bin", "clone")
	b, _ := cmd.CombinedOutput()
	require.Contains(t, string(b), "You must specify a repository to clone.")
}

// TestRootGitCmd verifies unknown subcommands are passed through to git.
// NOTE(review): the exact whitespace inside this raw string follows
// git-log's standard layout — confirm against the fixture repo.
func TestRootGitCmd(t *testing.T) {
	cmd := exec.Command("../lab_bin", "log", "-n", "1")
	b, _ := cmd.CombinedOutput()
	require.Contains(t, string(b), `commit cd64a7caea4f3ee5696a190379aff1a7f636e598
Author: Zaq? Wiedmann <zaquestion@gmail.com>
Date:   Sat Sep 2 20:58:39 2017 -0700

    Added additional commit for LastCommitMessage and meeting requirements for Log test (>1 commit)`)
}

// TestRootNoArg verifies the combined git + lab help text is shown when
// no arguments are given.
func TestRootNoArg(t *testing.T) {
	cmd := exec.Command("../lab_bin")
	b, _ := cmd.CombinedOutput()
	assert.Contains(t, string(b), "usage: git [--version] [--help] [-C <path>] [-c name=value]")
	assert.Contains(t, string(b), `These GitLab commands are provided by lab:

  fork          Fork a remote repository on GitLab and add as remote`)
}
package main

import (
	"fmt"
	"time"

	"github.com/PuerkitoBio/goquery"
	"github.com/oli-g/chuper"
)

var (
	// delay between fetches of the crawler.
	delay = 2 * time.Second

	// seeds are the initial URLs to enqueue (duplicates exercise the cache).
	seeds = []string{
		"http://www.repubblica.it",
		"http://www.corriere.it",
		"http://www.repubblica.it",
		"http://www.corriere.it",
	}

	// criteria restricts the processors below to successful HTML GETs
	// on www.gazzetta.it.
	criteria = &chuper.ResponseCriteria{
		Method:      "GET",
		ContentType: "text/html",
		Status:      200,
		Host:        "www.gazzetta.it",
	}

	firstProcessor = chuper.ProcessorFunc(func(ctx *chuper.Context, doc *goquery.Document) bool {
		fmt.Printf("seed - %s - info: first %s %s from %s\n", time.Now().Format(time.RFC3339), ctx.Cmd.Method(), ctx.Cmd.URL(), ctx.SourceURL())
		return true
	})

	// secondProcessor returns false, which stops the processor chain so
	// thirdProcessor never runs.
	secondProcessor = chuper.ProcessorFunc(func(ctx *chuper.Context, doc *goquery.Document) bool {
		fmt.Printf("seed - %s - info: second %s %s\n", time.Now().Format(time.RFC3339), ctx.Cmd.Method(), ctx.Cmd.URL())
		return false
	})

	thirdProcessor = chuper.ProcessorFunc(func(ctx *chuper.Context, doc *goquery.Document) bool {
		fmt.Printf("seed - %s - info: third %s %s\n", time.Now().Format(time.RFC3339), ctx.Cmd.Method(), ctx.Cmd.URL())
		return true
	})
)

func main() {
	crawler := chuper.New()
	crawler.CrawlDelay = delay
	// crawler.CrawlPoliteness = true
	// crawler.Cache = nil
	// crawler.HTTPClient = prepareTorHTTPClient()

	crawler.Register(criteria, firstProcessor, secondProcessor, thirdProcessor)

	crawler.Start()
	crawler.Enqueue("GET", seeds...)
	crawler.EnqueueWithSource("GET", "http://www.gazzetta.it", "http://www.google.it")
	crawler.Block()
}

Fix example program

package main

import (
	"time"

	"github.com/PuerkitoBio/goquery"
	"github.com/oli-g/chuper"
)

var (
	// delay between fetches of the crawler.
	delay = 2 * time.Second

	// seeds are the initial URLs to enqueue (duplicates exercise the cache).
	seeds = []string{
		"http://www.repubblica.it",
		"http://www.corriere.it",
		"http://www.repubblica.it",
		"http://www.corriere.it",
		"http://www.gazzetta.it",
	}

	// criteria restricts the processors below to successful HTML GETs
	// on www.gazzetta.it.
	criteria = &chuper.ResponseCriteria{
		Method:      "GET",
		ContentType: "text/html",
		Status:      200,
		Host:        "www.gazzetta.it",
	}

	firstProcessor = chuper.ProcessorFunc(func(ctx chuper.Context, doc *goquery.Document) bool {
		ctx.Log(map[string]interface{}{
			"url":    ctx.URL().String(),
			"source": ctx.SourceURL().String(),
		}).Info("First processor")
		return true
	})

	// secondProcessor returns false, which stops the processor chain so
	// thirdProcessor never runs.
	secondProcessor = chuper.ProcessorFunc(func(ctx chuper.Context, doc *goquery.Document) bool {
		ctx.Log(map[string]interface{}{
			"url":    ctx.URL().String(),
			"source": ctx.SourceURL().String(),
		}).Info("Second processor")
		return false
	})

	thirdProcessor = chuper.ProcessorFunc(func(ctx chuper.Context, doc *goquery.Document) bool {
		ctx.Log(map[string]interface{}{
			"url":    ctx.URL().String(),
			"source": ctx.SourceURL().String(),
		}).Info("Third processor")
		return true
	})
)

func main() {
	crawler := chuper.New()
	crawler.CrawlDelay = delay

	crawler.Register(criteria, firstProcessor, secondProcessor, thirdProcessor)

	// Start returns the queue used to enqueue URLs with a source.
	q := crawler.Start()
	for _, u := range seeds {
		q.Enqueue("GET", u, "www.google.com")
	}
	crawler.Finish()
}
package main import ( "flag" "fmt" "hash" "io" "os" "golang.org/x/crypto/sha3" ) func main() { size := flag.Int("n", 256, "size in bits of the desired hash: 224, 256 (default), 384, or 512") flag.Parse() var h hash.Hash switch *size { case 224: h = sha3.New224() case 256: h = sha3.New256() case 384: h = sha3.New384() case 512: h = sha3.New512() default: panic(fmt.Errorf("unsupported hash size %d (must be 224, 256, 384, or 512)", *size)) } _, err := io.Copy(h, os.Stdin) if err != nil { panic(err) } _, err = os.Stdout.Write(h.Sum(nil)) if err != nil { panic(err) } } cmd/sha3: remove redundant info in help output Package flag prints the default value on the end of the line, so we don't need to say it again. This also makes the other wording a bit more consise. Closes #148 package main import ( "flag" "fmt" "hash" "io" "os" "golang.org/x/crypto/sha3" ) func main() { size := flag.Int("n", 256, "output size in `bits`: 224, 256, 384, or 512") flag.Parse() var h hash.Hash switch *size { case 224: h = sha3.New224() case 256: h = sha3.New256() case 384: h = sha3.New384() case 512: h = sha3.New512() default: panic(fmt.Errorf("unsupported hash size %d (must be 224, 256, 384, or 512)", *size)) } _, err := io.Copy(h, os.Stdin) if err != nil { panic(err) } _, err = os.Stdout.Write(h.Sum(nil)) if err != nil { panic(err) } }
package main import ( "flag" "fmt" "github.com/Symantec/Dominator/lib/constants" "github.com/Symantec/Dominator/lib/filter" "github.com/Symantec/Dominator/lib/flagutil" "github.com/Symantec/Dominator/lib/fsbench" "github.com/Symantec/Dominator/lib/fsrateio" "github.com/Symantec/Dominator/lib/html" "github.com/Symantec/Dominator/lib/logbuf" "github.com/Symantec/Dominator/lib/memstats" "github.com/Symantec/Dominator/lib/netspeed" "github.com/Symantec/Dominator/lib/rateio" "github.com/Symantec/Dominator/lib/srpc/setupserver" "github.com/Symantec/Dominator/sub/httpd" "github.com/Symantec/Dominator/sub/rpcd" "github.com/Symantec/Dominator/sub/scanner" "github.com/Symantec/tricorder/go/tricorder" "github.com/Symantec/tricorder/go/tricorder/units" "io" "log" "os" "os/signal" "path" "runtime" "strconv" "syscall" ) var ( defaultScanSpeedPercent = flag.Uint64("defaultScanSpeedPercent", constants.DefaultScanSpeedPercent, "Scan speed as percentage of capacity") logbufLines = flag.Uint("logbufLines", 1024, "Number of lines to store in the log buffer") maxThreads = flag.Uint("maxThreads", 1, "Maximum number of parallel OS threads to use") permitInsecureMode = flag.Bool("permitInsecureMode", false, "If true, run in insecure mode. This gives remote root access to all") pidfile = flag.String("pidfile", "/var/run/subd.pid", "Name of file to write my PID to") portNum = flag.Uint("portNum", constants.SubPortNumber, "Port number to allocate and listen on for HTTP/RPC") rootDir = flag.String("rootDir", "/", "Name of root of directory tree to manage") scanExcludeList flagutil.StringList = constants.ScanExcludeList showStats = flag.Bool("showStats", false, "If true, show statistics after each cycle") subdDir = flag.String("subdDir", ".subd", "Name of subd private directory, relative to rootDir. 
This must be on the same file-system as rootDir") unshare = flag.Bool("unshare", true, "Internal use only.") ) func init() { runtime.LockOSThread() flag.Var(&scanExcludeList, "scanExcludeList", "Comma separated list of patterns to exclude from scanning") } func sanityCheck() bool { r_devnum, err := fsbench.GetDevnumForFile(*rootDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to get device number for: %s\t%s\n", *rootDir, err) return false } subdDirPathname := path.Join(*rootDir, *subdDir) s_devnum, err := fsbench.GetDevnumForFile(subdDirPathname) if err != nil { fmt.Fprintf(os.Stderr, "Unable to get device number for: %s\t%s\n", subdDirPathname, err) return false } if r_devnum != s_devnum { fmt.Fprintf(os.Stderr, "rootDir and subdDir must be on the same file-system\n") return false } return true } func createDirectory(dirname string) bool { if err := os.MkdirAll(dirname, 0750); err != nil { fmt.Fprintf(os.Stderr, "Unable to create directory: %s\t%s\n", dirname, err) return false } return true } func mountTmpfs(dirname string) bool { var statfs syscall.Statfs_t if err := syscall.Statfs(dirname, &statfs); err != nil { fmt.Fprintf(os.Stderr, "Unable to create Statfs: %s\t%s\n", dirname, err) return false } if statfs.Type != 0x01021994 { err := syscall.Mount("none", dirname, "tmpfs", 0, "size=65536,mode=0750") if err == nil { fmt.Printf("Mounted tmpfs on: %s\n", dirname) } else { fmt.Fprintf(os.Stderr, "Unable to mount tmpfs on: %s\t%s\n", dirname, err) return false } } return true } func unshareAndBind(workingRootDir string) bool { if *unshare { // Re-exec myself using the unshare syscall while on a locked thread. // This hack is required because syscall.Unshare() operates on only one // thread in the process, and Go switches execution between threads // randomly. Thus, the namespace can be suddenly switched for running // code. This is an aspect of Go that was not well thought out. 
runtime.LockOSThread() if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { fmt.Fprintf(os.Stderr, "Unable to unshare mount namesace\t%s\n", err) return false } // Ensure the process is slightly niced. Since the Linux implementation // of setpriority(2) only applies to a thread, not the whole process // (contrary to the POSIX specification), do this in the pinned OS // thread so that the whole process (after exec) will be niced. syscall.Setpriority(syscall.PRIO_PROCESS, 0, 1) args := append(os.Args, "-unshare=false") if err := syscall.Exec(args[0], args, os.Environ()); err != nil { fmt.Fprintf(os.Stderr, "Unable to Exec:%s\t%s\n", args[0], err) return false } } err := syscall.Mount("none", "/", "", syscall.MS_REC|syscall.MS_PRIVATE, "") if err != nil { fmt.Fprintf(os.Stderr, "Unable to set mount sharing to private\t%s\n", err) return false } syscall.Unmount(workingRootDir, 0) err = syscall.Mount(*rootDir, workingRootDir, "", syscall.MS_BIND, "") if err != nil { fmt.Fprintf(os.Stderr, "Unable to bind mount %s to %s\t%s\n", *rootDir, workingRootDir, err) return false } // Clean up -unshare=false so that a subsequent re-exec starts from scratch. 
args := make([]string, 0, len(os.Args)-1) for _, arg := range os.Args { if arg != "-unshare=false" { args = append(args, arg) } } os.Args = args return true } func getCachedFsSpeed(workingRootDir string, cacheDirname string) (bytesPerSecond, blocksPerSecond uint64, computed, ok bool) { bytesPerSecond = 0 blocksPerSecond = 0 devnum, err := fsbench.GetDevnumForFile(workingRootDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to get device number for: %s\t%s\n", workingRootDir, err) return 0, 0, false, false } fsbenchDir := path.Join(cacheDirname, "fsbench") if !createDirectory(fsbenchDir) { return 0, 0, false, false } cacheFilename := path.Join(fsbenchDir, strconv.FormatUint(devnum, 16)) file, err := os.Open(cacheFilename) if err == nil { n, err := fmt.Fscanf(file, "%d %d", &bytesPerSecond, &blocksPerSecond) file.Close() if n == 2 || err == nil { return bytesPerSecond, blocksPerSecond, false, true } } bytesPerSecond, blocksPerSecond, err = fsbench.GetReadSpeed(workingRootDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to measure read speed\t%s\n", err) return 0, 0, true, false } file, err = os.Create(cacheFilename) if err != nil { fmt.Fprintf(os.Stderr, "Unable to open: %s for write\t%s\n", cacheFilename, err) return 0, 0, true, false } fmt.Fprintf(file, "%d %d\n", bytesPerSecond, blocksPerSecond) file.Close() return bytesPerSecond, blocksPerSecond, true, true } func publishFsSpeed(bytesPerSecond, blocksPerSecond uint64) { tricorder.RegisterMetric("/root-read-speed", &bytesPerSecond, units.BytePerSecond, "read speed of root file-system media") tricorder.RegisterMetric("/root-block-read-speed", &blocksPerSecond, units.None, "read speed of root file-system media in blocks/second") } func getCachedNetworkSpeed(cacheFilename string) uint64 { if speed, ok := netspeed.GetSpeedToHost(""); ok { return speed } file, err := os.Open(cacheFilename) if err != nil { return 0 } defer file.Close() var bytesPerSecond uint64 n, err := fmt.Fscanf(file, "%d", &bytesPerSecond) if n 
== 1 || err == nil { return bytesPerSecond } return 0 } type DumpableFileSystemHistory struct { fsh *scanner.FileSystemHistory } func (fsh *DumpableFileSystemHistory) WriteHtml(writer io.Writer) { fs := fsh.fsh.FileSystem() if fs == nil { return } fmt.Fprintln(writer, "<pre>") fs.List(writer) fmt.Fprintln(writer, "</pre>") } func gracefulCleanup() { os.Remove(*pidfile) os.Exit(1) } func writePidfile() { file, err := os.Create(*pidfile) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } defer file.Close() fmt.Fprintln(file, os.Getpid()) } func main() { flag.Parse() tricorder.RegisterFlags() subdDirPathname := path.Join(*rootDir, *subdDir) workingRootDir := path.Join(subdDirPathname, "root") objectsDir := path.Join(workingRootDir, *subdDir, "objects") tmpDir := path.Join(subdDirPathname, "tmp") netbenchFilename := path.Join(subdDirPathname, "netbench") oldTriggersFilename := path.Join(subdDirPathname, "triggers.previous") if !createDirectory(workingRootDir) { os.Exit(1) } if !sanityCheck() { os.Exit(1) } if !createDirectory(tmpDir) { os.Exit(1) } if !mountTmpfs(tmpDir) { os.Exit(1) } if !unshareAndBind(workingRootDir) { os.Exit(1) } if !createDirectory(objectsDir) { os.Exit(1) } runtime.GOMAXPROCS(int(*maxThreads)) circularBuffer := logbuf.New(*logbufLines) logger := log.New(circularBuffer, "", log.LstdFlags) if err := setupserver.SetupTls(); err != nil { logger.Println(err) circularBuffer.Flush() if !*permitInsecureMode { os.Exit(1) } } bytesPerSecond, blocksPerSecond, firstScan, ok := getCachedFsSpeed( workingRootDir, tmpDir) if !ok { os.Exit(1) } publishFsSpeed(bytesPerSecond, blocksPerSecond) var configuration scanner.Configuration var err error configuration.ScanFilter, err = filter.New(scanExcludeList) if err != nil { fmt.Fprintf(os.Stderr, "Unable to set default scan exclusions\t%s\n", err) os.Exit(1) } configuration.FsScanContext = fsrateio.NewReaderContext(bytesPerSecond, blocksPerSecond, *defaultScanSpeedPercent) defaultSpeed := 
configuration.FsScanContext.GetContext().SpeedPercent() if firstScan { configuration.FsScanContext.GetContext().SetSpeedPercent(100) } if *showStats { fmt.Println(configuration.FsScanContext) } var fsh scanner.FileSystemHistory mainFunc := func(fsChannel <-chan *scanner.FileSystem, disableScanner func(disableScanner bool)) { networkReaderContext := rateio.NewReaderContext( getCachedNetworkSpeed(netbenchFilename), constants.DefaultNetworkSpeedPercent, &rateio.ReadMeasurer{}) configuration.NetworkReaderContext = networkReaderContext invalidateNextScanObjectCache := false rpcdHtmlWriter := rpcd.Setup(&configuration, &fsh, objectsDir, workingRootDir, networkReaderContext, netbenchFilename, oldTriggersFilename, disableScanner, func() { invalidateNextScanObjectCache = true fsh.UpdateObjectCacheOnly() }, logger) configMetricsDir, err := tricorder.RegisterDirectory("/config") if err != nil { fmt.Fprintf(os.Stderr, "Unable to create /config metrics directory\t%s\n", err) os.Exit(1) } configuration.RegisterMetrics(configMetricsDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to create config metrics\t%s\n", err) os.Exit(1) } httpd.AddHtmlWriter(rpcdHtmlWriter) httpd.AddHtmlWriter(&fsh) httpd.AddHtmlWriter(&configuration) httpd.AddHtmlWriter(circularBuffer) html.RegisterHtmlWriterForPattern("/dumpFileSystem", "Scanned File System", &DumpableFileSystemHistory{&fsh}) if err = httpd.StartServer(*portNum); err != nil { fmt.Fprintf(os.Stderr, "Unable to create http server\t%s\n", err) os.Exit(1) } fsh.Update(nil) sighupChannel := make(chan os.Signal) signal.Notify(sighupChannel, syscall.SIGHUP) sigtermChannel := make(chan os.Signal) signal.Notify(sigtermChannel, syscall.SIGTERM, syscall.SIGINT) writePidfile() for iter := 0; true; { select { case <-sighupChannel: logger.Printf("Caught SIGHUP: re-execing with: %v\n", os.Args) circularBuffer.Flush() err = syscall.Exec(os.Args[0], os.Args, os.Environ()) if err != nil { logger.Printf("Unable to Exec:%s\t%s\n", os.Args[0], err) } 
case <-sigtermChannel: logger.Printf("Caught SIGTERM: performing graceful cleanup\n") circularBuffer.Flush() gracefulCleanup() case fs := <-fsChannel: if *showStats { fmt.Printf("Completed cycle: %d\n", iter) } if invalidateNextScanObjectCache { fs.ScanObjectCache() invalidateNextScanObjectCache = false } fsh.Update(fs) iter++ runtime.GC() // An opportune time to take out the garbage. if *showStats { fmt.Print(fsh) fmt.Print(fsh.FileSystem()) memstats.WriteMemoryStats(os.Stdout) fmt.Println() } if firstScan { configuration.FsScanContext.GetContext().SetSpeedPercent( defaultSpeed) firstScan = false if *showStats { fmt.Println(configuration.FsScanContext) } } } } } scanner.StartScanning(workingRootDir, objectsDir, &configuration, logger, mainFunc) } Add -defaultNetworkSpeedPercent option to subd. package main import ( "flag" "fmt" "github.com/Symantec/Dominator/lib/constants" "github.com/Symantec/Dominator/lib/filter" "github.com/Symantec/Dominator/lib/flagutil" "github.com/Symantec/Dominator/lib/fsbench" "github.com/Symantec/Dominator/lib/fsrateio" "github.com/Symantec/Dominator/lib/html" "github.com/Symantec/Dominator/lib/logbuf" "github.com/Symantec/Dominator/lib/memstats" "github.com/Symantec/Dominator/lib/netspeed" "github.com/Symantec/Dominator/lib/rateio" "github.com/Symantec/Dominator/lib/srpc/setupserver" "github.com/Symantec/Dominator/sub/httpd" "github.com/Symantec/Dominator/sub/rpcd" "github.com/Symantec/Dominator/sub/scanner" "github.com/Symantec/tricorder/go/tricorder" "github.com/Symantec/tricorder/go/tricorder/units" "io" "log" "os" "os/signal" "path" "runtime" "strconv" "syscall" ) var ( defaultNetworkSpeedPercent = flag.Uint64("defaultNetworkSpeedPercent", constants.DefaultNetworkSpeedPercent, "Network speed as percentage of capacity") defaultScanSpeedPercent = flag.Uint64("defaultScanSpeedPercent", constants.DefaultScanSpeedPercent, "Scan speed as percentage of capacity") logbufLines = flag.Uint("logbufLines", 1024, "Number of lines to store in the 
log buffer") maxThreads = flag.Uint("maxThreads", 1, "Maximum number of parallel OS threads to use") permitInsecureMode = flag.Bool("permitInsecureMode", false, "If true, run in insecure mode. This gives remote root access to all") pidfile = flag.String("pidfile", "/var/run/subd.pid", "Name of file to write my PID to") portNum = flag.Uint("portNum", constants.SubPortNumber, "Port number to allocate and listen on for HTTP/RPC") rootDir = flag.String("rootDir", "/", "Name of root of directory tree to manage") scanExcludeList flagutil.StringList = constants.ScanExcludeList showStats = flag.Bool("showStats", false, "If true, show statistics after each cycle") subdDir = flag.String("subdDir", ".subd", "Name of subd private directory, relative to rootDir. This must be on the same file-system as rootDir") unshare = flag.Bool("unshare", true, "Internal use only.") ) func init() { runtime.LockOSThread() flag.Var(&scanExcludeList, "scanExcludeList", "Comma separated list of patterns to exclude from scanning") } func sanityCheck() bool { r_devnum, err := fsbench.GetDevnumForFile(*rootDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to get device number for: %s\t%s\n", *rootDir, err) return false } subdDirPathname := path.Join(*rootDir, *subdDir) s_devnum, err := fsbench.GetDevnumForFile(subdDirPathname) if err != nil { fmt.Fprintf(os.Stderr, "Unable to get device number for: %s\t%s\n", subdDirPathname, err) return false } if r_devnum != s_devnum { fmt.Fprintf(os.Stderr, "rootDir and subdDir must be on the same file-system\n") return false } return true } func createDirectory(dirname string) bool { if err := os.MkdirAll(dirname, 0750); err != nil { fmt.Fprintf(os.Stderr, "Unable to create directory: %s\t%s\n", dirname, err) return false } return true } func mountTmpfs(dirname string) bool { var statfs syscall.Statfs_t if err := syscall.Statfs(dirname, &statfs); err != nil { fmt.Fprintf(os.Stderr, "Unable to create Statfs: %s\t%s\n", dirname, err) return false } if 
statfs.Type != 0x01021994 { err := syscall.Mount("none", dirname, "tmpfs", 0, "size=65536,mode=0750") if err == nil { fmt.Printf("Mounted tmpfs on: %s\n", dirname) } else { fmt.Fprintf(os.Stderr, "Unable to mount tmpfs on: %s\t%s\n", dirname, err) return false } } return true } func unshareAndBind(workingRootDir string) bool { if *unshare { // Re-exec myself using the unshare syscall while on a locked thread. // This hack is required because syscall.Unshare() operates on only one // thread in the process, and Go switches execution between threads // randomly. Thus, the namespace can be suddenly switched for running // code. This is an aspect of Go that was not well thought out. runtime.LockOSThread() if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { fmt.Fprintf(os.Stderr, "Unable to unshare mount namesace\t%s\n", err) return false } // Ensure the process is slightly niced. Since the Linux implementation // of setpriority(2) only applies to a thread, not the whole process // (contrary to the POSIX specification), do this in the pinned OS // thread so that the whole process (after exec) will be niced. syscall.Setpriority(syscall.PRIO_PROCESS, 0, 1) args := append(os.Args, "-unshare=false") if err := syscall.Exec(args[0], args, os.Environ()); err != nil { fmt.Fprintf(os.Stderr, "Unable to Exec:%s\t%s\n", args[0], err) return false } } err := syscall.Mount("none", "/", "", syscall.MS_REC|syscall.MS_PRIVATE, "") if err != nil { fmt.Fprintf(os.Stderr, "Unable to set mount sharing to private\t%s\n", err) return false } syscall.Unmount(workingRootDir, 0) err = syscall.Mount(*rootDir, workingRootDir, "", syscall.MS_BIND, "") if err != nil { fmt.Fprintf(os.Stderr, "Unable to bind mount %s to %s\t%s\n", *rootDir, workingRootDir, err) return false } // Clean up -unshare=false so that a subsequent re-exec starts from scratch. 
args := make([]string, 0, len(os.Args)-1) for _, arg := range os.Args { if arg != "-unshare=false" { args = append(args, arg) } } os.Args = args return true } func getCachedFsSpeed(workingRootDir string, cacheDirname string) (bytesPerSecond, blocksPerSecond uint64, computed, ok bool) { bytesPerSecond = 0 blocksPerSecond = 0 devnum, err := fsbench.GetDevnumForFile(workingRootDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to get device number for: %s\t%s\n", workingRootDir, err) return 0, 0, false, false } fsbenchDir := path.Join(cacheDirname, "fsbench") if !createDirectory(fsbenchDir) { return 0, 0, false, false } cacheFilename := path.Join(fsbenchDir, strconv.FormatUint(devnum, 16)) file, err := os.Open(cacheFilename) if err == nil { n, err := fmt.Fscanf(file, "%d %d", &bytesPerSecond, &blocksPerSecond) file.Close() if n == 2 || err == nil { return bytesPerSecond, blocksPerSecond, false, true } } bytesPerSecond, blocksPerSecond, err = fsbench.GetReadSpeed(workingRootDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to measure read speed\t%s\n", err) return 0, 0, true, false } file, err = os.Create(cacheFilename) if err != nil { fmt.Fprintf(os.Stderr, "Unable to open: %s for write\t%s\n", cacheFilename, err) return 0, 0, true, false } fmt.Fprintf(file, "%d %d\n", bytesPerSecond, blocksPerSecond) file.Close() return bytesPerSecond, blocksPerSecond, true, true } func publishFsSpeed(bytesPerSecond, blocksPerSecond uint64) { tricorder.RegisterMetric("/root-read-speed", &bytesPerSecond, units.BytePerSecond, "read speed of root file-system media") tricorder.RegisterMetric("/root-block-read-speed", &blocksPerSecond, units.None, "read speed of root file-system media in blocks/second") } func getCachedNetworkSpeed(cacheFilename string) uint64 { if speed, ok := netspeed.GetSpeedToHost(""); ok { return speed } file, err := os.Open(cacheFilename) if err != nil { return 0 } defer file.Close() var bytesPerSecond uint64 n, err := fmt.Fscanf(file, "%d", &bytesPerSecond) if n 
== 1 || err == nil { return bytesPerSecond } return 0 } type DumpableFileSystemHistory struct { fsh *scanner.FileSystemHistory } func (fsh *DumpableFileSystemHistory) WriteHtml(writer io.Writer) { fs := fsh.fsh.FileSystem() if fs == nil { return } fmt.Fprintln(writer, "<pre>") fs.List(writer) fmt.Fprintln(writer, "</pre>") } func gracefulCleanup() { os.Remove(*pidfile) os.Exit(1) } func writePidfile() { file, err := os.Create(*pidfile) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } defer file.Close() fmt.Fprintln(file, os.Getpid()) } func main() { flag.Parse() tricorder.RegisterFlags() subdDirPathname := path.Join(*rootDir, *subdDir) workingRootDir := path.Join(subdDirPathname, "root") objectsDir := path.Join(workingRootDir, *subdDir, "objects") tmpDir := path.Join(subdDirPathname, "tmp") netbenchFilename := path.Join(subdDirPathname, "netbench") oldTriggersFilename := path.Join(subdDirPathname, "triggers.previous") if !createDirectory(workingRootDir) { os.Exit(1) } if !sanityCheck() { os.Exit(1) } if !createDirectory(tmpDir) { os.Exit(1) } if !mountTmpfs(tmpDir) { os.Exit(1) } if !unshareAndBind(workingRootDir) { os.Exit(1) } if !createDirectory(objectsDir) { os.Exit(1) } runtime.GOMAXPROCS(int(*maxThreads)) circularBuffer := logbuf.New(*logbufLines) logger := log.New(circularBuffer, "", log.LstdFlags) if err := setupserver.SetupTls(); err != nil { logger.Println(err) circularBuffer.Flush() if !*permitInsecureMode { os.Exit(1) } } bytesPerSecond, blocksPerSecond, firstScan, ok := getCachedFsSpeed( workingRootDir, tmpDir) if !ok { os.Exit(1) } publishFsSpeed(bytesPerSecond, blocksPerSecond) var configuration scanner.Configuration var err error configuration.ScanFilter, err = filter.New(scanExcludeList) if err != nil { fmt.Fprintf(os.Stderr, "Unable to set default scan exclusions\t%s\n", err) os.Exit(1) } configuration.FsScanContext = fsrateio.NewReaderContext(bytesPerSecond, blocksPerSecond, *defaultScanSpeedPercent) defaultSpeed := 
configuration.FsScanContext.GetContext().SpeedPercent() if firstScan { configuration.FsScanContext.GetContext().SetSpeedPercent(100) } if *showStats { fmt.Println(configuration.FsScanContext) } var fsh scanner.FileSystemHistory mainFunc := func(fsChannel <-chan *scanner.FileSystem, disableScanner func(disableScanner bool)) { networkReaderContext := rateio.NewReaderContext( getCachedNetworkSpeed(netbenchFilename), *defaultNetworkSpeedPercent, &rateio.ReadMeasurer{}) configuration.NetworkReaderContext = networkReaderContext invalidateNextScanObjectCache := false rpcdHtmlWriter := rpcd.Setup(&configuration, &fsh, objectsDir, workingRootDir, networkReaderContext, netbenchFilename, oldTriggersFilename, disableScanner, func() { invalidateNextScanObjectCache = true fsh.UpdateObjectCacheOnly() }, logger) configMetricsDir, err := tricorder.RegisterDirectory("/config") if err != nil { fmt.Fprintf(os.Stderr, "Unable to create /config metrics directory\t%s\n", err) os.Exit(1) } configuration.RegisterMetrics(configMetricsDir) if err != nil { fmt.Fprintf(os.Stderr, "Unable to create config metrics\t%s\n", err) os.Exit(1) } httpd.AddHtmlWriter(rpcdHtmlWriter) httpd.AddHtmlWriter(&fsh) httpd.AddHtmlWriter(&configuration) httpd.AddHtmlWriter(circularBuffer) html.RegisterHtmlWriterForPattern("/dumpFileSystem", "Scanned File System", &DumpableFileSystemHistory{&fsh}) if err = httpd.StartServer(*portNum); err != nil { fmt.Fprintf(os.Stderr, "Unable to create http server\t%s\n", err) os.Exit(1) } fsh.Update(nil) sighupChannel := make(chan os.Signal) signal.Notify(sighupChannel, syscall.SIGHUP) sigtermChannel := make(chan os.Signal) signal.Notify(sigtermChannel, syscall.SIGTERM, syscall.SIGINT) writePidfile() for iter := 0; true; { select { case <-sighupChannel: logger.Printf("Caught SIGHUP: re-execing with: %v\n", os.Args) circularBuffer.Flush() err = syscall.Exec(os.Args[0], os.Args, os.Environ()) if err != nil { logger.Printf("Unable to Exec:%s\t%s\n", os.Args[0], err) } case 
<-sigtermChannel: logger.Printf("Caught SIGTERM: performing graceful cleanup\n") circularBuffer.Flush() gracefulCleanup() case fs := <-fsChannel: if *showStats { fmt.Printf("Completed cycle: %d\n", iter) } if invalidateNextScanObjectCache { fs.ScanObjectCache() invalidateNextScanObjectCache = false } fsh.Update(fs) iter++ runtime.GC() // An opportune time to take out the garbage. if *showStats { fmt.Print(fsh) fmt.Print(fsh.FileSystem()) memstats.WriteMemoryStats(os.Stdout) fmt.Println() } if firstScan { configuration.FsScanContext.GetContext().SetSpeedPercent( defaultSpeed) firstScan = false if *showStats { fmt.Println(configuration.FsScanContext) } } } } } scanner.StartScanning(workingRootDir, objectsDir, &configuration, logger, mainFunc) }
package main import ( "fmt" "io" "net/http" "os" log "github.com/Sirupsen/logrus" "github.com/cheggaaa/pb" "github.com/codegangsta/cli" "github.com/mattn/go-isatty" "github.com/otium/ytdl" ) type options struct { noProgress bool outputFile string infoOnly bool silent bool debug bool append bool filters []string downloadURL bool byteRange string } func main() { app := cli.NewApp() app.Name = "ytdl" app.HelpName = "ytdl" // Set our own custom args usage app.ArgsUsage = "[youtube url or video id]" app.Usage = "Download youtube videos" app.HideHelp = true app.Version = "0.1.0" app.Flags = []cli.Flag{ cli.HelpFlag, cli.StringFlag{ Name: "output, o", Usage: "Write output to a file, passing - outputs to stdout", Value: "{{.Title}}.{{.Ext}}", }, cli.BoolFlag{ Name: "info, i", Usage: "Only output video info", }, cli.BoolFlag{ Name: "no-progress", Usage: "Disable the progress bar", }, cli.BoolFlag{ Name: "silent, s", Usage: "Only output errors, also disables progress bar", }, cli.BoolFlag{ Name: "debug, d", Usage: "Output debug log", }, cli.BoolFlag{ Name: "append, a", Usage: "Append to output file instead of overwriting", }, cli.StringSliceFlag{ Name: "filter, f", Usage: "Filter available formats, syntax: [format_key]:val1,val2", Value: &cli.StringSlice{ fmt.Sprintf("%s:mp4", ytdl.FormatExtensionKey), fmt.Sprintf("%s:1080p,720p,480p,360p,240p,144p", ytdl.FormatResolutionKey), fmt.Sprintf("!%s:nil", ytdl.FormatVideoEncodingKey), fmt.Sprintf("!%s:nil", ytdl.FormatAudioEncodingKey), }, }, cli.StringFlag{ Name: "range, r", Usage: "Download a specific range of bytes of the video, [start]-[end]", }, cli.BoolFlag{ Name: "download-url, u", Usage: "Prints download url to stdout", }, } app.Action = func(c *cli.Context) { identifier := c.Args().First() if identifier == "" || c.Bool("help") { cli.ShowAppHelp(c) } else { options := options{ noProgress: c.Bool("no-progress"), outputFile: c.String("output"), infoOnly: c.Bool("info"), silent: c.Bool("silent"), debug: c.Bool("debug"), 
append: c.Bool("append"), filters: c.StringSlice("filter"), downloadURL: c.Bool("download-url"), byteRange: c.String("range"), } handler(identifier, options) } } app.Run(os.Args) } func handler(identifier string, options options) { var err error defer func() { if err != nil { log.SetOutput(os.Stderr) log.Fatal(err.Error()) } }() var out io.Writer var logOut io.Writer = os.Stdout // if downloading to stdout, set log output to stderr, not sure if this is correct if options.outputFile == "-" { out = os.Stdout logOut = os.Stderr } log.SetOutput(logOut) // ouput only errors or not silent := options.outputFile == "" || options.silent || options.infoOnly || options.downloadURL if silent { log.SetLevel(log.FatalLevel) } else if options.debug { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } // TODO: Show activity indicator log.Info("Fetching video info...") //fmt.Print("\u001b[0G") //fmt.Print("\u001b[2K") info, err := ytdl.GetInfo(identifier) if err != nil { err = fmt.Errorf("Unable to fetch video info: %s", err.Error()) return } // TODO: Get more info, and change structure // TODO: Allow json output if options.infoOnly { fmt.Println("Title:", info.Title) fmt.Println("Author:", info.Author) fmt.Println("Date Published:", info.DatePublished.Format("Jan 2 2006")) fmt.Println("Duration:", info.Duration) return } formats := info.Formats // parse filter arguments, and filter through formats for _, filter := range options.filters { key, values, exclude, err := parseFilter(filter) if err == nil { if exclude { formats = ytdl.FilterFormatsExclude(formats, ytdl.FormatKey(key), values) } else { formats = ytdl.FilterFormats(formats, ytdl.FormatKey(key), values) } } } if len(formats) == 0 { err = fmt.Errorf("No formats available that match criteria") return } format := formats[0] downloadURL, err := info.GetDownloadURL(format) if err != nil { err = fmt.Errorf("Unable to get download url: %s", err.Error()) return } if options.downloadURL { 
fmt.Print(downloadURL.String()) // print new line character if outputing to terminal if isatty.IsTerminal(os.Stdout.Fd()) { fmt.Println() } return } if out == nil { var fileName string fileName, err = createFileName(options.outputFile, outputFileName{ Title: info.Title, Ext: format[ytdl.FormatExtensionKey].(string), DatePublished: info.DatePublished.Format("2006-01-02"), Resolution: format[ytdl.FormatResolutionKey].(string), Author: info.Author, Duration: info.Duration.String(), }) if err != nil { err = fmt.Errorf("Unable to parse output file file name: %s", err.Error()) return } // Create file truncate if append flag is not set flags := os.O_CREATE | os.O_WRONLY if options.append { flags |= os.O_APPEND } else { flags |= os.O_TRUNC } var f *os.File // open as write only f, err = os.OpenFile(fileName, flags, 0666) if err != nil { err = fmt.Errorf("Unable to open output file: %s", err.Error()) return } defer f.Close() out = f } log.Info("Downloading to ", out.(*os.File).Name()) var req *http.Request req, err = http.NewRequest("GET", downloadURL.String(), nil) // if byte range flag is set, use http range header option if options.byteRange != "" { req.Header.Set("Range", "bytes="+options.byteRange) } resp, err := http.DefaultClient.Do(req) if err != nil || resp.StatusCode < 200 || resp.StatusCode >= 300 { if err == nil { err = fmt.Errorf("Received status code %d from download url", resp.StatusCode) } err = fmt.Errorf("Unable to start download: %s", err.Error()) return } defer resp.Body.Close() // if we aren't in silent mode or the no progress flag wasn't set, // initialize progress bar if !silent && !options.noProgress { progressBar := pb.New64(resp.ContentLength) progressBar.SetUnits(pb.U_BYTES) progressBar.ShowTimeLeft = true progressBar.ShowSpeed = true // progressBar.RefreshRate = time.Millisecond * 1 progressBar.Output = logOut progressBar.Start() defer progressBar.Finish() out = io.MultiWriter(out, progressBar) } _, err = io.Copy(out, resp.Body) } Added output 
info json option package main import ( "fmt" "io" "net/http" "os" "encoding/json" log "github.com/Sirupsen/logrus" "github.com/cheggaaa/pb" "github.com/codegangsta/cli" "github.com/mattn/go-isatty" "github.com/otium/ytdl" ) type options struct { noProgress bool outputFile string infoOnly bool silent bool debug bool append bool filters []string downloadURL bool byteRange string json bool } func main() { app := cli.NewApp() app.Name = "ytdl" app.HelpName = "ytdl" // Set our own custom args usage app.ArgsUsage = "[youtube url or video id]" app.Usage = "Download youtube videos" app.HideHelp = true app.Version = "0.1.0" app.Flags = []cli.Flag{ cli.HelpFlag, cli.StringFlag{ Name: "output, o", Usage: "Write output to a file, passing - outputs to stdout", Value: "{{.Title}}.{{.Ext}}", }, cli.BoolFlag{ Name: "info, i", Usage: "Only output video info", }, cli.BoolFlag{ Name: "no-progress", Usage: "Disable the progress bar", }, cli.BoolFlag{ Name: "silent, s", Usage: "Only output errors, also disables progress bar", }, cli.BoolFlag{ Name: "debug, d", Usage: "Output debug log", }, cli.BoolFlag{ Name: "append, a", Usage: "Append to output file instead of overwriting", }, cli.StringSliceFlag{ Name: "filter, f", Usage: "Filter available formats, syntax: [format_key]:val1,val2", Value: &cli.StringSlice{ fmt.Sprintf("%s:mp4", ytdl.FormatExtensionKey), fmt.Sprintf("%s:1080p,720p,480p,360p,240p,144p", ytdl.FormatResolutionKey), fmt.Sprintf("!%s:nil", ytdl.FormatVideoEncodingKey), fmt.Sprintf("!%s:nil", ytdl.FormatAudioEncodingKey), }, }, cli.StringFlag{ Name: "range, r", Usage: "Download a specific range of bytes of the video, [start]-[end]", }, cli.BoolFlag{ Name: "download-url, u", Usage: "Prints download url to stdout", }, cli.BoolFlag{ Name: "json, j", Usage: "Print info json to stdout", }, } app.Action = func(c *cli.Context) { identifier := c.Args().First() if identifier == "" || c.Bool("help") { cli.ShowAppHelp(c) } else { options := options{ noProgress: c.Bool("no-progress"), 
outputFile: c.String("output"), infoOnly: c.Bool("info"), silent: c.Bool("silent"), debug: c.Bool("debug"), append: c.Bool("append"), filters: c.StringSlice("filter"), downloadURL: c.Bool("download-url"), byteRange: c.String("range"), json: c.Bool("json"), } handler(identifier, options) } } app.Run(os.Args) } func handler(identifier string, options options) { var err error defer func() { if err != nil { log.SetOutput(os.Stderr) log.Fatal(err.Error()) } }() var out io.Writer var logOut io.Writer = os.Stdout // if downloading to stdout, set log output to stderr, not sure if this is correct if options.outputFile == "-" { out = os.Stdout logOut = os.Stderr } log.SetOutput(logOut) // ouput only errors or not silent := options.outputFile == "" || options.silent || options.infoOnly || options.downloadURL || options.json if silent { log.SetLevel(log.FatalLevel) } else if options.debug { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } // TODO: Show activity indicator log.Info("Fetching video info...") //fmt.Print("\u001b[0G") //fmt.Print("\u001b[2K") info, err := ytdl.GetInfo(identifier) if err != nil { err = fmt.Errorf("Unable to fetch video info: %s", err.Error()) return } // TODO: Get more info, and change structure // TODO: Allow json output if options.infoOnly { fmt.Println("Title:", info.Title) fmt.Println("Author:", info.Author) fmt.Println("Date Published:", info.DatePublished.Format("Jan 2 2006")) fmt.Println("Duration:", info.Duration) return } else if options.json { var data []byte data, err = json.MarshalIndent(info, "", "\t") if err != nil { err = fmt.Errorf("Unable to marshal json: %s", err.Error()) return } fmt.Println(string(data)) return } formats := info.Formats // parse filter arguments, and filter through formats for _, filter := range options.filters { key, values, exclude, err := parseFilter(filter) if err == nil { if exclude { formats = ytdl.FilterFormatsExclude(formats, ytdl.FormatKey(key), values) } else { formats = 
ytdl.FilterFormats(formats, ytdl.FormatKey(key), values) } } } if len(formats) == 0 { err = fmt.Errorf("No formats available that match criteria") return } format := formats[0] downloadURL, err := info.GetDownloadURL(format) if err != nil { err = fmt.Errorf("Unable to get download url: %s", err.Error()) return } if options.downloadURL { fmt.Print(downloadURL.String()) // print new line character if outputing to terminal if isatty.IsTerminal(os.Stdout.Fd()) { fmt.Println() } return } if out == nil { var fileName string fileName, err = createFileName(options.outputFile, outputFileName{ Title: info.Title, Ext: format[ytdl.FormatExtensionKey].(string), DatePublished: info.DatePublished.Format("2006-01-02"), Resolution: format[ytdl.FormatResolutionKey].(string), Author: info.Author, Duration: info.Duration.String(), }) if err != nil { err = fmt.Errorf("Unable to parse output file file name: %s", err.Error()) return } // Create file truncate if append flag is not set flags := os.O_CREATE | os.O_WRONLY if options.append { flags |= os.O_APPEND } else { flags |= os.O_TRUNC } var f *os.File // open as write only f, err = os.OpenFile(fileName, flags, 0666) if err != nil { err = fmt.Errorf("Unable to open output file: %s", err.Error()) return } defer f.Close() out = f } log.Info("Downloading to ", out.(*os.File).Name()) var req *http.Request req, err = http.NewRequest("GET", downloadURL.String(), nil) // if byte range flag is set, use http range header option if options.byteRange != "" { req.Header.Set("Range", "bytes="+options.byteRange) } resp, err := http.DefaultClient.Do(req) if err != nil || resp.StatusCode < 200 || resp.StatusCode >= 300 { if err == nil { err = fmt.Errorf("Received status code %d from download url", resp.StatusCode) } err = fmt.Errorf("Unable to start download: %s", err.Error()) return } defer resp.Body.Close() // if we aren't in silent mode or the no progress flag wasn't set, // initialize progress bar if !silent && !options.noProgress { progressBar := 
pb.New64(resp.ContentLength) progressBar.SetUnits(pb.U_BYTES) progressBar.ShowTimeLeft = true progressBar.ShowSpeed = true // progressBar.RefreshRate = time.Millisecond * 1 progressBar.Output = logOut progressBar.Start() defer progressBar.Finish() out = io.MultiWriter(out, progressBar) } _, err = io.Copy(out, resp.Body) }
package quic import ( "bytes" "context" "crypto/tls" "errors" "net" "reflect" "time" "github.com/lucas-clemente/quic-go/internal/crypto" "github.com/lucas-clemente/quic-go/internal/handshake" "github.com/lucas-clemente/quic-go/internal/protocol" "github.com/lucas-clemente/quic-go/internal/testdata" "github.com/lucas-clemente/quic-go/internal/utils" "github.com/lucas-clemente/quic-go/internal/wire" "github.com/lucas-clemente/quic-go/qerr" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) type mockSession struct { connectionID protocol.ConnectionID packetCount int closed bool closeReason error closedRemote bool stopRunLoop chan struct{} // run returns as soon as this channel receives a value handshakeChan chan error } func (s *mockSession) handlePacket(*receivedPacket) { s.packetCount++ } func (s *mockSession) run() error { <-s.stopRunLoop return s.closeReason } func (s *mockSession) Close(e error) error { if s.closed { return nil } s.closeReason = e s.closed = true close(s.stopRunLoop) return nil } func (s *mockSession) closeRemote(e error) { s.closeReason = e s.closed = true s.closedRemote = true close(s.stopRunLoop) } func (s *mockSession) OpenStream() (Stream, error) { return &stream{}, nil } func (s *mockSession) AcceptStream() (Stream, error) { panic("not implemented") } func (s *mockSession) AcceptUniStream() (ReceiveStream, error) { panic("not implemented") } func (s *mockSession) OpenStreamSync() (Stream, error) { panic("not implemented") } func (s *mockSession) OpenUniStream() (SendStream, error) { panic("not implemented") } func (s *mockSession) OpenUniStreamSync() (SendStream, error) { panic("not implemented") } func (s *mockSession) LocalAddr() net.Addr { panic("not implemented") } func (s *mockSession) RemoteAddr() net.Addr { panic("not implemented") } func (*mockSession) Context() context.Context { panic("not implemented") } func (*mockSession) ConnectionState() ConnectionState { panic("not implemented") } func (*mockSession) GetVersion() 
protocol.VersionNumber { return protocol.VersionWhatever } func (s *mockSession) handshakeStatus() <-chan error { return s.handshakeChan } func (*mockSession) getCryptoStream() cryptoStreamI { panic("not implemented") } var _ Session = &mockSession{} func newMockSession( _ connection, _ protocol.VersionNumber, connectionID protocol.ConnectionID, _ *handshake.ServerConfig, _ *tls.Config, _ *Config, ) (packetHandler, error) { s := mockSession{ connectionID: connectionID, handshakeChan: make(chan error), stopRunLoop: make(chan struct{}), } return &s, nil } var _ = Describe("Server", func() { var ( conn *mockPacketConn config *Config udpAddr = &net.UDPAddr{IP: net.IPv4(192, 168, 100, 200), Port: 1337} ) BeforeEach(func() { conn = newMockPacketConn() conn.addr = &net.UDPAddr{} config = &Config{Versions: protocol.SupportedVersions} }) Context("with mock session", func() { var ( serv *server firstPacket []byte // a valid first packet for a new connection with connectionID 0x4cfa9f9b668619f6 (= connID) connID = protocol.ConnectionID(0x4cfa9f9b668619f6) ) BeforeEach(func() { serv = &server{ sessions: make(map[protocol.ConnectionID]packetHandler), newSession: newMockSession, conn: conn, config: config, sessionQueue: make(chan Session, 5), errorChan: make(chan struct{}), } b := &bytes.Buffer{} utils.BigEndian.WriteUint32(b, uint32(protocol.SupportedVersions[0])) firstPacket = []byte{0x09, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6} firstPacket = append(append(firstPacket, b.Bytes()...), 0x01) firstPacket = append(firstPacket, bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)...) 
// add padding }) It("setups with the right values", func() { config := &Config{ HandshakeTimeout: 1337 * time.Minute, IdleTimeout: 42 * time.Hour, RequestConnectionIDOmission: true, MaxIncomingStreams: 1234, MaxIncomingUniStreams: 4321, } c := populateServerConfig(config) Expect(c.HandshakeTimeout).To(Equal(1337 * time.Minute)) Expect(c.IdleTimeout).To(Equal(42 * time.Hour)) Expect(c.RequestConnectionIDOmission).To(BeFalse()) Expect(c.MaxIncomingStreams).To(Equal(1234)) Expect(c.MaxIncomingUniStreams).To(Equal(4321)) }) It("disables bidirectional streams", func() { config := &Config{ MaxIncomingStreams: -1, MaxIncomingUniStreams: 4321, } c := populateServerConfig(config) Expect(c.MaxIncomingStreams).To(BeZero()) Expect(c.MaxIncomingUniStreams).To(Equal(4321)) }) It("disables unidirectional streams", func() { config := &Config{ MaxIncomingStreams: 1234, MaxIncomingUniStreams: -1, } c := populateServerConfig(config) Expect(c.MaxIncomingStreams).To(Equal(1234)) Expect(c.MaxIncomingUniStreams).To(BeZero()) }) It("returns the address", func() { conn.addr = &net.UDPAddr{ IP: net.IPv4(192, 168, 13, 37), Port: 1234, } Expect(serv.Addr().String()).To(Equal("192.168.13.37:1234")) }) It("creates new sessions", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) sess := serv.sessions[connID].(*mockSession) Expect(sess.connectionID).To(Equal(connID)) Expect(sess.packetCount).To(Equal(1)) }) It("accepts new TLS sessions", func() { connID := protocol.ConnectionID(0x12345) sess, err := newMockSession(nil, protocol.VersionTLS, connID, nil, nil, nil) Expect(err).ToNot(HaveOccurred()) err = serv.setupTLS() Expect(err).ToNot(HaveOccurred()) serv.serverTLS.sessionChan <- tlsSession{ connID: connID, sess: sess, } Eventually(func() packetHandler { serv.sessionsMutex.Lock() defer serv.sessionsMutex.Unlock() return serv.sessions[connID] }).Should(Equal(sess)) }) It("only accepts one new TLS sessions for one 
connection ID", func() { connID := protocol.ConnectionID(0x12345) sess1, err := newMockSession(nil, protocol.VersionTLS, connID, nil, nil, nil) Expect(err).ToNot(HaveOccurred()) sess2, err := newMockSession(nil, protocol.VersionTLS, connID, nil, nil, nil) Expect(err).ToNot(HaveOccurred()) err = serv.setupTLS() Expect(err).ToNot(HaveOccurred()) serv.serverTLS.sessionChan <- tlsSession{ connID: connID, sess: sess1, } Eventually(func() packetHandler { serv.sessionsMutex.Lock() defer serv.sessionsMutex.Unlock() return serv.sessions[connID] }).Should(Equal(sess1)) serv.serverTLS.sessionChan <- tlsSession{ connID: connID, sess: sess2, } Eventually(func() packetHandler { serv.sessionsMutex.Lock() defer serv.sessionsMutex.Unlock() return serv.sessions[connID] }).Should(Equal(sess1)) }) It("accepts a session once the connection it is forward secure", func(done Done) { var acceptedSess Session go func() { defer GinkgoRecover() var err error acceptedSess, err = serv.Accept() Expect(err).ToNot(HaveOccurred()) }() err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) sess := serv.sessions[connID].(*mockSession) Consistently(func() Session { return acceptedSess }).Should(BeNil()) close(sess.handshakeChan) Eventually(func() Session { return acceptedSess }).Should(Equal(sess)) close(done) }, 0.5) It("doesn't accept session that error during the handshake", func(done Done) { var accepted bool go func() { defer GinkgoRecover() serv.Accept() accepted = true }() err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) sess := serv.sessions[connID].(*mockSession) sess.handshakeChan <- errors.New("handshake failed") Consistently(func() bool { return accepted }).Should(BeFalse()) close(done) }) It("assigns packets to existing sessions", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) err = serv.handlePacket(nil, nil, 
[]byte{0x08, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6, 0x01}) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).connectionID).To(Equal(connID)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(2)) }) It("closes and deletes sessions", func() { serv.deleteClosedSessionsAfter = time.Second // make sure that the nil value for the closed session doesn't get deleted in this test nullAEAD, err := crypto.NewNullAEAD(protocol.PerspectiveServer, connID, protocol.VersionWhatever) Expect(err).ToNot(HaveOccurred()) err = serv.handlePacket(nil, nil, append(firstPacket, nullAEAD.Seal(nil, nil, 0, firstPacket)...)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID]).ToNot(BeNil()) // make session.run() return serv.sessions[connID].(*mockSession).stopRunLoop <- struct{}{} // The server should now have closed the session, leaving a nil value in the sessions map Consistently(func() map[protocol.ConnectionID]packetHandler { return serv.sessions }).Should(HaveLen(1)) Expect(serv.sessions[connID]).To(BeNil()) }) It("deletes nil session entries after a wait time", func() { serv.deleteClosedSessionsAfter = 25 * time.Millisecond nullAEAD, err := crypto.NewNullAEAD(protocol.PerspectiveServer, connID, protocol.VersionWhatever) Expect(err).ToNot(HaveOccurred()) err = serv.handlePacket(nil, nil, append(firstPacket, nullAEAD.Seal(nil, nil, 0, firstPacket)...)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions).To(HaveKey(connID)) // make session.run() return serv.sessions[connID].(*mockSession).stopRunLoop <- struct{}{} Eventually(func() bool { serv.sessionsMutex.Lock() _, ok := serv.sessions[connID] serv.sessionsMutex.Unlock() return ok }).Should(BeFalse()) }) It("closes sessions and the connection when Close is called", func() { go serv.serve() session, _ := newMockSession(nil, 0, 0, nil, nil, nil) serv.sessions[1] = 
session err := serv.Close() Expect(err).NotTo(HaveOccurred()) Expect(session.(*mockSession).closed).To(BeTrue()) Expect(conn.closed).To(BeTrue()) }) It("ignores packets for closed sessions", func() { serv.sessions[connID] = nil err := serv.handlePacket(nil, nil, []byte{0x08, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6, 0x01}) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID]).To(BeNil()) }) It("works if no quic.Config is given", func(done Done) { ln, err := ListenAddr("127.0.0.1:0", testdata.GetTLSConfig(), nil) Expect(err).ToNot(HaveOccurred()) Expect(ln.Close()).To(Succeed()) close(done) }, 1) It("closes properly", func() { ln, err := ListenAddr("127.0.0.1:0", testdata.GetTLSConfig(), config) Expect(err).ToNot(HaveOccurred()) var returned bool go func() { defer GinkgoRecover() _, err := ln.Accept() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("use of closed network connection")) returned = true }() ln.Close() Eventually(func() bool { return returned }).Should(BeTrue()) }) It("errors when encountering a connection error", func(done Done) { testErr := errors.New("connection error") conn.readErr = testErr go serv.serve() _, err := serv.Accept() Expect(err).To(MatchError(testErr)) Expect(serv.Close()).To(Succeed()) close(done) }, 0.5) It("closes all sessions when encountering a connection error", func() { session, _ := newMockSession(nil, 0, 0, nil, nil, nil) serv.sessions[0x12345] = session Expect(serv.sessions[0x12345].(*mockSession).closed).To(BeFalse()) testErr := errors.New("connection error") conn.readErr = testErr go serv.serve() Eventually(func() Session { return serv.sessions[connID] }).Should(BeNil()) Eventually(func() bool { return session.(*mockSession).closed }).Should(BeTrue()) Expect(serv.Close()).To(Succeed()) }) It("ignores delayed packets with mismatching versions", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) 
Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) b := &bytes.Buffer{} // add an unsupported version data := []byte{0x09, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6} utils.BigEndian.WriteUint32(b, uint32(protocol.SupportedVersions[0]+1)) data = append(append(data, b.Bytes()...), 0x01) err = serv.handlePacket(nil, nil, data) Expect(err).ToNot(HaveOccurred()) // if we didn't ignore the packet, the server would try to send a version negotiation packet, which would make the test panic because it doesn't have a udpConn Expect(conn.dataWritten.Bytes()).To(BeEmpty()) // make sure the packet was *not* passed to session.handlePacket() Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) }) It("errors on invalid public header", func() { err := serv.handlePacket(nil, nil, nil) Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.InvalidPacketHeader)) }) It("ignores public resets for unknown connections", func() { err := serv.handlePacket(nil, nil, wire.WritePublicReset(999, 1, 1337)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(BeEmpty()) }) It("ignores public resets for known connections", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) err = serv.handlePacket(nil, nil, wire.WritePublicReset(connID, 1, 1337)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) }) It("ignores invalid public resets for known connections", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) data := wire.WritePublicReset(connID, 1, 1337) err = serv.handlePacket(nil, nil, data[:len(data)-2]) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) 
Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) }) It("doesn't try to process a packet after sending a gQUIC Version Negotiation Packet", func() { config.Versions = []protocol.VersionNumber{99} b := &bytes.Buffer{} hdr := wire.Header{ VersionFlag: true, ConnectionID: 0x1337, PacketNumber: 1, PacketNumberLen: protocol.PacketNumberLen2, } hdr.Write(b, protocol.PerspectiveClient, 13 /* not a valid QUIC version */) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)) // add a fake CHLO err := serv.handlePacket(conn, nil, b.Bytes()) Expect(conn.dataWritten.Bytes()).ToNot(BeEmpty()) Expect(err).ToNot(HaveOccurred()) }) It("doesn't respond with a version negotiation packet if the first packet is too small", func() { b := &bytes.Buffer{} hdr := wire.Header{ VersionFlag: true, ConnectionID: 0x1337, PacketNumber: 1, PacketNumberLen: protocol.PacketNumberLen2, } hdr.Write(b, protocol.PerspectiveClient, 13 /* not a valid QUIC version */) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize-1)) // this packet is 1 byte too small err := serv.handlePacket(conn, udpAddr, b.Bytes()) Expect(err).To(MatchError("dropping small packet with unknown version")) Expect(conn.dataWritten.Len()).Should(BeZero()) }) }) It("setups with the right values", func() { supportedVersions := []protocol.VersionNumber{protocol.VersionTLS, protocol.Version39} acceptCookie := func(_ net.Addr, _ *Cookie) bool { return true } config := Config{ Versions: supportedVersions, AcceptCookie: acceptCookie, HandshakeTimeout: 1337 * time.Hour, IdleTimeout: 42 * time.Minute, KeepAlive: true, } ln, err := Listen(conn, &tls.Config{}, &config) Expect(err).ToNot(HaveOccurred()) server := ln.(*server) Expect(server.deleteClosedSessionsAfter).To(Equal(protocol.ClosedSessionDeleteTimeout)) Expect(server.sessions).ToNot(BeNil()) Expect(server.scfg).ToNot(BeNil()) Expect(server.config.Versions).To(Equal(supportedVersions)) Expect(server.config.HandshakeTimeout).To(Equal(1337 * time.Hour)) 
Expect(server.config.IdleTimeout).To(Equal(42 * time.Minute)) Expect(reflect.ValueOf(server.config.AcceptCookie)).To(Equal(reflect.ValueOf(acceptCookie))) Expect(server.config.KeepAlive).To(BeTrue()) }) It("errors when the Config contains an invalid version", func() { version := protocol.VersionNumber(0x1234) _, err := Listen(conn, &tls.Config{}, &Config{Versions: []protocol.VersionNumber{version}}) Expect(err).To(MatchError("0x1234 is not a valid QUIC version")) }) It("fills in default values if options are not set in the Config", func() { ln, err := Listen(conn, &tls.Config{}, &Config{}) Expect(err).ToNot(HaveOccurred()) server := ln.(*server) Expect(server.config.Versions).To(Equal(protocol.SupportedVersions)) Expect(server.config.HandshakeTimeout).To(Equal(protocol.DefaultHandshakeTimeout)) Expect(server.config.IdleTimeout).To(Equal(protocol.DefaultIdleTimeout)) Expect(reflect.ValueOf(server.config.AcceptCookie)).To(Equal(reflect.ValueOf(defaultAcceptCookie))) Expect(server.config.KeepAlive).To(BeFalse()) }) It("listens on a given address", func() { addr := "127.0.0.1:13579" ln, err := ListenAddr(addr, nil, config) Expect(err).ToNot(HaveOccurred()) serv := ln.(*server) Expect(serv.Addr().String()).To(Equal(addr)) }) It("errors if given an invalid address", func() { addr := "127.0.0.1" _, err := ListenAddr(addr, nil, config) Expect(err).To(BeAssignableToTypeOf(&net.AddrError{})) }) It("errors if given an invalid address", func() { addr := "1.1.1.1:1111" _, err := ListenAddr(addr, nil, config) Expect(err).To(BeAssignableToTypeOf(&net.OpError{})) }) It("sends a gQUIC Version Negotaion Packet, if the client sent a gQUIC Public Header", func() { b := &bytes.Buffer{} hdr := wire.Header{ VersionFlag: true, ConnectionID: 0x1337, PacketNumber: 1, PacketNumberLen: protocol.PacketNumberLen2, } hdr.Write(b, protocol.PerspectiveClient, 13 /* not a valid QUIC version */) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)) // add a fake CHLO conn.dataToRead <- 
b.Bytes() conn.dataReadFrom = udpAddr ln, err := Listen(conn, nil, config) Expect(err).ToNot(HaveOccurred()) done := make(chan struct{}) go func() { defer GinkgoRecover() ln.Accept() close(done) }() Eventually(func() int { return conn.dataWritten.Len() }).ShouldNot(BeZero()) Expect(conn.dataWrittenTo).To(Equal(udpAddr)) r := bytes.NewReader(conn.dataWritten.Bytes()) packet, err := wire.ParseHeaderSentByServer(r, protocol.VersionUnknown) Expect(err).ToNot(HaveOccurred()) Expect(packet.VersionFlag).To(BeTrue()) Expect(packet.ConnectionID).To(Equal(protocol.ConnectionID(0x1337))) Expect(r.Len()).To(BeZero()) Consistently(done).ShouldNot(BeClosed()) // make the go routine return ln.Close() Eventually(done).Should(BeClosed()) }) It("sends an IETF draft style Version Negotaion Packet, if the client sent a IETF draft style header", func() { config.Versions = append(config.Versions, protocol.VersionTLS) b := &bytes.Buffer{} hdr := wire.Header{ Type: protocol.PacketTypeInitial, IsLongHeader: true, ConnectionID: 0x1337, PacketNumber: 0x55, Version: 0x1234, } err := hdr.Write(b, protocol.PerspectiveClient, protocol.VersionTLS) Expect(err).ToNot(HaveOccurred()) b.Write(bytes.Repeat([]byte{0}, protocol.MinInitialPacketSize)) // add a fake CHLO conn.dataToRead <- b.Bytes() conn.dataReadFrom = udpAddr ln, err := Listen(conn, testdata.GetTLSConfig(), config) Expect(err).ToNot(HaveOccurred()) done := make(chan struct{}) go func() { defer GinkgoRecover() ln.Accept() close(done) }() Eventually(func() int { return conn.dataWritten.Len() }).ShouldNot(BeZero()) Expect(conn.dataWrittenTo).To(Equal(udpAddr)) r := bytes.NewReader(conn.dataWritten.Bytes()) packet, err := wire.ParseHeaderSentByServer(r, protocol.VersionUnknown) Expect(err).ToNot(HaveOccurred()) Expect(packet.IsVersionNegotiation).To(BeTrue()) Expect(packet.ConnectionID).To(Equal(protocol.ConnectionID(0x1337))) Expect(r.Len()).To(BeZero()) Consistently(done).ShouldNot(BeClosed()) // make the go routine return ln.Close() 
Eventually(done).Should(BeClosed()) }) It("ignores IETF draft style Initial packets, if it doesn't support TLS", func() { b := &bytes.Buffer{} hdr := wire.Header{ Type: protocol.PacketTypeInitial, IsLongHeader: true, ConnectionID: 0x1337, PacketNumber: 0x55, Version: protocol.VersionTLS, } err := hdr.Write(b, protocol.PerspectiveClient, protocol.VersionTLS) Expect(err).ToNot(HaveOccurred()) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)) // add a fake CHLO conn.dataToRead <- b.Bytes() conn.dataReadFrom = udpAddr ln, err := Listen(conn, testdata.GetTLSConfig(), config) Expect(err).ToNot(HaveOccurred()) defer ln.Close() Consistently(func() int { return conn.dataWritten.Len() }).Should(BeZero()) }) It("sends a PublicReset for new connections that don't have the VersionFlag set", func() { conn.dataReadFrom = udpAddr conn.dataToRead <- []byte{0x08, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6, 0x01} ln, err := Listen(conn, nil, config) Expect(err).ToNot(HaveOccurred()) go func() { defer GinkgoRecover() _, err := ln.Accept() Expect(err).ToNot(HaveOccurred()) }() Eventually(func() int { return conn.dataWritten.Len() }).ShouldNot(BeZero()) Expect(conn.dataWrittenTo).To(Equal(udpAddr)) Expect(conn.dataWritten.Bytes()[0] & 0x02).ToNot(BeZero()) // check that the ResetFlag is set Expect(ln.(*server).sessions).To(BeEmpty()) }) }) var _ = Describe("default source address verification", func() { It("accepts a token", func() { remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} cookie := &Cookie{ RemoteAddr: "192.168.0.1", SentTime: time.Now().Add(-protocol.CookieExpiryTime).Add(time.Second), // will expire in 1 second } Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeTrue()) }) It("requests verification if no token is provided", func() { remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} Expect(defaultAcceptCookie(remoteAddr, nil)).To(BeFalse()) }) It("rejects a token if the address doesn't match", func() { remoteAddr := &net.UDPAddr{IP: 
net.IPv4(192, 168, 0, 1)} cookie := &Cookie{ RemoteAddr: "127.0.0.1", SentTime: time.Now(), } Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeFalse()) }) It("accepts a token for a remote address is not a UDP address", func() { remoteAddr := &net.TCPAddr{IP: net.IPv4(192, 168, 0, 1), Port: 1337} cookie := &Cookie{ RemoteAddr: "192.168.0.1:1337", SentTime: time.Now(), } Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeTrue()) }) It("rejects an invalid token for a remote address is not a UDP address", func() { remoteAddr := &net.TCPAddr{IP: net.IPv4(192, 168, 0, 1), Port: 1337} cookie := &Cookie{ RemoteAddr: "192.168.0.1:7331", // mismatching port SentTime: time.Now(), } Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeFalse()) }) It("rejects an expired token", func() { remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} cookie := &Cookie{ RemoteAddr: "192.168.0.1", SentTime: time.Now().Add(-protocol.CookieExpiryTime).Add(-time.Second), // expired 1 second ago } Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeFalse()) }) }) fix incorrect server test When a Read from the connection fails, we need to close all sessions, but it's not necessary to remove them from the sessions map in the server. package quic import ( "bytes" "context" "crypto/tls" "errors" "net" "reflect" "time" "github.com/lucas-clemente/quic-go/internal/crypto" "github.com/lucas-clemente/quic-go/internal/handshake" "github.com/lucas-clemente/quic-go/internal/protocol" "github.com/lucas-clemente/quic-go/internal/testdata" "github.com/lucas-clemente/quic-go/internal/utils" "github.com/lucas-clemente/quic-go/internal/wire" "github.com/lucas-clemente/quic-go/qerr" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) type mockSession struct { connectionID protocol.ConnectionID packetCount int closed bool closeReason error closedRemote bool stopRunLoop chan struct{} // run returns as soon as this channel receives a value handshakeChan chan error } func (s *mockSession) handlePacket(*receivedPacket) { s.packetCount++ } func (s *mockSession) run() error { <-s.stopRunLoop return s.closeReason } func (s *mockSession) Close(e error) error { if s.closed { return nil } s.closeReason = e s.closed = true close(s.stopRunLoop) return nil } func (s *mockSession) closeRemote(e error) { s.closeReason = e s.closed = true s.closedRemote = true close(s.stopRunLoop) } func (s *mockSession) OpenStream() (Stream, error) { return &stream{}, nil } func (s *mockSession) AcceptStream() (Stream, error) { panic("not implemented") } func (s *mockSession) AcceptUniStream() (ReceiveStream, error) { panic("not implemented") } func (s *mockSession) OpenStreamSync() (Stream, error) { panic("not implemented") } func (s *mockSession) OpenUniStream() (SendStream, error) { panic("not implemented") } func (s *mockSession) OpenUniStreamSync() (SendStream, error) { panic("not implemented") } func (s *mockSession) LocalAddr() net.Addr { panic("not implemented") } func (s *mockSession) RemoteAddr() net.Addr { panic("not implemented") } func (*mockSession) Context() context.Context { panic("not implemented") } func (*mockSession) ConnectionState() ConnectionState { panic("not implemented") } func (*mockSession) GetVersion() protocol.VersionNumber { return protocol.VersionWhatever } func (s *mockSession) handshakeStatus() <-chan error { return s.handshakeChan } func (*mockSession) getCryptoStream() cryptoStreamI { panic("not implemented") } var _ Session = &mockSession{} func newMockSession( _ connection, _ protocol.VersionNumber, connectionID protocol.ConnectionID, _ *handshake.ServerConfig, _ *tls.Config, _ *Config, ) (packetHandler, error) { s := mockSession{ connectionID: connectionID, 
handshakeChan: make(chan error), stopRunLoop: make(chan struct{}), } return &s, nil } var _ = Describe("Server", func() { var ( conn *mockPacketConn config *Config udpAddr = &net.UDPAddr{IP: net.IPv4(192, 168, 100, 200), Port: 1337} ) BeforeEach(func() { conn = newMockPacketConn() conn.addr = &net.UDPAddr{} config = &Config{Versions: protocol.SupportedVersions} }) Context("with mock session", func() { var ( serv *server firstPacket []byte // a valid first packet for a new connection with connectionID 0x4cfa9f9b668619f6 (= connID) connID = protocol.ConnectionID(0x4cfa9f9b668619f6) ) BeforeEach(func() { serv = &server{ sessions: make(map[protocol.ConnectionID]packetHandler), newSession: newMockSession, conn: conn, config: config, sessionQueue: make(chan Session, 5), errorChan: make(chan struct{}), } b := &bytes.Buffer{} utils.BigEndian.WriteUint32(b, uint32(protocol.SupportedVersions[0])) firstPacket = []byte{0x09, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6} firstPacket = append(append(firstPacket, b.Bytes()...), 0x01) firstPacket = append(firstPacket, bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)...) 
// add padding }) It("setups with the right values", func() { config := &Config{ HandshakeTimeout: 1337 * time.Minute, IdleTimeout: 42 * time.Hour, RequestConnectionIDOmission: true, MaxIncomingStreams: 1234, MaxIncomingUniStreams: 4321, } c := populateServerConfig(config) Expect(c.HandshakeTimeout).To(Equal(1337 * time.Minute)) Expect(c.IdleTimeout).To(Equal(42 * time.Hour)) Expect(c.RequestConnectionIDOmission).To(BeFalse()) Expect(c.MaxIncomingStreams).To(Equal(1234)) Expect(c.MaxIncomingUniStreams).To(Equal(4321)) }) It("disables bidirectional streams", func() { config := &Config{ MaxIncomingStreams: -1, MaxIncomingUniStreams: 4321, } c := populateServerConfig(config) Expect(c.MaxIncomingStreams).To(BeZero()) Expect(c.MaxIncomingUniStreams).To(Equal(4321)) }) It("disables unidirectional streams", func() { config := &Config{ MaxIncomingStreams: 1234, MaxIncomingUniStreams: -1, } c := populateServerConfig(config) Expect(c.MaxIncomingStreams).To(Equal(1234)) Expect(c.MaxIncomingUniStreams).To(BeZero()) }) It("returns the address", func() { conn.addr = &net.UDPAddr{ IP: net.IPv4(192, 168, 13, 37), Port: 1234, } Expect(serv.Addr().String()).To(Equal("192.168.13.37:1234")) }) It("creates new sessions", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) sess := serv.sessions[connID].(*mockSession) Expect(sess.connectionID).To(Equal(connID)) Expect(sess.packetCount).To(Equal(1)) }) It("accepts new TLS sessions", func() { connID := protocol.ConnectionID(0x12345) sess, err := newMockSession(nil, protocol.VersionTLS, connID, nil, nil, nil) Expect(err).ToNot(HaveOccurred()) err = serv.setupTLS() Expect(err).ToNot(HaveOccurred()) serv.serverTLS.sessionChan <- tlsSession{ connID: connID, sess: sess, } Eventually(func() packetHandler { serv.sessionsMutex.Lock() defer serv.sessionsMutex.Unlock() return serv.sessions[connID] }).Should(Equal(sess)) }) It("only accepts one new TLS sessions for one 
connection ID", func() { connID := protocol.ConnectionID(0x12345) sess1, err := newMockSession(nil, protocol.VersionTLS, connID, nil, nil, nil) Expect(err).ToNot(HaveOccurred()) sess2, err := newMockSession(nil, protocol.VersionTLS, connID, nil, nil, nil) Expect(err).ToNot(HaveOccurred()) err = serv.setupTLS() Expect(err).ToNot(HaveOccurred()) serv.serverTLS.sessionChan <- tlsSession{ connID: connID, sess: sess1, } Eventually(func() packetHandler { serv.sessionsMutex.Lock() defer serv.sessionsMutex.Unlock() return serv.sessions[connID] }).Should(Equal(sess1)) serv.serverTLS.sessionChan <- tlsSession{ connID: connID, sess: sess2, } Eventually(func() packetHandler { serv.sessionsMutex.Lock() defer serv.sessionsMutex.Unlock() return serv.sessions[connID] }).Should(Equal(sess1)) }) It("accepts a session once the connection it is forward secure", func(done Done) { var acceptedSess Session go func() { defer GinkgoRecover() var err error acceptedSess, err = serv.Accept() Expect(err).ToNot(HaveOccurred()) }() err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) sess := serv.sessions[connID].(*mockSession) Consistently(func() Session { return acceptedSess }).Should(BeNil()) close(sess.handshakeChan) Eventually(func() Session { return acceptedSess }).Should(Equal(sess)) close(done) }, 0.5) It("doesn't accept session that error during the handshake", func(done Done) { var accepted bool go func() { defer GinkgoRecover() serv.Accept() accepted = true }() err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) sess := serv.sessions[connID].(*mockSession) sess.handshakeChan <- errors.New("handshake failed") Consistently(func() bool { return accepted }).Should(BeFalse()) close(done) }) It("assigns packets to existing sessions", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) err = serv.handlePacket(nil, nil, 
[]byte{0x08, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6, 0x01}) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).connectionID).To(Equal(connID)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(2)) }) It("closes and deletes sessions", func() { serv.deleteClosedSessionsAfter = time.Second // make sure that the nil value for the closed session doesn't get deleted in this test nullAEAD, err := crypto.NewNullAEAD(protocol.PerspectiveServer, connID, protocol.VersionWhatever) Expect(err).ToNot(HaveOccurred()) err = serv.handlePacket(nil, nil, append(firstPacket, nullAEAD.Seal(nil, nil, 0, firstPacket)...)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID]).ToNot(BeNil()) // make session.run() return serv.sessions[connID].(*mockSession).stopRunLoop <- struct{}{} // The server should now have closed the session, leaving a nil value in the sessions map Consistently(func() map[protocol.ConnectionID]packetHandler { return serv.sessions }).Should(HaveLen(1)) Expect(serv.sessions[connID]).To(BeNil()) }) It("deletes nil session entries after a wait time", func() { serv.deleteClosedSessionsAfter = 25 * time.Millisecond nullAEAD, err := crypto.NewNullAEAD(protocol.PerspectiveServer, connID, protocol.VersionWhatever) Expect(err).ToNot(HaveOccurred()) err = serv.handlePacket(nil, nil, append(firstPacket, nullAEAD.Seal(nil, nil, 0, firstPacket)...)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions).To(HaveKey(connID)) // make session.run() return serv.sessions[connID].(*mockSession).stopRunLoop <- struct{}{} Eventually(func() bool { serv.sessionsMutex.Lock() _, ok := serv.sessions[connID] serv.sessionsMutex.Unlock() return ok }).Should(BeFalse()) }) It("closes sessions and the connection when Close is called", func() { go serv.serve() session, _ := newMockSession(nil, 0, 0, nil, nil, nil) serv.sessions[1] = 
session err := serv.Close() Expect(err).NotTo(HaveOccurred()) Expect(session.(*mockSession).closed).To(BeTrue()) Expect(conn.closed).To(BeTrue()) }) It("ignores packets for closed sessions", func() { serv.sessions[connID] = nil err := serv.handlePacket(nil, nil, []byte{0x08, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6, 0x01}) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID]).To(BeNil()) }) It("works if no quic.Config is given", func(done Done) { ln, err := ListenAddr("127.0.0.1:0", testdata.GetTLSConfig(), nil) Expect(err).ToNot(HaveOccurred()) Expect(ln.Close()).To(Succeed()) close(done) }, 1) It("closes properly", func() { ln, err := ListenAddr("127.0.0.1:0", testdata.GetTLSConfig(), config) Expect(err).ToNot(HaveOccurred()) var returned bool go func() { defer GinkgoRecover() _, err := ln.Accept() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("use of closed network connection")) returned = true }() ln.Close() Eventually(func() bool { return returned }).Should(BeTrue()) }) It("errors when encountering a connection error", func(done Done) { testErr := errors.New("connection error") conn.readErr = testErr go serv.serve() _, err := serv.Accept() Expect(err).To(MatchError(testErr)) Expect(serv.Close()).To(Succeed()) close(done) }, 0.5) It("closes all sessions when encountering a connection error", func() { session, _ := newMockSession(nil, 0, 0, nil, nil, nil) serv.sessions[0x12345] = session Expect(serv.sessions[0x12345].(*mockSession).closed).To(BeFalse()) testErr := errors.New("connection error") conn.readErr = testErr go serv.serve() Eventually(func() bool { return session.(*mockSession).closed }).Should(BeTrue()) Expect(serv.Close()).To(Succeed()) }) It("ignores delayed packets with mismatching versions", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) b := &bytes.Buffer{} 
// add an unsupported version data := []byte{0x09, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6} utils.BigEndian.WriteUint32(b, uint32(protocol.SupportedVersions[0]+1)) data = append(append(data, b.Bytes()...), 0x01) err = serv.handlePacket(nil, nil, data) Expect(err).ToNot(HaveOccurred()) // if we didn't ignore the packet, the server would try to send a version negotiation packet, which would make the test panic because it doesn't have a udpConn Expect(conn.dataWritten.Bytes()).To(BeEmpty()) // make sure the packet was *not* passed to session.handlePacket() Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) }) It("errors on invalid public header", func() { err := serv.handlePacket(nil, nil, nil) Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.InvalidPacketHeader)) }) It("ignores public resets for unknown connections", func() { err := serv.handlePacket(nil, nil, wire.WritePublicReset(999, 1, 1337)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(BeEmpty()) }) It("ignores public resets for known connections", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) err = serv.handlePacket(nil, nil, wire.WritePublicReset(connID, 1, 1337)) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) }) It("ignores invalid public resets for known connections", func() { err := serv.handlePacket(nil, nil, firstPacket) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) data := wire.WritePublicReset(connID, 1, 1337) err = serv.handlePacket(nil, nil, data[:len(data)-2]) Expect(err).ToNot(HaveOccurred()) Expect(serv.sessions).To(HaveLen(1)) Expect(serv.sessions[connID].(*mockSession).packetCount).To(Equal(1)) }) It("doesn't try to 
process a packet after sending a gQUIC Version Negotiation Packet", func() { config.Versions = []protocol.VersionNumber{99} b := &bytes.Buffer{} hdr := wire.Header{ VersionFlag: true, ConnectionID: 0x1337, PacketNumber: 1, PacketNumberLen: protocol.PacketNumberLen2, } hdr.Write(b, protocol.PerspectiveClient, 13 /* not a valid QUIC version */) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)) // add a fake CHLO err := serv.handlePacket(conn, nil, b.Bytes()) Expect(conn.dataWritten.Bytes()).ToNot(BeEmpty()) Expect(err).ToNot(HaveOccurred()) }) It("doesn't respond with a version negotiation packet if the first packet is too small", func() { b := &bytes.Buffer{} hdr := wire.Header{ VersionFlag: true, ConnectionID: 0x1337, PacketNumber: 1, PacketNumberLen: protocol.PacketNumberLen2, } hdr.Write(b, protocol.PerspectiveClient, 13 /* not a valid QUIC version */) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize-1)) // this packet is 1 byte too small err := serv.handlePacket(conn, udpAddr, b.Bytes()) Expect(err).To(MatchError("dropping small packet with unknown version")) Expect(conn.dataWritten.Len()).Should(BeZero()) }) }) It("setups with the right values", func() { supportedVersions := []protocol.VersionNumber{protocol.VersionTLS, protocol.Version39} acceptCookie := func(_ net.Addr, _ *Cookie) bool { return true } config := Config{ Versions: supportedVersions, AcceptCookie: acceptCookie, HandshakeTimeout: 1337 * time.Hour, IdleTimeout: 42 * time.Minute, KeepAlive: true, } ln, err := Listen(conn, &tls.Config{}, &config) Expect(err).ToNot(HaveOccurred()) server := ln.(*server) Expect(server.deleteClosedSessionsAfter).To(Equal(protocol.ClosedSessionDeleteTimeout)) Expect(server.sessions).ToNot(BeNil()) Expect(server.scfg).ToNot(BeNil()) Expect(server.config.Versions).To(Equal(supportedVersions)) Expect(server.config.HandshakeTimeout).To(Equal(1337 * time.Hour)) Expect(server.config.IdleTimeout).To(Equal(42 * time.Minute)) 
Expect(reflect.ValueOf(server.config.AcceptCookie)).To(Equal(reflect.ValueOf(acceptCookie))) Expect(server.config.KeepAlive).To(BeTrue()) }) It("errors when the Config contains an invalid version", func() { version := protocol.VersionNumber(0x1234) _, err := Listen(conn, &tls.Config{}, &Config{Versions: []protocol.VersionNumber{version}}) Expect(err).To(MatchError("0x1234 is not a valid QUIC version")) }) It("fills in default values if options are not set in the Config", func() { ln, err := Listen(conn, &tls.Config{}, &Config{}) Expect(err).ToNot(HaveOccurred()) server := ln.(*server) Expect(server.config.Versions).To(Equal(protocol.SupportedVersions)) Expect(server.config.HandshakeTimeout).To(Equal(protocol.DefaultHandshakeTimeout)) Expect(server.config.IdleTimeout).To(Equal(protocol.DefaultIdleTimeout)) Expect(reflect.ValueOf(server.config.AcceptCookie)).To(Equal(reflect.ValueOf(defaultAcceptCookie))) Expect(server.config.KeepAlive).To(BeFalse()) }) It("listens on a given address", func() { addr := "127.0.0.1:13579" ln, err := ListenAddr(addr, nil, config) Expect(err).ToNot(HaveOccurred()) serv := ln.(*server) Expect(serv.Addr().String()).To(Equal(addr)) }) It("errors if given an invalid address", func() { addr := "127.0.0.1" _, err := ListenAddr(addr, nil, config) Expect(err).To(BeAssignableToTypeOf(&net.AddrError{})) }) It("errors if given an invalid address", func() { addr := "1.1.1.1:1111" _, err := ListenAddr(addr, nil, config) Expect(err).To(BeAssignableToTypeOf(&net.OpError{})) }) It("sends a gQUIC Version Negotaion Packet, if the client sent a gQUIC Public Header", func() { b := &bytes.Buffer{} hdr := wire.Header{ VersionFlag: true, ConnectionID: 0x1337, PacketNumber: 1, PacketNumberLen: protocol.PacketNumberLen2, } hdr.Write(b, protocol.PerspectiveClient, 13 /* not a valid QUIC version */) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)) // add a fake CHLO conn.dataToRead <- b.Bytes() conn.dataReadFrom = udpAddr ln, err := Listen(conn, nil, 
config) Expect(err).ToNot(HaveOccurred()) done := make(chan struct{}) go func() { defer GinkgoRecover() ln.Accept() close(done) }() Eventually(func() int { return conn.dataWritten.Len() }).ShouldNot(BeZero()) Expect(conn.dataWrittenTo).To(Equal(udpAddr)) r := bytes.NewReader(conn.dataWritten.Bytes()) packet, err := wire.ParseHeaderSentByServer(r, protocol.VersionUnknown) Expect(err).ToNot(HaveOccurred()) Expect(packet.VersionFlag).To(BeTrue()) Expect(packet.ConnectionID).To(Equal(protocol.ConnectionID(0x1337))) Expect(r.Len()).To(BeZero()) Consistently(done).ShouldNot(BeClosed()) // make the go routine return ln.Close() Eventually(done).Should(BeClosed()) }) It("sends an IETF draft style Version Negotaion Packet, if the client sent a IETF draft style header", func() { config.Versions = append(config.Versions, protocol.VersionTLS) b := &bytes.Buffer{} hdr := wire.Header{ Type: protocol.PacketTypeInitial, IsLongHeader: true, ConnectionID: 0x1337, PacketNumber: 0x55, Version: 0x1234, } err := hdr.Write(b, protocol.PerspectiveClient, protocol.VersionTLS) Expect(err).ToNot(HaveOccurred()) b.Write(bytes.Repeat([]byte{0}, protocol.MinInitialPacketSize)) // add a fake CHLO conn.dataToRead <- b.Bytes() conn.dataReadFrom = udpAddr ln, err := Listen(conn, testdata.GetTLSConfig(), config) Expect(err).ToNot(HaveOccurred()) done := make(chan struct{}) go func() { defer GinkgoRecover() ln.Accept() close(done) }() Eventually(func() int { return conn.dataWritten.Len() }).ShouldNot(BeZero()) Expect(conn.dataWrittenTo).To(Equal(udpAddr)) r := bytes.NewReader(conn.dataWritten.Bytes()) packet, err := wire.ParseHeaderSentByServer(r, protocol.VersionUnknown) Expect(err).ToNot(HaveOccurred()) Expect(packet.IsVersionNegotiation).To(BeTrue()) Expect(packet.ConnectionID).To(Equal(protocol.ConnectionID(0x1337))) Expect(r.Len()).To(BeZero()) Consistently(done).ShouldNot(BeClosed()) // make the go routine return ln.Close() Eventually(done).Should(BeClosed()) }) It("ignores IETF draft style 
Initial packets, if it doesn't support TLS", func() { b := &bytes.Buffer{} hdr := wire.Header{ Type: protocol.PacketTypeInitial, IsLongHeader: true, ConnectionID: 0x1337, PacketNumber: 0x55, Version: protocol.VersionTLS, } err := hdr.Write(b, protocol.PerspectiveClient, protocol.VersionTLS) Expect(err).ToNot(HaveOccurred()) b.Write(bytes.Repeat([]byte{0}, protocol.MinClientHelloSize)) // add a fake CHLO conn.dataToRead <- b.Bytes() conn.dataReadFrom = udpAddr ln, err := Listen(conn, testdata.GetTLSConfig(), config) Expect(err).ToNot(HaveOccurred()) defer ln.Close() Consistently(func() int { return conn.dataWritten.Len() }).Should(BeZero()) }) It("sends a PublicReset for new connections that don't have the VersionFlag set", func() { conn.dataReadFrom = udpAddr conn.dataToRead <- []byte{0x08, 0x4c, 0xfa, 0x9f, 0x9b, 0x66, 0x86, 0x19, 0xf6, 0x01} ln, err := Listen(conn, nil, config) Expect(err).ToNot(HaveOccurred()) go func() { defer GinkgoRecover() _, err := ln.Accept() Expect(err).ToNot(HaveOccurred()) }() Eventually(func() int { return conn.dataWritten.Len() }).ShouldNot(BeZero()) Expect(conn.dataWrittenTo).To(Equal(udpAddr)) Expect(conn.dataWritten.Bytes()[0] & 0x02).ToNot(BeZero()) // check that the ResetFlag is set Expect(ln.(*server).sessions).To(BeEmpty()) }) }) var _ = Describe("default source address verification", func() { It("accepts a token", func() { remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} cookie := &Cookie{ RemoteAddr: "192.168.0.1", SentTime: time.Now().Add(-protocol.CookieExpiryTime).Add(time.Second), // will expire in 1 second } Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeTrue()) }) It("requests verification if no token is provided", func() { remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} Expect(defaultAcceptCookie(remoteAddr, nil)).To(BeFalse()) }) It("rejects a token if the address doesn't match", func() { remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} cookie := &Cookie{ RemoteAddr: "127.0.0.1", 
			SentTime:   time.Now(),
		}
		// Cookie was issued for 127.0.0.1 but the peer is 192.168.0.1:
		// the address mismatch must cause rejection.
		Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeFalse())
	})
	It("accepts a token for a remote address is not a UDP address", func() {
		remoteAddr := &net.TCPAddr{IP: net.IPv4(192, 168, 0, 1), Port: 1337}
		cookie := &Cookie{
			RemoteAddr: "192.168.0.1:1337",
			SentTime:   time.Now(),
		}
		// For non-UDP addresses the full host:port string must match
		// (see the mismatching-port case below).
		Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeTrue())
	})
	It("rejects an invalid token for a remote address is not a UDP address", func() {
		remoteAddr := &net.TCPAddr{IP: net.IPv4(192, 168, 0, 1), Port: 1337}
		cookie := &Cookie{
			RemoteAddr: "192.168.0.1:7331", // mismatching port
			SentTime:   time.Now(),
		}
		Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeFalse())
	})
	It("rejects an expired token", func() {
		remoteAddr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}
		cookie := &Cookie{
			RemoteAddr: "192.168.0.1",
			SentTime:   time.Now().Add(-protocol.CookieExpiryTime).Add(-time.Second), // expired 1 second ago
		}
		Expect(defaultAcceptCookie(remoteAddr, cookie)).To(BeFalse())
	})
})
// Copyright 2012 SocialCode. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.

package gelf

import (
	"bytes"
	"compress/flate"
	"compress/gzip"
	"compress/zlib"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"path"
	"runtime"
	"strings"
	"sync"
	"time"
)

// Writer implements io.Writer and is used to send both discrete
// messages to a graylog2 server, or data from a stream-oriented
// interface (like the functions in log).
type Writer struct {
	mu               sync.Mutex // NOTE(review): never locked in the visible code — confirm whether Writer is meant to be goroutine-safe
	conn             net.Conn   // UDP connection created by NewWriter
	hostname         string     // cached os.Hostname(), stamped into every message
	Facility         string     // defaults to current process name
	CompressionLevel int        // one of the consts from compress/flate
	CompressionType  CompressType
}

// What compression type the writer should use when sending messages
// to the graylog2 server
type CompressType int

const (
	CompressGzip CompressType = iota
	CompressZlib
	CompressNone
)

// Message represents the contents of the GELF message. It is gzipped
// before sending.
type Message struct {
	Version  string  `json:"version"`
	Host     string  `json:"host"`
	Short    string  `json:"short_message"`
	Full     string  `json:"full_message,omitempty"`
	TimeUnix float64 `json:"timestamp"`
	Level    int32   `json:"level,omitempty"`
	Facility string  `json:"facility,omitempty"`
	// Extra and RawExtra are excluded from encoding/json ("-") and are
	// instead spliced into the payload by MarshalJSONBuf.
	Extra    map[string]interface{} `json:"-"`
	RawExtra json.RawMessage        `json:"-"`
}

// Used to control GELF chunking. Should be less than (MTU - len(UDP
// header)).
//
// TODO: generate dynamically using Path MTU Discovery?
const (
	ChunkSize        = 1420
	chunkedHeaderLen = 12 // 2 magic bytes + 8-byte message id + sequence byte + count byte
	chunkedDataLen   = ChunkSize - chunkedHeaderLen
)

var (
	magicChunked = []byte{0x1e, 0x0f}
	magicZlib    = []byte{0x78}
	magicGzip    = []byte{0x1f, 0x8b}
)

// Syslog severity levels
const (
	LOG_EMERG   = int32(0)
	LOG_ALERT   = int32(1)
	LOG_CRIT    = int32(2)
	LOG_ERR     = int32(3)
	LOG_WARNING = int32(4)
	LOG_NOTICE  = int32(5)
	LOG_INFO    = int32(6)
	LOG_DEBUG   = int32(7)
)

// numChunks returns the number of GELF chunks necessary to transmit
// the given compressed buffer.
func numChunks(b []byte) int { lenB := len(b) if lenB <= ChunkSize { return 1 } return len(b)/chunkedDataLen + 1 } // New returns a new GELF Writer. This writer can be used to send the // output of the standard Go log functions to a central GELF server by // passing it to log.SetOutput() func NewWriter(addr string) (*Writer, error) { var err error w := new(Writer) w.CompressionLevel = flate.BestSpeed if w.conn, err = net.Dial("udp", addr); err != nil { return nil, err } if w.hostname, err = os.Hostname(); err != nil { return nil, err } w.Facility = path.Base(os.Args[0]) return w, nil } // writes the gzip compressed byte array to the connection as a series // of GELF chunked messages. The header format is documented at // https://github.com/Graylog2/graylog2-docs/wiki/GELF as: // // 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte // total, chunk-data func (w *Writer) writeChunked(zBytes []byte) (err error) { b := make([]byte, 0, ChunkSize) buf := bytes.NewBuffer(b) nChunksI := numChunks(zBytes) if nChunksI > 128 { return fmt.Errorf("msg too large, would need %d chunks", nChunksI) } nChunks := uint8(nChunksI) // use urandom to get a unique message id msgId := make([]byte, 8) n, err := io.ReadFull(rand.Reader, msgId) if err != nil || n != 8 { return fmt.Errorf("rand.Reader: %d/%s", n, err) } bytesLeft := len(zBytes) for i := uint8(0); i < nChunks; i++ { buf.Reset() // manually write header. Don't care about // host/network byte order, because the spec only // deals in individual bytes. 
buf.Write(magicChunked) //magic buf.Write(msgId) buf.WriteByte(i) buf.WriteByte(nChunks) // slice out our chunk from zBytes chunkLen := chunkedDataLen if chunkLen > bytesLeft { chunkLen = bytesLeft } off := int(i) * chunkedDataLen chunk := zBytes[off : off+chunkLen] buf.Write(chunk) // write this chunk, and make sure the write was good n, err := w.conn.Write(buf.Bytes()) if err != nil { return fmt.Errorf("Write (chunk %d/%d): %s", i, nChunks, err) } if n != len(buf.Bytes()) { return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", i, nChunks, n, len(buf.Bytes())) } bytesLeft -= chunkLen } if bytesLeft != 0 { return fmt.Errorf("error: %d bytes left after sending", bytesLeft) } return nil } // 1k bytes buffer by default var bufPool = sync.Pool{ New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, 1024)) }, } func newBuffer() *bytes.Buffer { b := bufPool.Get().(*bytes.Buffer) if b != nil { b.Reset() return b } return bytes.NewBuffer(nil) } // WriteMessage sends the specified message to the GELF server // specified in the call to New(). It assumes all the fields are // filled out appropriately. In general, clients will want to use // Write, rather than WriteMessage. 
func (w *Writer) WriteMessage(m *Message) (err error) {
	// Serialize the message into a pooled buffer.
	mBuf := newBuffer()
	defer bufPool.Put(mBuf)
	if err = m.MarshalJSONBuf(mBuf); err != nil {
		return err
	}
	mBytes := mBuf.Bytes()

	var (
		zBuf   *bytes.Buffer
		zBytes []byte
	)
	var zw io.WriteCloser
	switch w.CompressionType {
	case CompressGzip:
		zBuf = newBuffer()
		defer bufPool.Put(zBuf)
		zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel)
	case CompressZlib:
		zBuf = newBuffer()
		defer bufPool.Put(zBuf)
		zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel)
	case CompressNone:
		// No compression: send the serialized JSON as-is.
		zBytes = mBytes
	default:
		panic(fmt.Sprintf("unknown compression type %d", w.CompressionType))
	}
	if zw != nil {
		// The NewWriterLevel error from the switch above is checked
		// here so both compressors share one error path.
		if err != nil {
			return
		}
		if _, err = zw.Write(mBytes); err != nil {
			zw.Close()
			return
		}
		zw.Close()
		zBytes = zBuf.Bytes()
	}

	// Payloads too big for one datagram are split into GELF chunks.
	if numChunks(zBytes) > 1 {
		return w.writeChunked(zBytes)
	}
	n, err := w.conn.Write(zBytes)
	if err != nil {
		return
	}
	if n != len(zBytes) {
		return fmt.Errorf("bad write (%d/%d)", n, len(zBytes))
	}
	return nil
}

// Close connection and interrupt blocked Read or Write operations
func (w *Writer) Close() error {
	return w.conn.Close()
}

/*
func (w *Writer) Alert(m string) (err error)
func (w *Writer) Close() error
func (w *Writer) Crit(m string) (err error)
func (w *Writer) Debug(m string) (err error)
func (w *Writer) Emerg(m string) (err error)
func (w *Writer) Err(m string) (err error)
func (w *Writer) Info(m string) (err error)
func (w *Writer) Notice(m string) (err error)
func (w *Writer) Warning(m string) (err error)
*/

// getCaller returns the filename and the line info of a function
// further down in the call stack. Passing 0 in as callDepth would
// return info on the function calling getCallerIgnoringLog, 1 the
// parent function, and so on. Any suffixes passed to getCaller are
// path fragments like "/pkg/log/log.go", and functions in the call
// stack from that file are ignored.
func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) {
	// bump by 1 to ignore the getCaller (this) stackframe
	callDepth++
outer:
	for {
		var ok bool
		_, file, line, ok = runtime.Caller(callDepth)
		if !ok {
			// Walked off the top of the stack; report an unknown location.
			file = "???"
			line = 0
			break
		}

		for _, s := range suffixesToIgnore {
			if strings.HasSuffix(file, s) {
				// Frame belongs to an ignored file: step one frame up
				// and re-check it.
				callDepth++
				continue outer
			}
		}
		break
	}
	return
}

func getCallerIgnoringLogMulti(callDepth int) (string, int) {
	// the +1 is to ignore this (getCallerIgnoringLogMulti) frame
	return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go")
}

// Write encodes the given string in a GELF message and sends it to
// the server specified in New().
func (w *Writer) Write(p []byte) (n int, err error) {
	// 1 for the function that called us.
	file, line := getCallerIgnoringLogMulti(1)

	// remove trailing and leading whitespace
	p = bytes.TrimSpace(p)

	// If there are newlines in the message, use the first line
	// for the short message and set the full message to the
	// original input. If the input has no newlines, stick the
	// whole thing in Short.
	short := p
	full := []byte("")
	if i := bytes.IndexRune(p, '\n'); i > 0 {
		short = p[:i]
		full = p
	}

	m := Message{
		Version:  "1.1",
		Host:     w.hostname,
		Short:    string(short),
		Full:     string(full),
		TimeUnix: float64(time.Now().Unix()), // NOTE(review): second resolution only — sub-second precision is dropped; confirm that is intended
		Level:    6, // info
		Facility: w.Facility,
		Extra: map[string]interface{}{
			"_file": file,
			"_line": line,
		},
	}

	if err = w.WriteMessage(&m); err != nil {
		return 0, err
	}

	// Report the full input length as consumed, per io.Writer.
	return len(p), nil
}

// MarshalJSONBuf serializes m into buf, splicing the Extra map and the
// pre-serialized RawExtra into the same top-level JSON object.
func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error {
	b, err := json.Marshal(m)
	if err != nil {
		return err
	}
	// write up until the final }
	if _, err = buf.Write(b[:len(b)-1]); err != nil {
		return err
	}
	if len(m.Extra) > 0 {
		eb, err := json.Marshal(m.Extra)
		if err != nil {
			return err
		}
		// merge serialized message + serialized extra map
		if err = buf.WriteByte(','); err != nil {
			return err
		}
		// write serialized extra bytes, without enclosing braces
		if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil {
			return err
		}
	}

	if len(m.RawExtra) > 0 {
		if err := buf.WriteByte(','); err != nil {
			return err
		}

		// write serialized extra bytes, without enclosing braces
		// (assumes RawExtra is a JSON object — TODO confirm callers
		// guarantee this)
		if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil {
			return err
		}
	}

	// write final closing brace
	return buf.WriteByte('}')
}

// UnmarshalJSON decodes a GELF payload; "_"-prefixed keys go into
// Extra, known keys into the named fields, anything else is dropped.
func (m *Message) UnmarshalJSON(data []byte) error {
	i := make(map[string]interface{}, 16)
	if err := json.Unmarshal(data, &i); err != nil {
		return err
	}
	for k, v := range i {
		if k[0] == '_' {
			if m.Extra == nil {
				m.Extra = make(map[string]interface{}, 1)
			}
			m.Extra[k] = v
			continue
		}
		// NOTE(review): the type assertions below panic on
		// unexpectedly-typed JSON values — confirm inputs are trusted.
		switch k {
		case "version":
			m.Version = v.(string)
		case "host":
			m.Host = v.(string)
		case "short_message":
			m.Short = v.(string)
		case "full_message":
			m.Full = v.(string)
		case "timestamp":
			m.TimeUnix = v.(float64)
		case "level":
			m.Level = int32(v.(float64))
		case "facility":
			m.Facility = v.(string)
		}
	}
	return nil
}

Fix dead URL for GELF spec in writer.go (#8)

// Copyright 2012 SocialCode. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.

package gelf

import (
	"bytes"
	"compress/flate"
	"compress/gzip"
	"compress/zlib"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"path"
	"runtime"
	"strings"
	"sync"
	"time"
)

// Writer implements io.Writer and is used to send both discrete
// messages to a graylog2 server, or data from a stream-oriented
// interface (like the functions in log).
type Writer struct {
	mu               sync.Mutex // NOTE(review): never locked in the visible code — confirm whether Writer is meant to be goroutine-safe
	conn             net.Conn   // UDP connection created by NewWriter
	hostname         string     // cached os.Hostname(), stamped into every message
	Facility         string     // defaults to current process name
	CompressionLevel int        // one of the consts from compress/flate
	CompressionType  CompressType
}

// What compression type the writer should use when sending messages
// to the graylog2 server
type CompressType int

const (
	CompressGzip CompressType = iota
	CompressZlib
	CompressNone
)

// Message represents the contents of the GELF message. It is gzipped
// before sending.
type Message struct {
	Version  string  `json:"version"`
	Host     string  `json:"host"`
	Short    string  `json:"short_message"`
	Full     string  `json:"full_message,omitempty"`
	TimeUnix float64 `json:"timestamp"`
	Level    int32   `json:"level,omitempty"`
	Facility string  `json:"facility,omitempty"`
	// Extra and RawExtra are excluded from encoding/json ("-") and are
	// instead spliced into the payload by MarshalJSONBuf.
	Extra    map[string]interface{} `json:"-"`
	RawExtra json.RawMessage        `json:"-"`
}

// Used to control GELF chunking. Should be less than (MTU - len(UDP
// header)).
//
// TODO: generate dynamically using Path MTU Discovery?
const (
	ChunkSize        = 1420
	chunkedHeaderLen = 12 // 2 magic bytes + 8-byte message id + sequence byte + count byte
	chunkedDataLen   = ChunkSize - chunkedHeaderLen
)

var (
	magicChunked = []byte{0x1e, 0x0f}
	magicZlib    = []byte{0x78}
	magicGzip    = []byte{0x1f, 0x8b}
)

// Syslog severity levels
const (
	LOG_EMERG   = int32(0)
	LOG_ALERT   = int32(1)
	LOG_CRIT    = int32(2)
	LOG_ERR     = int32(3)
	LOG_WARNING = int32(4)
	LOG_NOTICE  = int32(5)
	LOG_INFO    = int32(6)
	LOG_DEBUG   = int32(7)
)

// numChunks returns the number of GELF chunks necessary to transmit
// the given compressed buffer.
func numChunks(b []byte) int { lenB := len(b) if lenB <= ChunkSize { return 1 } return len(b)/chunkedDataLen + 1 } // New returns a new GELF Writer. This writer can be used to send the // output of the standard Go log functions to a central GELF server by // passing it to log.SetOutput() func NewWriter(addr string) (*Writer, error) { var err error w := new(Writer) w.CompressionLevel = flate.BestSpeed if w.conn, err = net.Dial("udp", addr); err != nil { return nil, err } if w.hostname, err = os.Hostname(); err != nil { return nil, err } w.Facility = path.Base(os.Args[0]) return w, nil } // writes the gzip compressed byte array to the connection as a series // of GELF chunked messages. The format is documented at // http://docs.graylog.org/en/2.1/pages/gelf.html as: // // 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte // total, chunk-data func (w *Writer) writeChunked(zBytes []byte) (err error) { b := make([]byte, 0, ChunkSize) buf := bytes.NewBuffer(b) nChunksI := numChunks(zBytes) if nChunksI > 128 { return fmt.Errorf("msg too large, would need %d chunks", nChunksI) } nChunks := uint8(nChunksI) // use urandom to get a unique message id msgId := make([]byte, 8) n, err := io.ReadFull(rand.Reader, msgId) if err != nil || n != 8 { return fmt.Errorf("rand.Reader: %d/%s", n, err) } bytesLeft := len(zBytes) for i := uint8(0); i < nChunks; i++ { buf.Reset() // manually write header. Don't care about // host/network byte order, because the spec only // deals in individual bytes. 
buf.Write(magicChunked) //magic buf.Write(msgId) buf.WriteByte(i) buf.WriteByte(nChunks) // slice out our chunk from zBytes chunkLen := chunkedDataLen if chunkLen > bytesLeft { chunkLen = bytesLeft } off := int(i) * chunkedDataLen chunk := zBytes[off : off+chunkLen] buf.Write(chunk) // write this chunk, and make sure the write was good n, err := w.conn.Write(buf.Bytes()) if err != nil { return fmt.Errorf("Write (chunk %d/%d): %s", i, nChunks, err) } if n != len(buf.Bytes()) { return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", i, nChunks, n, len(buf.Bytes())) } bytesLeft -= chunkLen } if bytesLeft != 0 { return fmt.Errorf("error: %d bytes left after sending", bytesLeft) } return nil } // 1k bytes buffer by default var bufPool = sync.Pool{ New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, 1024)) }, } func newBuffer() *bytes.Buffer { b := bufPool.Get().(*bytes.Buffer) if b != nil { b.Reset() return b } return bytes.NewBuffer(nil) } // WriteMessage sends the specified message to the GELF server // specified in the call to New(). It assumes all the fields are // filled out appropriately. In general, clients will want to use // Write, rather than WriteMessage. 
func (w *Writer) WriteMessage(m *Message) (err error) {
	// Serialize the message into a pooled buffer; returned to the pool
	// when this call finishes.
	mBuf := newBuffer()
	defer bufPool.Put(mBuf)
	if err = m.MarshalJSONBuf(mBuf); err != nil {
		return err
	}
	mBytes := mBuf.Bytes()

	var (
		zBuf   *bytes.Buffer
		zBytes []byte
	)

	// Optionally compress the serialized message. On CompressNone the
	// JSON bytes are sent as-is.
	var zw io.WriteCloser
	switch w.CompressionType {
	case CompressGzip:
		zBuf = newBuffer()
		defer bufPool.Put(zBuf)
		zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel)
	case CompressZlib:
		zBuf = newBuffer()
		defer bufPool.Put(zBuf)
		zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel)
	case CompressNone:
		zBytes = mBytes
	default:
		panic(fmt.Sprintf("unknown compression type %d",
			w.CompressionType))
	}
	// NOTE: when a constructor above fails it returns a typed-nil
	// *gzip.Writer/*zlib.Writer, so the zw != nil test is true even on
	// error — the inner err check below is what actually bails out.
	if zw != nil {
		if err != nil {
			return
		}
		if _, err = zw.Write(mBytes); err != nil {
			zw.Close()
			return
		}
		// Close flushes the compressor; must happen before reading
		// zBuf's bytes.
		zw.Close()
		zBytes = zBuf.Bytes()
	}

	// Payloads larger than one datagram are sent as GELF chunks.
	if numChunks(zBytes) > 1 {
		return w.writeChunked(zBytes)
	}
	n, err := w.conn.Write(zBytes)
	if err != nil {
		return
	}
	if n != len(zBytes) {
		return fmt.Errorf("bad write (%d/%d)", n, len(zBytes))
	}

	return nil
}

// Close connection and interrupt blocked Read or Write operations
func (w *Writer) Close() error {
	return w.conn.Close()
}

/*
func (w *Writer) Alert(m string) (err error)
func (w *Writer) Close() error
func (w *Writer) Crit(m string) (err error)
func (w *Writer) Debug(m string) (err error)
func (w *Writer) Emerg(m string) (err error)
func (w *Writer) Err(m string) (err error)
func (w *Writer) Info(m string) (err error)
func (w *Writer) Notice(m string) (err error)
func (w *Writer) Warning(m string) (err error)
*/

// getCaller returns the filename and the line info of a function
// further down in the call stack. Passing 0 in as callDepth would
// return info on the function calling getCallerIgnoringLog, 1 the
// parent function, and so on. Any suffixes passed to getCaller are
// path fragments like "/pkg/log/log.go", and functions in the call
// stack from that file are ignored.
func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) { // bump by 1 to ignore the getCaller (this) stackframe callDepth++ outer: for { var ok bool _, file, line, ok = runtime.Caller(callDepth) if !ok { file = "???" line = 0 break } for _, s := range suffixesToIgnore { if strings.HasSuffix(file, s) { callDepth++ continue outer } } break } return } func getCallerIgnoringLogMulti(callDepth int) (string, int) { // the +1 is to ignore this (getCallerIgnoringLogMulti) frame return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go") } // Write encodes the given string in a GELF message and sends it to // the server specified in New(). func (w *Writer) Write(p []byte) (n int, err error) { // 1 for the function that called us. file, line := getCallerIgnoringLogMulti(1) // remove trailing and leading whitespace p = bytes.TrimSpace(p) // If there are newlines in the message, use the first line // for the short message and set the full message to the // original input. If the input has no newlines, stick the // whole thing in Short. 
short := p full := []byte("") if i := bytes.IndexRune(p, '\n'); i > 0 { short = p[:i] full = p } m := Message{ Version: "1.1", Host: w.hostname, Short: string(short), Full: string(full), TimeUnix: float64(time.Now().Unix()), Level: 6, // info Facility: w.Facility, Extra: map[string]interface{}{ "_file": file, "_line": line, }, } if err = w.WriteMessage(&m); err != nil { return 0, err } return len(p), nil } func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error { b, err := json.Marshal(m) if err != nil { return err } // write up until the final } if _, err = buf.Write(b[:len(b)-1]); err != nil { return err } if len(m.Extra) > 0 { eb, err := json.Marshal(m.Extra) if err != nil { return err } // merge serialized message + serialized extra map if err = buf.WriteByte(','); err != nil { return err } // write serialized extra bytes, without enclosing quotes if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil { return err } } if len(m.RawExtra) > 0 { if err := buf.WriteByte(','); err != nil { return err } // write serialized extra bytes, without enclosing quotes if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil { return err } } // write final closing quotes return buf.WriteByte('}') } func (m *Message) UnmarshalJSON(data []byte) error { i := make(map[string]interface{}, 16) if err := json.Unmarshal(data, &i); err != nil { return err } for k, v := range i { if k[0] == '_' { if m.Extra == nil { m.Extra = make(map[string]interface{}, 1) } m.Extra[k] = v continue } switch k { case "version": m.Version = v.(string) case "host": m.Host = v.(string) case "short_message": m.Short = v.(string) case "full_message": m.Full = v.(string) case "timestamp": m.TimeUnix = v.(float64) case "level": m.Level = int32(v.(float64)) case "facility": m.Facility = v.(string) } } return nil }
// Integration-test helpers for edo-id-provider: spin up an in-memory
// identity provider plus a fake TA (client) server and drive the OpenID
// Connect authorization-code flow end to end.
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"encoding/json"
	"github.com/realglobe-Inc/edo/util"
	"github.com/realglobe-Inc/go-lib-rg/erro"
	"github.com/realglobe-Inc/go-lib-rg/rglog/level"
	"io/ioutil"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
	"time"
)

// Silence console logging for the whole test run.
func init() {
	util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
}

// Test-wide knobs: short IDs and deliberately tiny expiry durations so
// expiry-related paths are quick to exercise.
const (
	testIdLen = 5

	testUiUri = "/html"

	testCodExpiDur   = 10 * time.Millisecond
	testTokExpiDur   = 10 * time.Millisecond
	testIdTokExpiDur = 10 * time.Millisecond
	testSessExpiDur  = 10 * time.Millisecond

	testSigAlg = "RS256"
)

// Throwaway RSA key pair used as the IdP's signing key.
var testIdpPriKey crypto.PrivateKey
var testIdpPubKey crypto.PublicKey

// Generate the key once per run. 1024 bits is weak but fast — fine for
// tests.
func init() {
	priKey, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		panic(err)
	}
	testIdpPriKey = priKey
	testIdpPubKey = &priKey.PublicKey
}

// newTestSystem builds an in-memory id-provider whose UI directory
// contains empty select/login/consent HTML files.
// The caller must eventually remove the returned system's uiPath.
func newTestSystem(selfId string) *system {
	uiPath, err := ioutil.TempDir("", testLabel)
	if err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(filepath.Join(uiPath, selHtml), []byte{}, filePerm); err != nil {
		os.RemoveAll(uiPath)
		panic(err)
	}
	if err := ioutil.WriteFile(filepath.Join(uiPath, loginHtml), []byte{}, filePerm); err != nil {
		os.RemoveAll(uiPath)
		panic(err)
	}
	if err := ioutil.WriteFile(filepath.Join(uiPath, consHtml), []byte{}, filePerm); err != nil {
		os.RemoveAll(uiPath)
		panic(err)
	}
	// Positional initialization — field order must match the system
	// struct declaration elsewhere in this package.
	return &system{
		selfId,
		false,
		testIdLen,
		testIdLen,
		testUiUri,
		uiPath,
		newMemoryTaContainer(testStaleDur, testCaExpiDur),
		newMemoryAccountContainer(testStaleDur, testCaExpiDur),
		newMemoryConsentContainer(testStaleDur, testCaExpiDur),
		newMemorySessionContainer(testIdLen, "", testStaleDur, testCaExpiDur),
		newMemoryCodeContainer(testIdLen, "", testSavDur, testTicDur, testStaleDur, testCaExpiDur),
		newMemoryTokenContainer(testIdLen, "", testSavDur, testStaleDur, testCaExpiDur),
		// +2s below: values that pass through the protocol get rounded
		// to whole-second granularity.
		testCodExpiDur + 2*time.Second,
		testTokExpiDur + 2*time.Second,
		testIdTokExpiDur + 2*time.Second,
		testSessExpiDur + 2*time.Second,
		testSigAlg,
		"",
		testIdpPriKey,
	}
}

// setupTestIdp starts an edo-id-provider on a free port.
// When done, shut it down via shutCh and remove idpSys.uiPath.
func setupTestIdp(testAccs []*account, testTas []*ta) (idpSys *system, shutCh chan struct{}, err error) {
	port, err := util.FreePort()
	if err != nil {
		return nil, nil, erro.Wrap(err)
	}

	idpSys = newTestSystem("http://localhost:" + strconv.Itoa(port))
	for _, acc := range testAccs {
		idpSys.accCont.(*memoryAccountContainer).add(acc)
	}
	for _, ta_ := range testTas {
		idpSys.taCont.(*memoryTaContainer).add(ta_)
	}

	shutCh = make(chan struct{}, 10)
	go serve(idpSys, "tcp", "", port, "http", shutCh)
	return idpSys, shutCh, nil
}

// Merely booting must not panic.
func TestBoot(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	idpSys, shutCh, err := setupTestIdp(nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)
}

// setupTestTa starts a TA-impersonation test server based on testTa.
// Close it when done.
func setupTestTa(rediUriPaths []string) (ta_ *ta, rediUri, taKid string, taPriKey crypto.PrivateKey, taServ *util.TestHttpServer, err error) {
	taPort, err := util.FreePort()
	if err != nil {
		return nil, "", "", nil, nil, erro.Wrap(err)
	}
	taServer, err := util.NewTestHttpServer(taPort)
	if err != nil {
		return nil, "", "", nil, nil, erro.Wrap(err)
	}

	// Copy the template TA and point its identity at the local server.
	taBuff := *testTa
	taBuff.Id = "http://localhost:" + strconv.Itoa(taPort)
	if len(rediUriPaths) == 0 {
		// Default: a single /redirect_endpoint redirect URI.
		rediUri = taBuff.Id + "/redirect_endpoint"
		taBuff.RediUris = map[string]bool{rediUri: true}
	} else {
		// rediUri ends up holding the LAST registered URI.
		taBuff.RediUris = map[string]bool{}
		for _, v := range rediUriPaths {
			rediUri = taBuff.Id + v
			taBuff.RediUris[rediUri] = true
		}
	}

	return &taBuff, rediUri, testTaKid, testTaPriKey, taServer, nil
}

// setupTestTaAndIdp starts a TA impersonation server and an
// edo-id-provider together.
func setupTestTaAndIdp(rediUriPaths []string, testAccs []*account, testTas []*ta) (ta_ *ta,
	rediUri, taKid string, taPriKey crypto.PrivateKey, taServ *util.TestHttpServer, idpSys *system, shutCh chan struct{}, err error) {

	// TA impersonation server.
	ta_, rediUri, taKid, taPriKey, taServ, err = setupTestTa(rediUriPaths)
	if err != nil {
		return
	}
	defer func() {
		// Clean up the TA server if IdP startup fails below.
		if err != nil {
			taServ.Close()
		}
	}()

	// Prepare the edo-id-provider.
	// NOTE(review): the testAccs parameter is ignored — the call below
	// always registers []*account{testAcc}. Likely a bug; callers that
	// pass other accounts will silently not get them registered.
	idpSys, shutCh, err = setupTestIdp([]*account{testAcc}, append([]*ta{ta_}, testTas...))
	return
}

// testRequestAuthWithoutCheck sends an authentication request and
// returns the response unchecked. The caller must Close the response.
// A parameter whose value is the empty string is not set.
func testRequestAuthWithoutCheck(idpSys *system, cli *http.Client, authParams map[string]string) (*http.Response, error) {
	q := url.Values{}
	for k, v := range authParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	req, err := http.NewRequest("GET", idpSys.selfId+"/auth?"+q.Encode(), nil)
	if err != nil {
		return nil, erro.Wrap(err)
	}

	resp, err := cli.Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// testRequestAuth sends an authentication request and requires a 200.
// The caller must Close the response.
// A parameter whose value is the empty string is not set.
func testRequestAuth(idpSys *system, cli *http.Client, authParams map[string]string) (*http.Response, error) {
	resp, err := testRequestAuthWithoutCheck(idpSys, cli, authParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// testSelectAccountWithoutCheck: if the flow was redirected to the
// account-selection UI, selects an account and returns the response
// unchecked; otherwise passes authResp straight through.
// The caller must Close the returned Body.
// A parameter whose value is the empty string is not set.
func testSelectAccountWithoutCheck(idpSys *system, cli *http.Client, authResp *http.Response, selParams map[string]string) (*http.Response, error) {
	if authResp.Request.URL.Path != idpSys.uiUri+"/select.html" {
		// Not redirected to the account-selection UI.
		return authResp, nil
	}

	if selParams == nil {
		selParams = map[string]string{}
	}

	// The ticket is carried in the URL fragment.
	tic := authResp.Request.URL.Fragment
	q := url.Values{}
	for k, v := range selParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	// Unless the caller explicitly supplied an empty "ticket" (meaning
	// "omit it"), use the ticket from the fragment.
	if v, ok := selParams["ticket"]; !(ok && v == "") {
		q.Set("ticket", tic)
	}
	req, err := http.NewRequest("POST", idpSys.selfId+"/auth/select", strings.NewReader(q.Encode()))
	if err != nil {
		return nil, erro.Wrap(err)
	}
	req.Header.Set("Content-Type", util.ContentTypeForm)

	resp, err := cli.Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// testSelectAccount: like testSelectAccountWithoutCheck but requires a
// 200 response. The caller must Close the returned Body.
// A parameter whose value is the empty string is not set.
func testSelectAccount(idpSys *system, cli *http.Client, authResp *http.Response, selParams map[string]string) (*http.Response, error) {
	resp, err := testSelectAccountWithoutCheck(idpSys, cli, authResp, selParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// testLoginWithoutCheck: if the flow was redirected to the login UI,
// logs in and returns the response unchecked; otherwise passes selResp
// straight through. The caller must Close the returned Body.
// A parameter whose value is the empty string is not set.
func testLoginWithoutCheck(idpSys *system, cli *http.Client, selResp *http.Response, loginParams map[string]string) (*http.Response, error) {
	if selResp.Request.URL.Path != idpSys.uiUri+"/login.html" {
		// Not redirected to the login UI.
		return selResp, nil
	}

	if loginParams == nil {
		loginParams = map[string]string{}
	}

	tic := selResp.Request.URL.Fragment
	q := url.Values{}
	for k, v := range loginParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	// Same "omit the ticket on explicit empty value" convention as
	// account selection.
	if v, ok := loginParams["ticket"]; !(ok && v == "") {
		q.Set("ticket", tic)
	}
	req, err := http.NewRequest("POST", idpSys.selfId+"/auth/login", strings.NewReader(q.Encode()))
	if err != nil {
		return nil, erro.Wrap(err)
	}
	req.Header.Set("Content-Type", util.ContentTypeForm)

	resp, err := cli.Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// testLogin: like testLoginWithoutCheck but requires a 200 response.
// The caller must Close the returned Body.
// A parameter whose value is the empty string is not set.
func testLogin(idpSys *system, cli *http.Client, selResp *http.Response, loginParams map[string]string) (*http.Response, error) {
	resp, err :=
		testLoginWithoutCheck(idpSys, cli, selResp, loginParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// testConsentWithoutCheck: if the flow was redirected to the consent
// UI, consents and returns the response unchecked; otherwise passes
// loginResp straight through. The caller must Close the returned Body.
// A parameter whose value is the empty string is not set.
func testConsentWithoutCheck(idpSys *system, cli *http.Client, loginResp *http.Response, consParams map[string]string) (*http.Response, error) {
	if loginResp.Request.URL.Path != idpSys.uiUri+"/consent.html" {
		// Not redirected to the consent UI.
		return loginResp, nil
	}

	if consParams == nil {
		consParams = map[string]string{}
	}

	tic := loginResp.Request.URL.Fragment
	q := url.Values{}
	for k, v := range consParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	if v, ok := consParams["ticket"]; !(ok && v == "") {
		q.Set("ticket", tic)
	}
	// NOTE(review): this unconditional Set overrides the conditional
	// just above, so a caller can never omit/override the ticket here —
	// unlike testSelectAccountWithoutCheck and testLoginWithoutCheck.
	// Likely a copy-paste bug.
	q.Set("ticket", tic)
	req, err := http.NewRequest("POST", idpSys.selfId+"/auth/consent", strings.NewReader(q.Encode()))
	if err != nil {
		return nil, erro.Wrap(err)
	}
	req.Header.Set("Content-Type", util.ContentTypeForm)

	resp, err := cli.Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// testConsent: like testConsentWithoutCheck but requires a 200
// response. The caller must Close the returned Body.
// A parameter whose value is the empty string is not set.
func testConsent(idpSys *system, cli *http.Client, loginResp *http.Response, consParams map[string]string) (*http.Response, error) {
	resp, err := testConsentWithoutCheck(idpSys, cli, loginResp, consParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// testGetTokenWithoutCheck sends a token request and returns the
// response unchecked. The caller must Close the returned Body.
// A parameter whose value is empty is not set.
func testGetTokenWithoutCheck(idpSys *system, consResp *http.Response, assHeads, assClms map[string]interface{}, reqParams map[string]string,
	kid string, sigKey crypto.PrivateKey) (*http.Response, error) {
	cod := consResp.Request.FormValue("code")
	if cod == "" {
		util.LogRequest(level.ERR, consResp.Request, true)
		return nil, erro.New("no code")
	}

	// Obtained the authorization code.

	if assHeads == nil {
		assHeads = map[string]interface{}{}
	}
	if assClms == nil {
		assClms = map[string]interface{}{}
	}
	if reqParams == nil {
		reqParams = map[string]string{}
	}

	// Prepare the client-authentication assertion (a signed JWS).
	assJws := util.NewJws()
	for k, v := range assHeads {
		assJws.SetHeader(k, v)
	}
	for k, v := range assClms {
		assJws.SetClaim(k, v)
	}
	// Default the "code" claim to the authorization code unless the
	// caller set it explicitly.
	if _, ok := assClms["code"]; !ok {
		assJws.SetClaim("code", cod)
	}
	if err := assJws.Sign(map[string]crypto.PrivateKey{kid: sigKey}); err != nil {
		return nil, erro.Wrap(err)
	}
	assBuff, err := assJws.Encode()
	if err != nil {
		return nil, erro.Wrap(err)
	}
	ass := string(assBuff)

	q := url.Values{}
	for k, v := range reqParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	// "code" and "client_assertion" default in unless explicitly
	// suppressed with an empty value.
	if v, ok := reqParams["code"]; !(ok && v == "") {
		q.Set("code", cod)
	}
	if v, ok := reqParams["client_assertion"]; !(ok && v == "") {
		q.Set("client_assertion", ass)
	}
	req, err := http.NewRequest("POST", idpSys.selfId+"/token", strings.NewReader(q.Encode()))
	if err != nil {
		return nil, erro.Wrap(err)
	}
	req.Header.Set("Content-Type", util.ContentTypeForm)

	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// testGetToken obtains an access token. The return value is the
// unmarshalled JSON response. A parameter whose value is empty is not
// set.
func testGetToken(idpSys *system, consResp *http.Response, assHeads, assClms map[string]interface{}, reqParams map[string]string,
	kid string, sigKey crypto.PrivateKey) (map[string]interface{}, error) {
	resp, err := testGetTokenWithoutCheck(idpSys, consResp, assHeads, assClms, reqParams, kid, sigKey)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}

	var res map[string]interface{}
	if data, err := ioutil.ReadAll(resp.Body); err != nil {
		util.LogResponse(level.ERR, resp, true)
		return nil, erro.Wrap(err)
	} else if err := json.Unmarshal(data, &res); err != nil {
		util.LogResponse(level.ERR, resp, true)
		return nil, erro.Wrap(err)
	}
	return res, nil
}

// testGetAccountInfoWithoutCheck fetches account info (userinfo) and
// returns the response unchecked. The caller must Close the returned
// Body. A parameter whose value is empty is not set.
func testGetAccountInfoWithoutCheck(idpSys *system, tokRes map[string]interface{}, reqHeads map[string]string) (*http.Response, error) {
	tok, _ := tokRes["access_token"].(string)
	if tok == "" {
		return nil, erro.New("no access token")
	}

	// Obtained the access token.

	req, err := http.NewRequest("GET", idpSys.selfId+"/userinfo", nil)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	for k, v := range reqHeads {
		if v != "" {
			req.Header.Set(k, v)
		}
	}
	// Default the bearer header unless explicitly suppressed.
	if v, ok := reqHeads["Authorization"]; !(ok && v == "") {
		req.Header.Set("Authorization", "Bearer "+tok)
	}

	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// testGetAccountInfo fetches account info. The return value is the
// unmarshalled JSON response. A parameter whose value is empty is not
// set.
func testGetAccountInfo(idpSys *system, tokRes map[string]interface{}, reqHeads map[string]string) (map[string]interface{}, error) {
	resp, err := testGetAccountInfoWithoutCheck(idpSys, tokRes, reqHeads)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		// NOTE(review): redundant — Close is already deferred above.
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}

	var res map[string]interface{}
	if data, err := ioutil.ReadAll(resp.Body); err != nil {
		return nil, erro.Wrap(err)
	} else if err := json.Unmarshal(data, &res); err != nil {
		return nil, erro.Wrap(err)
	}
	return res, nil
}

// testFromRequestAuthToConsent runs the flow from the authentication
// request through authorization-code retrieval.
func testFromRequestAuthToConsent(idpSys *system, cli *http.Client, authParams, selParams, loginParams, consParams map[string]string) (*http.Response, error) {
	// Send the request.
	authResp, err :=
		testRequestAuth(idpSys, cli, authParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	defer authResp.Body.Close()

	// Select an account if necessary.
	selResp, err := testSelectAccount(idpSys, cli, authResp, selParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	defer selResp.Body.Close()

	// Log in if necessary.
	loginResp, err := testLogin(idpSys, cli, selResp, loginParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	defer loginResp.Body.Close()

	// Consent if necessary.
	return testConsent(idpSys, cli, loginResp, consParams)
}

// testGetTokenAndAccountInfo runs the flow from the token request
// through account-info retrieval.
func testGetTokenAndAccountInfo(idpSys *system, consResp *http.Response, assHeads, assClms map[string]interface{}, tokParams map[string]string,
	kid string, sigKey crypto.PrivateKey, accInfHeads map[string]string) (map[string]interface{}, error) {
	// Obtain an access token.
	tokRes, err := testGetToken(idpSys, consResp, assHeads, assClms, tokParams, kid, sigKey)
	if err != nil {
		return nil, erro.Wrap(err)
	}

	// Obtain account information.
	return testGetAccountInfo(idpSys, tokRes, accInfHeads)
}

// testFromRequestAuthToGetAccountInfo runs the flow from the
// authentication request through account-info retrieval.
func testFromRequestAuthToGetAccountInfo(idpSys *system, cli *http.Client,
	authParams, selParams, loginParams, consParams map[string]string,
	assHeads, assClms map[string]interface{}, tokParams map[string]string, kid string, sigKey crypto.PrivateKey,
	accInfHeads map[string]string) (map[string]interface{}, error) {

	// Run from request through consent.
	consResp, err := testFromRequestAuthToConsent(idpSys, cli, authParams, selParams, loginParams, consParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	defer consResp.Body.Close()

	// Obtain an access token, then account information.
	return testGetTokenAndAccountInfo(idpSys, consResp, assHeads, assClms, tokParams, kid, sigKey, accInfHeads)
}

// Can we authenticate and then retrieve account information?
func TestSuccess(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err :=
		setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}
	// Argument order: auth params, account-selection params, login
	// params, consent params, assertion headers, assertion claims,
	// token params, key id, signing key, userinfo headers.
	if res, err := testFromRequestAuthToGetAccountInfo(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "code",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
		"prompt":        "select_account login consent",
	}, map[string]string{
		"username": testAcc.name(),
	}, map[string]string{
		"username": testAcc.name(),
		"password": testAcc.password(),
	}, map[string]string{
		"consented_scope": "openid email",
	}, map[string]interface{}{
		"alg": "RS256",
		"kid": kid,
	}, map[string]interface{}{
		"iss": testTa2.id(),
		"sub": testTa2.id(),
		"aud": idpSys.selfId + "/token",
		"jti": strconv.FormatInt(time.Now().UnixNano(), 16),
		"exp": time.Now().Add(idpSys.idTokExpiDur).Unix(),
	}, map[string]string{
		"grant_type":            "authorization_code",
		"redirect_uri":          rediUri,
		"client_id":             testTa2.id(),
		"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
	}, kid, sigKey, nil); err != nil {
		t.Fatal(err)
	} else if em, _ := res["email"].(string); em != testAcc.attribute("email") {
		t.Fatal(em, testAcc.attribute("email"))
	}
}

// Are unknown parameters ignored?
func TestIgnoreUnknownParameterInAuthRequest(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}
	// Same happy-path flow as TestSuccess, with extra unknown
	// parameters sprinkled into the auth request, assertion claims and
	// token request — all of which must be ignored.
	if res, err := testFromRequestAuthToGetAccountInfo(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "code",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
		"unknown_name":  "unknown_value",
	}, map[string]string{
		"username": testAcc.name(),
	}, map[string]string{
		"username": testAcc.name(),
		"password": testAcc.password(),
	}, map[string]string{
		"consented_scope": "openid email",
	}, map[string]interface{}{
		"alg": "RS256",
		"kid": kid,
	}, map[string]interface{}{
		"iss":     testTa2.id(),
		"sub":     testTa2.id(),
		"aud":     idpSys.selfId + "/token",
		"jti":     strconv.FormatInt(time.Now().UnixNano(), 16),
		"exp":     time.Now().Add(idpSys.idTokExpiDur).Unix(),
		"unknown": "unknown",
	}, map[string]string{
		"grant_type":            "authorization_code",
		"redirect_uri":          rediUri,
		"client_id":             testTa2.id(),
		"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
		"unknown":               "unknown",
	}, kid, sigKey, nil); err != nil {
		t.Fatal(err)
	} else if em, _ := res["email"].(string); em != testAcc.attribute("email") {
		t.Fatal(em, testAcc.attribute("email"))
	}
}

// Are duplicated parameters in the authentication request rejected?
func TestDenyOverlapParameterInAuthRequest(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	// Build the request by hand so "scope" can appear twice.
	req, err := http.NewRequest("GET",
		idpSys.selfId+"/auth?"+url.Values{
			"scope":         {"openid email"},
			"response_type": {"code"},
			"client_id":     {testTa2.id()},
			"redirect_uri":  {rediUri},
		}.Encode()+"&scope=aaaa", nil)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		util.LogRequest(level.ERR, req, true)
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(resp.StatusCode, http.StatusOK)
	} else if resp.Request.FormValue(formErr) == "" {
		t.Fatal("no error")
	}
}

// Is an authentication request without client_id rejected?
func TestDenyNoClientIdInAuthRequest(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	_, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}
	// Empty value means "do not set client_id".
	resp, err := testRequestAuthWithoutCheck(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "code",
		"client_id":     "",
		"redirect_uri":  rediUri,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// No valid client: the error must come back directly, not via
	// redirect.
	if resp.StatusCode != http.StatusBadRequest {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(resp.StatusCode, http.StatusBadRequest)
	}
	var res struct{ Error string }
	if data, err := ioutil.ReadAll(resp.Body); err != nil {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(err)
	} else if err := json.Unmarshal(data, &res); err != nil {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(err)
	} else if res.Error != errInvReq {
		t.Fatal(res.Error, errInvReq)
	}
}

// Is an authentication request without response_type rejected?
func TestDenyNoResponseTypeInAuthRequest(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}
	// Empty value means "do not set response_type"; the error comes
	// back via redirect to the TA.
	resp, err := testRequestAuth(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.Request.FormValue(formErr) != errInvReq {
		t.Fatal(resp.Request.FormValue(formErr), errInvReq)
	}
}

// Is an unknown response_type in the authentication request rejected?
func TestDenyUnknownResponseTypeInAuthRequest(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}
	resp, err := testRequestAuth(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "unknown",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.Request.FormValue(formErr) != errUnsuppRespType {
		// NOTE(review): the condition checks errUnsuppRespType but the
		// failure message prints errInvReq — the second Fatal argument
		// looks like a copy-paste slip.
		t.Fatal(resp.Request.FormValue(formErr), errInvReq)
	}
}

// Are redirect_uri query parameters preserved when returning the
// result via redirect?
func TestKeepRedirectUriParameter(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	// Register a redirect URI that itself carries a query parameter.
	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp([]string{"/redirect_endpoint?param_name=param_value"}, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Pre-set the response for when the redirect back to the TA
	// succeeds.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}
	resp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "code",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
	}, map[string]string{
		"username": testAcc.name(),
	}, map[string]string{
		"username": testAcc.name(),
		"password": testAcc.password(),
	}, map[string]string{
		"consented_scope": "openid email",
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// Both the code and the original query parameter must survive the
	// redirect.
	if q := resp.Request.URL.Query(); q.Get("code") == "" {
		t.Fatal("no code")
	} else if q.Get("param_name") != "param_value" {
		t.Fatal(q.Get("param_name"), "param_value")
	}
}

// Are redirect_uri query parameters preserved when returning an error
// via redirect?
func TestKeepRedirectUriParameterInError(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err :=
setupTestTaAndIdp([]string{"/redirect_endpoint?param_name=param_value"}, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Preload the response the TA test server returns once the redirect lands.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}

	resp, err := testRequestAuth(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "unknown",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// The error must come back via redirect, and the redirect_uri's own query
	// parameter must survive the round trip.
	if q := resp.Request.URL.Query(); q.Get("error") != errUnsuppRespType {
		t.Fatal(q.Get("error"), errUnsuppRespType)
	} else if q.Get("param_name") != "param_value" {
		t.Fatal(q.Get("param_name"), "param_value")
	}
}

// Can a request be rejected directly, without redirecting, when redirect_uri
// differs from the registered value?
func TestDirectErrorResponseInInvalidRedirectUri(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Preload the response the TA test server returns once the redirect lands.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}

	resp, err := testRequestAuthWithoutCheck(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "code",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri + "/a",
	})
	// FIX: the error was previously ignored before dereferencing resp; a failed
	// request would have made `defer resp.Body.Close()` panic on a nil response.
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// Without a valid redirect_uri the provider must answer directly with 400.
	if resp.StatusCode != http.StatusBadRequest {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(resp.StatusCode, http.StatusBadRequest)
	}
	var res struct{ Error string }
	if data, err := ioutil.ReadAll(resp.Body); err != nil {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(err)
	} else if err := json.Unmarshal(data, &res); err != nil {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(err)
	} else if res.Error != errInvReq {
		t.Fatal(res.Error, errInvReq)
	}
}

// Can a request be rejected directly, without redirecting, when redirect_uri
// is missing?
func TestDirectErrorResponseInNoRedirectUri(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, _, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Preload the response the TA test server returns once the redirect lands.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}

	resp, err := testRequestAuthWithoutCheck(idpSys, cli, map[string]string{
		"scope":         "openid email",
		"response_type": "code",
		"client_id":     testTa2.id(),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusBadRequest {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(resp.StatusCode, http.StatusBadRequest)
	}
	var res struct{ Error string }
	if data, err := ioutil.ReadAll(resp.Body); err != nil {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(err)
	} else if err := json.Unmarshal(data, &res); err != nil {
		util.LogResponse(level.ERR, resp, true)
		t.Fatal(err)
	} else if res.Error != errInvReq {
		t.Fatal(res.Error, errInvReq)
	}
}

// Can a non-POST token request be rejected?
func TestDenyNonPostTokenRequest(t *testing.T) {
	// //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer
util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) for _, meth := range []string{"GET", "PUT"} { // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() cod := consResp.Request.FormValue("code") if cod == "" { util.LogRequest(level.ERR, consResp.Request, true) t.Fatal("no code") } // 認可コードを取得できた。 assJws := util.NewJws() assJws.SetHeader("alg", "RS256") assJws.SetHeader("kid", kid) assJws.SetClaim("iss", testTa2.id()) assJws.SetClaim("sub", testTa2.id()) assJws.SetClaim("aud", idpSys.selfId+"/token") assJws.SetClaim("jti", strconv.FormatInt(time.Now().UnixNano(), 16)) assJws.SetClaim("exp", time.Now().Add(idpSys.idTokExpiDur).Unix()) assJws.SetClaim("code", cod) if err := assJws.Sign(map[string]crypto.PrivateKey{kid: sigKey}); err != nil { t.Fatal(err) } assBuff, err := assJws.Encode() if err != nil { t.Fatal(err) } ass := string(assBuff) req, err := http.NewRequest(meth, idpSys.selfId+"/token", strings.NewReader(url.Values{ "grant_type": {"authorization_code"}, "redirect_uri": {rediUri}, "client_id": {testTa2.id()}, "client_assertion_type": 
{"urn:ietf:params:oauth:client-assertion-type:jwt-bearer"}, "code": {cod}, "client_assertion": {ass}, }.Encode())) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, err := (&http.Client{}).Do(req) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusMethodNotAllowed { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusMethodNotAllowed) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } } // トークンリクエストの未知のパラメータを無視できるか。 func TestIgnoreUnknownParameterInTokenRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} if res, err := testFromRequestAuthToGetAccountInfo(idpSys, cli, map[string]string{ "scope": "openid", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ 
"consented_scope": "openid email", }, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", "unknown_name": "unknown_value", }, kid, sigKey, nil); err != nil { t.Fatal(err) } else if em, _ := res["email"].(string); em != testAcc.attribute("email") { t.Fatal(em, testAcc.attribute("email")) } } // トークンリクエストのパラメータが重複していたら拒否できるか。 func TestDenyOverlapParameterInTokenRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() cod := consResp.Request.FormValue("code") if cod == "" { util.LogRequest(level.ERR, consResp.Request, 
true) t.Fatal("no code") } // 認可コードを取得できた。 assJws := util.NewJws() assJws.SetHeader("alg", "RS256") assJws.SetHeader("kid", kid) assJws.SetClaim("iss", testTa2.id()) assJws.SetClaim("sub", testTa2.id()) assJws.SetClaim("aud", idpSys.selfId+"/token") assJws.SetClaim("jti", strconv.FormatInt(time.Now().UnixNano(), 16)) assJws.SetClaim("exp", time.Now().Add(idpSys.idTokExpiDur).Unix()) assJws.SetClaim("code", cod) if err := assJws.Sign(map[string]crypto.PrivateKey{kid: sigKey}); err != nil { t.Fatal(err) } assBuff, err := assJws.Encode() if err != nil { t.Fatal(err) } ass := string(assBuff) req, err := http.NewRequest("POST", idpSys.selfId+"/token", strings.NewReader(url.Values{ "grant_type": {"authorization_code"}, "redirect_uri": {rediUri}, "client_id": {testTa2.id()}, "client_assertion_type": {"urn:ietf:params:oauth:client-assertion-type:jwt-bearer"}, "code": {cod}, "client_assertion": {ass}, }.Encode()+"&grant_type=authorization_code")) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, err := (&http.Client{}).Do(req) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } // トークンリクエストで grant_type が authorization_code なのに client_id が無かったら拒否できるか。 func TestDenyTokenRequestWithoutClientId(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer 
util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() resp, err := testGetTokenWithoutCheck(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": "", "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { 
t.Fatal(res.Error, errInvReq) } } // 認可コードが 2 回使われたら拒否できるか。 func TestDenyUsedCode(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() // 1 回目はアクセストークンを取得できる。 tokRes, err := testGetToken(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } else if tokRes["access_token"] == "" { t.Fatal(tokRes) } // 2 回目は拒否される。 resp, err := testGetTokenWithoutCheck(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": 
testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvGrnt { t.Fatal(res.Error, errInvGrnt) } } // 2 回使われた認可コードで発行したアクセストークンを無効にできるか。 func _TestDisableTokenOfUsedCode(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ 
"consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() // アクセストークンを取得してアカウント情報も取得する。 tokRes, err := testGetToken(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } if res, err := testGetAccountInfo(idpSys, tokRes, nil); err != nil { t.Fatal(err) } else if em, _ := res["email"].(string); em != testAcc.attribute("email") { t.Fatal(em, testAcc.attribute("email")) } // もう一度アクセストークンを要求して拒否される。 tokResp, err := testGetTokenWithoutCheck(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } tokResp.Body.Close() // 拒否されていることは別テスト。 // さっき取得したアクセストークンでのアカウント情報取得も拒否される。 resp, err := testGetAccountInfoWithoutCheck(idpSys, tokRes, nil) if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, 
true)
		t.Fatal(err)
	} else if res.Error != errInvTok {
		t.Fatal(res.Error, errInvTok)
	}
}

// Can an authentication request without scope be rejected?
func TestDenyAuthRequestWithoutScope(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Preload the response the TA test server returns once the redirect lands.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}

	// An empty value makes the helper omit the scope parameter entirely.
	resp, err := testRequestAuth(idpSys, cli, map[string]string{
		"scope":         "",
		"response_type": "code",
		"client_id":     testTa2.id(),
		"redirect_uri":  rediUri,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// FIX: the condition was inverted (`== errInvReq` → Fatal), which made the
	// test fail precisely when the expected invalid_request error was returned.
	// Every sibling error test asserts with `!=`, as done here.
	if q := resp.Request.URL.Query(); q.Get("error") != errInvReq {
		t.Fatal(q.Get("error"), errInvReq)
	}
}

// Is the authentication progress discarded when an error occurs mid-flow?
func TestAbortSession(t *testing.T) {
	// ////////////////////////////////
	// util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL)
	// defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF)
	// ////////////////////////////////

	testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer taServ.Close()
	defer os.RemoveAll(idpSys.uiPath)
	defer func() { shutCh <- struct{}{} }()

	// Two responses: this test drives the flow to the TA twice.
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))
	taServ.AddResponse(http.StatusOK, nil, []byte("success"))

	// Wait for the server to start.
	time.Sleep(10 * time.Millisecond)

	cookJar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatal(err)
	}
	cli := &http.Client{Jar: cookJar}

	// Issue the request.
	authResp, err :=
testRequestAuth(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, "prompt": "select_account", "unknown": "unknown", }) if err != nil { t.Fatal(err) } defer authResp.Body.Close() // アカウント選択でアカウント選択券を渡さないで認証経過をリセット。 selResp, err := testSelectAccount(idpSys, cli, authResp, map[string]string{ "username": testAcc.name(), "ticket": "", }) if err != nil { t.Fatal(err) } defer selResp.Body.Close() if selResp.Request.FormValue(formErr) != errAccDeny { t.Fatal(selResp.Request.FormValue(formErr), errAccDeny) } // アカウント選択でさっきのアカウント選択券を渡す。 resp, err := testSelectAccountWithoutCheck(idpSys, cli, authResp, map[string]string{ "username": testAcc.name(), }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } テスト用パーツ関数の引数の扱いを修正 package main import ( "crypto" "crypto/rand" "crypto/rsa" "encoding/json" "github.com/realglobe-Inc/edo/util" "github.com/realglobe-Inc/go-lib-rg/erro" "github.com/realglobe-Inc/go-lib-rg/rglog/level" "io/ioutil" "net/http" "net/http/cookiejar" "net/url" "os" "path/filepath" "strconv" "strings" "testing" "time" ) func init() { util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) } const ( testIdLen = 5 testUiUri = "/html" testCodExpiDur = 10 * time.Millisecond testTokExpiDur = 10 * time.Millisecond testIdTokExpiDur = 10 * time.Millisecond testSessExpiDur = 10 * time.Millisecond testSigAlg = "RS256" ) var testIdpPriKey crypto.PrivateKey var testIdpPubKey crypto.PublicKey func init() { priKey, err := 
rsa.GenerateKey(rand.Reader, 1024) if err != nil { panic(err) } testIdpPriKey = priKey testIdpPubKey = &priKey.PublicKey } func newTestSystem(selfId string) *system { uiPath, err := ioutil.TempDir("", testLabel) if err != nil { panic(err) } if err := ioutil.WriteFile(filepath.Join(uiPath, selHtml), []byte{}, filePerm); err != nil { os.RemoveAll(uiPath) panic(err) } if err := ioutil.WriteFile(filepath.Join(uiPath, loginHtml), []byte{}, filePerm); err != nil { os.RemoveAll(uiPath) panic(err) } if err := ioutil.WriteFile(filepath.Join(uiPath, consHtml), []byte{}, filePerm); err != nil { os.RemoveAll(uiPath) panic(err) } return &system{ selfId, false, testIdLen, testIdLen, testUiUri, uiPath, newMemoryTaContainer(testStaleDur, testCaExpiDur), newMemoryAccountContainer(testStaleDur, testCaExpiDur), newMemoryConsentContainer(testStaleDur, testCaExpiDur), newMemorySessionContainer(testIdLen, "", testStaleDur, testCaExpiDur), newMemoryCodeContainer(testIdLen, "", testSavDur, testTicDur, testStaleDur, testCaExpiDur), newMemoryTokenContainer(testIdLen, "", testSavDur, testStaleDur, testCaExpiDur), testCodExpiDur + 2*time.Second, // 以下、プロトコルを通すと粒度が秒になるため。 testTokExpiDur + 2*time.Second, testIdTokExpiDur + 2*time.Second, testSessExpiDur + 2*time.Second, testSigAlg, "", testIdpPriKey, } } // edo-id-provider を立てる。 // 使い終わったら shutCh で終了させ、idpSys.uiPath を消すこと func setupTestIdp(testAccs []*account, testTas []*ta) (idpSys *system, shutCh chan struct{}, err error) { port, err := util.FreePort() if err != nil { return nil, nil, erro.Wrap(err) } idpSys = newTestSystem("http://localhost:" + strconv.Itoa(port)) for _, acc := range testAccs { idpSys.accCont.(*memoryAccountContainer).add(acc) } for _, ta_ := range testTas { idpSys.taCont.(*memoryTaContainer).add(ta_) } shutCh = make(chan struct{}, 10) go serve(idpSys, "tcp", "", port, "http", shutCh) return idpSys, shutCh, nil } // 起動しただけでパニックを起こさないこと。 func TestBoot(t *testing.T) { // //////////////////////////////// // 
util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// idpSys, shutCh, err := setupTestIdp(nil, nil) if err != nil { t.Fatal(err) } defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) } // testTa を基に TA 偽装用テストサーバーを立てる。 // 使い終わったら Close すること。 func setupTestTa(rediUriPaths []string) (ta_ *ta, rediUri, taKid string, taPriKey crypto.PrivateKey, taServ *util.TestHttpServer, err error) { taPort, err := util.FreePort() if err != nil { return nil, "", "", nil, nil, erro.Wrap(err) } taServer, err := util.NewTestHttpServer(taPort) if err != nil { return nil, "", "", nil, nil, erro.Wrap(err) } taBuff := *testTa taBuff.Id = "http://localhost:" + strconv.Itoa(taPort) if len(rediUriPaths) == 0 { rediUri = taBuff.Id + "/redirect_endpoint" taBuff.RediUris = map[string]bool{rediUri: true} } else { taBuff.RediUris = map[string]bool{} for _, v := range rediUriPaths { rediUri = taBuff.Id + v taBuff.RediUris[rediUri] = true } } return &taBuff, rediUri, testTaKid, testTaPriKey, taServer, nil } // TA 偽装サーバーと edo-id-provider を立てる。 func setupTestTaAndIdp(rediUriPaths []string, testAccs []*account, testTas []*ta) (ta_ *ta, rediUri, taKid string, taPriKey crypto.PrivateKey, taServ *util.TestHttpServer, idpSys *system, shutCh chan struct{}, err error) { // TA 偽装サーバー。 ta_, rediUri, taKid, taPriKey, taServ, err = setupTestTa(rediUriPaths) if err != nil { return } defer func() { if err != nil { taServ.Close() } }() // edo-id-provider を用意。 idpSys, shutCh, err = setupTestIdp([]*account{testAcc}, append([]*ta{ta_}, testTas...)) return } // 認証リクエストを出し結果を無検査で返す。 // 返り値を Close すること。 // パラメータ値が空文字列なら、そのパラメータを設定しない。 func testRequestAuthWithoutCheck(idpSys *system, cli *http.Client, authParams map[string]string) (*http.Response, error) { q := url.Values{} for k, v := range authParams { if v != "" { q.Set(k, v) } } req, err := 
http.NewRequest("GET", idpSys.selfId+"/auth?"+q.Encode(), nil) if err != nil { return nil, erro.Wrap(err) } resp, err := cli.Do(req) if err != nil { return nil, erro.Wrap(err) } return resp, nil } // 認証リクエストを出す。 // 返り値を Close すること。 // パラメータ値が空文字列なら、そのパラメータを設定しない。 func testRequestAuth(idpSys *system, cli *http.Client, authParams map[string]string) (*http.Response, error) { resp, err := testRequestAuthWithoutCheck(idpSys, cli, authParams) if err != nil { return nil, erro.Wrap(err) } if resp.StatusCode != http.StatusOK { util.LogResponse(level.ERR, resp, true) resp.Body.Close() return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode)) } return resp, nil } // アカウント選択 UI にリダイレクトされてたらアカウント選択して結果を無検査で返す。 // 返り値の Body を Close すること。 // パラメータ値が空文字列なら、そのパラメータを設定しない。 func testSelectAccountWithoutCheck(idpSys *system, cli *http.Client, authResp *http.Response, selParams map[string]string) (*http.Response, error) { if authResp.Request.URL.Path != idpSys.uiUri+"/select.html" { // アカウント選択 UI にリダイレクトされてない。 return authResp, nil } if selParams == nil { selParams = map[string]string{} } tic := authResp.Request.URL.Fragment q := url.Values{} for k, v := range selParams { if v != "" { q.Set(k, v) } } if _, ok := selParams["ticket"]; !ok { q.Set("ticket", tic) } req, err := http.NewRequest("POST", idpSys.selfId+"/auth/select", strings.NewReader(q.Encode())) if err != nil { return nil, erro.Wrap(err) } req.Header.Set("Content-Type", util.ContentTypeForm) resp, err := cli.Do(req) if err != nil { return nil, erro.Wrap(err) } return resp, nil } // アカウント選択 UI にリダイレクトされてたらアカウント選択する。 // 返り値の Body を Close すること。 // パラメータ値が空文字列なら、そのパラメータを設定しない。 func testSelectAccount(idpSys *system, cli *http.Client, authResp *http.Response, selParams map[string]string) (*http.Response, error) { resp, err := testSelectAccountWithoutCheck(idpSys, cli, authResp, selParams) if err != nil { return nil, erro.Wrap(err) } if resp.StatusCode != http.StatusOK { 
util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// If redirected to the login UI, log in and return the result unchecked.
// The caller must Close the returned Body.
// An empty parameter value means the parameter is omitted.
func testLoginWithoutCheck(idpSys *system, cli *http.Client, selResp *http.Response, loginParams map[string]string) (*http.Response, error) {
	if selResp.Request.URL.Path != idpSys.uiUri+"/login.html" {
		// Not redirected to the login UI.
		return selResp, nil
	}

	if loginParams == nil {
		loginParams = map[string]string{}
	}

	tic := selResp.Request.URL.Fragment
	q := url.Values{}
	for k, v := range loginParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	// Only fall back to the ticket from the redirect fragment when the caller
	// did not supply one (an explicit empty string suppresses it).
	if _, ok := loginParams["ticket"]; !ok {
		q.Set("ticket", tic)
	}
	req, err := http.NewRequest("POST", idpSys.selfId+"/auth/login", strings.NewReader(q.Encode()))
	if err != nil {
		return nil, erro.Wrap(err)
	}
	req.Header.Set("Content-Type", util.ContentTypeForm)

	resp, err := cli.Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// If redirected to the login UI, log in.
// The caller must Close the returned Body.
// An empty parameter value means the parameter is omitted.
func testLogin(idpSys *system, cli *http.Client, selResp *http.Response, loginParams map[string]string) (*http.Response, error) {
	resp, err := testLoginWithoutCheck(idpSys, cli, selResp, loginParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// If redirected to the consent UI, give consent and return the result unchecked.
// The caller must Close the returned Body.
// An empty parameter value means the parameter is omitted.
func testConsentWithoutCheck(idpSys *system, cli *http.Client, loginResp *http.Response, consParams map[string]string) (*http.Response, error) {
	if loginResp.Request.URL.Path != idpSys.uiUri+"/consent.html" {
		// Not redirected to the consent UI.
		return loginResp, nil
	}

	if consParams == nil {
		consParams = map[string]string{}
	}

	tic := loginResp.Request.URL.Fragment
	q := url.Values{}
	for k, v := range consParams {
		if v != "" {
			q.Set(k, v)
		}
	}
	// FIX: a stray unconditional q.Set("ticket", tic) used to follow this
	// guard, overwriting any caller-supplied ticket and making an explicit
	// empty "ticket" impossible to send — unlike the select/login helpers.
	if _, ok := consParams["ticket"]; !ok {
		q.Set("ticket", tic)
	}
	req, err := http.NewRequest("POST", idpSys.selfId+"/auth/consent", strings.NewReader(q.Encode()))
	if err != nil {
		return nil, erro.Wrap(err)
	}
	req.Header.Set("Content-Type", util.ContentTypeForm)

	resp, err := cli.Do(req)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	return resp, nil
}

// If redirected to the consent UI, give consent.
// The caller must Close the returned Body.
// An empty parameter value means the parameter is omitted.
func testConsent(idpSys *system, cli *http.Client, loginResp *http.Response, consParams map[string]string) (*http.Response, error) {
	resp, err := testConsentWithoutCheck(idpSys, cli, loginResp, consParams)
	if err != nil {
		return nil, erro.Wrap(err)
	}
	if resp.StatusCode != http.StatusOK {
		util.LogResponse(level.ERR, resp, true)
		resp.Body.Close()
		return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode))
	}
	return resp, nil
}

// Make a token request and return the result unchecked.
// The caller must Close the returned Body.
// An empty parameter value means the parameter is omitted.
func testGetTokenWithoutCheck(idpSys *system, consResp *http.Response, assHeads, assClms map[string]interface{}, reqParams map[string]string, kid string, sigKey crypto.PrivateKey) (*http.Response, error) {
	cod := consResp.Request.FormValue("code")
	if cod == "" {
		util.LogRequest(level.ERR, consResp.Request, true)
		return nil, erro.New("no code")
	}
	// Got the authorization code.

	if assHeads == nil {
		assHeads = map[string]interface{}{}
	}
	if assClms == nil {
		assClms = map[string]interface{}{}
	}
	if reqParams == nil {
		reqParams = map[string]string{}
	}

	// Build the client-authentication assertion JWS.
	assJws := util.NewJws()
	for k, v := range assHeads {
		assJws.SetHeader(k, v)
	}
	for k, v := range assClms {
		assJws.SetClaim(k, v)
	}
	if _, ok := assClms["code"]; !ok {
		assJws.SetClaim("code", cod)
	}
	if err := assJws.Sign(map[string]crypto.PrivateKey{kid: sigKey}); err != nil {
		return nil, erro.Wrap(err)
	}
assBuff, err := assJws.Encode() if err != nil { return nil, erro.Wrap(err) } ass := string(assBuff) q := url.Values{} for k, v := range reqParams { if v != "" { q.Set(k, v) } } if _, ok := reqParams["code"]; !ok { q.Set("code", cod) } if _, ok := reqParams["client_assertion"]; !ok { q.Set("client_assertion", ass) } req, err := http.NewRequest("POST", idpSys.selfId+"/token", strings.NewReader(q.Encode())) if err != nil { return nil, erro.Wrap(err) } req.Header.Set("Content-Type", util.ContentTypeForm) resp, err := (&http.Client{}).Do(req) if err != nil { return nil, erro.Wrap(err) } return resp, nil } // アクセストークンを取得する。 // 返り値は JSON を Unmarshal したもの。 // パラメータ値が空値なら、そのパラメータを設定しない。 func testGetToken(idpSys *system, consResp *http.Response, assHeads, assClms map[string]interface{}, reqParams map[string]string, kid string, sigKey crypto.PrivateKey) (map[string]interface{}, error) { resp, err := testGetTokenWithoutCheck(idpSys, consResp, assHeads, assClms, reqParams, kid, sigKey) if err != nil { return nil, erro.Wrap(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { util.LogResponse(level.ERR, resp, true) return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode)) } var res map[string]interface{} if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) return nil, erro.Wrap(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) return nil, erro.Wrap(err) } return res, nil } // アカウント情報を取得する。 // 返り値の Body を Close すること。 // パラメータ値が空値なら、そのパラメータを設定しない。 func testGetAccountInfoWithoutCheck(idpSys *system, tokRes map[string]interface{}, reqHeads map[string]string) (*http.Response, error) { tok, _ := tokRes["access_token"].(string) if tok == "" { return nil, erro.New("no access token") } // アクセストークンを取得できた。 req, err := http.NewRequest("GET", idpSys.selfId+"/userinfo", nil) if err != nil { return nil, erro.Wrap(err) } for k, v := range 
reqHeads { if v != "" { req.Header.Set(k, v) } } if _, ok := reqHeads["Authorization"]; !ok { req.Header.Set("Authorization", "Bearer "+tok) } resp, err := (&http.Client{}).Do(req) if err != nil { return nil, erro.Wrap(err) } return resp, nil } // アカウント情報を取得する。 // 返り値は JSON を Unmarshal したもの。 // パラメータ値が空値なら、そのパラメータを設定しない。 func testGetAccountInfo(idpSys *system, tokRes map[string]interface{}, reqHeads map[string]string) (map[string]interface{}, error) { resp, err := testGetAccountInfoWithoutCheck(idpSys, tokRes, reqHeads) if err != nil { return nil, erro.Wrap(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { util.LogResponse(level.ERR, resp, true) resp.Body.Close() return nil, erro.New("invalid response ", resp.StatusCode, " "+http.StatusText(resp.StatusCode)) } var res map[string]interface{} if data, err := ioutil.ReadAll(resp.Body); err != nil { return nil, erro.Wrap(err) } else if err := json.Unmarshal(data, &res); err != nil { return nil, erro.Wrap(err) } return res, nil } // 認証リクエストから認可コード取得までする。 func testFromRequestAuthToConsent(idpSys *system, cli *http.Client, authParams, selParams, loginParams, consParams map[string]string) (*http.Response, error) { // リクエストする。 authResp, err := testRequestAuth(idpSys, cli, authParams) if err != nil { return nil, erro.Wrap(err) } defer authResp.Body.Close() // 必要ならアカウント選択する。 selResp, err := testSelectAccount(idpSys, cli, authResp, selParams) if err != nil { return nil, erro.Wrap(err) } defer selResp.Body.Close() // 必要ならログインする。 loginResp, err := testLogin(idpSys, cli, selResp, loginParams) if err != nil { return nil, erro.Wrap(err) } defer loginResp.Body.Close() // 必要なら同意する。 return testConsent(idpSys, cli, loginResp, consParams) } // トークンリクエストからアカウント情報取得までする。 func testGetTokenAndAccountInfo(idpSys *system, consResp *http.Response, assHeads, assClms map[string]interface{}, tokParams map[string]string, kid string, sigKey crypto.PrivateKey, accInfHeads map[string]string) (map[string]interface{}, error) { // 
アクセストークンを取得する。 tokRes, err := testGetToken(idpSys, consResp, assHeads, assClms, tokParams, kid, sigKey) if err != nil { return nil, erro.Wrap(err) } // アカウント情報を取得する。 return testGetAccountInfo(idpSys, tokRes, accInfHeads) } // 認証リクエストからアカウント情報取得までする。 func testFromRequestAuthToGetAccountInfo(idpSys *system, cli *http.Client, authParams, selParams, loginParams, consParams map[string]string, assHeads, assClms map[string]interface{}, tokParams map[string]string, kid string, sigKey crypto.PrivateKey, accInfHeads map[string]string) (map[string]interface{}, error) { // リクエストから同意までする。 consResp, err := testFromRequestAuthToConsent(idpSys, cli, authParams, selParams, loginParams, consParams) if err != nil { return nil, erro.Wrap(err) } defer consResp.Body.Close() // アクセストークンを取得してアカウント情報を取得する。 return testGetTokenAndAccountInfo(idpSys, consResp, assHeads, assClms, tokParams, kid, sigKey, accInfHeads) } // 認証してアカウント情報を取得できるか。 func TestSuccess(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} if res, err := testFromRequestAuthToGetAccountInfo(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, "prompt": "select_account login consent", }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, 
map[string]string{ "consented_scope": "openid email", }, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey, nil); err != nil { t.Fatal(err) } else if em, _ := res["email"].(string); em != testAcc.attribute("email") { t.Fatal(em, testAcc.attribute("email")) } } // 知らないパラメータを無視できるか。 func TestIgnoreUnknownParameterInAuthRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} if res, err := testFromRequestAuthToGetAccountInfo(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, "unknown_name": "unknown_value", }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + 
"/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), "unknown": "unknown", }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", "unknown": "unknown", }, kid, sigKey, nil); err != nil { t.Fatal(err) } else if em, _ := res["email"].(string); em != testAcc.attribute("email") { t.Fatal(em, testAcc.attribute("email")) } } // 認証リクエストの重複パラメータを拒否できるか。 func TestDenyOverlapParameterInAuthRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) req, err := http.NewRequest("GET", idpSys.selfId+"/auth?"+url.Values{ "scope": {"openid email"}, "response_type": {"code"}, "client_id": {testTa2.id()}, "redirect_uri": {rediUri}, }.Encode()+"&scope=aaaa", nil) if err != nil { t.Fatal(err) } resp, err := (&http.Client{}).Do(req) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusOK) } else if resp.Request.FormValue(formErr) == "" { t.Fatal("no error") } } // 認証リクエストに client_id が無い時に拒否できるか。 func TestDenyNoClientIdInAuthRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) 
// //////////////////////////////// _, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuthWithoutCheck(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": "", "redirect_uri": rediUri, }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } // 認証リクエストに response_type が無い時に拒否できるか。 func TestDenyNoResponseTypeInAuthRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuth(idpSys, cli, 
map[string]string{ "scope": "openid email", "response_type": "", "client_id": testTa2.id(), "redirect_uri": rediUri, }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.Request.FormValue(formErr) != errInvReq { t.Fatal(resp.Request.FormValue(formErr), errInvReq) } } // 認証リクエストの response_type が未知の時に拒否できるか。 func TestDenyUnknownResponseTypeInAuthRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuth(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "unknown", "client_id": testTa2.id(), "redirect_uri": rediUri, }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.Request.FormValue(formErr) != errUnsuppRespType { t.Fatal(resp.Request.FormValue(formErr), errInvReq) } } // 結果をリダイレクトで返すときに redirect_uri のパラメータを維持できるか。 func TestKeepRedirectUriParameter(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp([]string{"/redirect_endpoint?param_name=param_value"}, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA 
にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if q := resp.Request.URL.Query(); q.Get("code") == "" { t.Fatal("no code") } else if q.Get("param_name") != "param_value" { t.Fatal(q.Get("param_name"), "param_value") } } // エラーをリダイレクトで返すときに redirect_uri のパラメータを維持できるか。 func TestKeepRedirectUriParameterInError(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp([]string{"/redirect_endpoint?param_name=param_value"}, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuth(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "unknown", "client_id": testTa2.id(), "redirect_uri": rediUri, }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if q := resp.Request.URL.Query(); q.Get("error") != errUnsuppRespType { t.Fatal(q.Get("error"), errUnsuppRespType) } else 
if q.Get("param_name") != "param_value" { t.Fatal(q.Get("param_name"), "param_value") } } // redirect_uri が登録値と異なるときにリダイレクトせずに拒否できるか。 func TestDirectErrorResponseInInvalidRedirectUri(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuthWithoutCheck(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri + "/a", }) defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } // redirect_uri が無いときにリダイレクトせずに拒否できるか。 func TestDirectErrorResponseInNoRedirectUri(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, _, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer 
os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuthWithoutCheck(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } // POST でないトークンリクエストを拒否できるか。 func TestDenyNonPostTokenRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) for _, meth := range []string{"GET", "PUT"} { // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": 
testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() cod := consResp.Request.FormValue("code") if cod == "" { util.LogRequest(level.ERR, consResp.Request, true) t.Fatal("no code") } // 認可コードを取得できた。 assJws := util.NewJws() assJws.SetHeader("alg", "RS256") assJws.SetHeader("kid", kid) assJws.SetClaim("iss", testTa2.id()) assJws.SetClaim("sub", testTa2.id()) assJws.SetClaim("aud", idpSys.selfId+"/token") assJws.SetClaim("jti", strconv.FormatInt(time.Now().UnixNano(), 16)) assJws.SetClaim("exp", time.Now().Add(idpSys.idTokExpiDur).Unix()) assJws.SetClaim("code", cod) if err := assJws.Sign(map[string]crypto.PrivateKey{kid: sigKey}); err != nil { t.Fatal(err) } assBuff, err := assJws.Encode() if err != nil { t.Fatal(err) } ass := string(assBuff) req, err := http.NewRequest(meth, idpSys.selfId+"/token", strings.NewReader(url.Values{ "grant_type": {"authorization_code"}, "redirect_uri": {rediUri}, "client_id": {testTa2.id()}, "client_assertion_type": {"urn:ietf:params:oauth:client-assertion-type:jwt-bearer"}, "code": {cod}, "client_assertion": {ass}, }.Encode())) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, err := (&http.Client{}).Do(req) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusMethodNotAllowed { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusMethodNotAllowed) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, 
errInvReq) } } } // トークンリクエストの未知のパラメータを無視できるか。 func TestIgnoreUnknownParameterInTokenRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} if res, err := testFromRequestAuthToGetAccountInfo(idpSys, cli, map[string]string{ "scope": "openid", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", "unknown_name": "unknown_value", }, kid, sigKey, nil); err != nil { t.Fatal(err) } else if em, _ := res["email"].(string); em != testAcc.attribute("email") { t.Fatal(em, testAcc.attribute("email")) } } // トークンリクエストのパラメータが重複していたら拒否できるか。 func TestDenyOverlapParameterInTokenRequest(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", 
level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() cod := consResp.Request.FormValue("code") if cod == "" { util.LogRequest(level.ERR, consResp.Request, true) t.Fatal("no code") } // 認可コードを取得できた。 assJws := util.NewJws() assJws.SetHeader("alg", "RS256") assJws.SetHeader("kid", kid) assJws.SetClaim("iss", testTa2.id()) assJws.SetClaim("sub", testTa2.id()) assJws.SetClaim("aud", idpSys.selfId+"/token") assJws.SetClaim("jti", strconv.FormatInt(time.Now().UnixNano(), 16)) assJws.SetClaim("exp", time.Now().Add(idpSys.idTokExpiDur).Unix()) assJws.SetClaim("code", cod) if err := assJws.Sign(map[string]crypto.PrivateKey{kid: sigKey}); err != nil { t.Fatal(err) } assBuff, err := assJws.Encode() if err != nil { t.Fatal(err) } ass := string(assBuff) req, err := http.NewRequest("POST", idpSys.selfId+"/token", strings.NewReader(url.Values{ "grant_type": {"authorization_code"}, "redirect_uri": {rediUri}, "client_id": {testTa2.id()}, "client_assertion_type": {"urn:ietf:params:oauth:client-assertion-type:jwt-bearer"}, "code": 
{cod}, "client_assertion": {ass}, }.Encode()+"&grant_type=authorization_code")) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, err := (&http.Client{}).Do(req) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogRequest(level.ERR, req, true) util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } // トークンリクエストで grant_type が authorization_code なのに client_id が無かったら拒否できるか。 func TestDenyTokenRequestWithoutClientId(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ 
"consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() resp, err := testGetTokenWithoutCheck(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": "", "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } } // 認可コードが 2 回使われたら拒否できるか。 func TestDenyUsedCode(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", 
"client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() // 1 回目はアクセストークンを取得できる。 tokRes, err := testGetToken(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } else if tokRes["access_token"] == "" { t.Fatal(tokRes) } // 2 回目は拒否される。 resp, err := testGetTokenWithoutCheck(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvGrnt { t.Fatal(res.Error, errInvGrnt) } } // 2 
回使われた認可コードで発行したアクセストークンを無効にできるか。 func _TestDisableTokenOfUsedCode(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, kid, sigKey, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} consResp, err := testFromRequestAuthToConsent(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }, map[string]string{ "username": testAcc.name(), }, map[string]string{ "username": testAcc.name(), "password": testAcc.password(), }, map[string]string{ "consented_scope": "openid email", }) if err != nil { t.Fatal(err) } defer consResp.Body.Close() // アクセストークンを取得してアカウント情報も取得する。 tokRes, err := testGetToken(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } if res, err := testGetAccountInfo(idpSys, tokRes, nil); err != nil { t.Fatal(err) } else if em, _ := res["email"].(string); em != testAcc.attribute("email") { t.Fatal(em, testAcc.attribute("email")) } // もう一度アクセストークンを要求して拒否される。 
tokResp, err := testGetTokenWithoutCheck(idpSys, consResp, map[string]interface{}{ "alg": "RS256", "kid": kid, }, map[string]interface{}{ "iss": testTa2.id(), "sub": testTa2.id(), "aud": idpSys.selfId + "/token", "jti": strconv.FormatInt(time.Now().UnixNano(), 16), "exp": time.Now().Add(idpSys.idTokExpiDur).Unix(), }, map[string]string{ "grant_type": "authorization_code", "redirect_uri": rediUri, "client_id": testTa2.id(), "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", }, kid, sigKey) if err != nil { t.Fatal(err) } tokResp.Body.Close() // 拒否されていることは別テスト。 // さっき取得したアクセストークンでのアカウント情報取得も拒否される。 resp, err := testGetAccountInfoWithoutCheck(idpSys, tokRes, nil) if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvTok { t.Fatal(res.Error, errInvTok) } } // 認証リクエストに scope が無かったら拒否できるか。 func TestDenyAuthRequestWithoutScope(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} resp, err := testRequestAuth(idpSys, cli, 
map[string]string{ "scope": "", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if q := resp.Request.URL.Query(); q.Get("error") == errInvReq { t.Fatal(q.Get("error"), errInvReq) } } // 認証中にエラーが起きたら認証経過を破棄できるか。 func TestAbortSession(t *testing.T) { // //////////////////////////////// // util.SetupConsoleLog("github.com/realglobe-Inc", level.ALL) // defer util.SetupConsoleLog("github.com/realglobe-Inc", level.OFF) // //////////////////////////////// testTa2, rediUri, _, _, taServ, idpSys, shutCh, err := setupTestTaAndIdp(nil, []*account{testAcc}, nil) if err != nil { t.Fatal(err) } defer taServ.Close() defer os.RemoveAll(idpSys.uiPath) defer func() { shutCh <- struct{}{} }() // TA にリダイレクトできたときのレスポンスを設定しておく。 taServ.AddResponse(http.StatusOK, nil, []byte("success")) taServ.AddResponse(http.StatusOK, nil, []byte("success")) // サーバ起動待ち。 time.Sleep(10 * time.Millisecond) cookJar, err := cookiejar.New(nil) if err != nil { t.Fatal(err) } cli := &http.Client{Jar: cookJar} // リクエストする。 authResp, err := testRequestAuth(idpSys, cli, map[string]string{ "scope": "openid email", "response_type": "code", "client_id": testTa2.id(), "redirect_uri": rediUri, "prompt": "select_account", "unknown": "unknown", }) if err != nil { t.Fatal(err) } defer authResp.Body.Close() // アカウント選択でアカウント選択券を渡さないで認証経過をリセット。 selResp, err := testSelectAccount(idpSys, cli, authResp, map[string]string{ "username": testAcc.name(), "ticket": "", }) if err != nil { t.Fatal(err) } defer selResp.Body.Close() if selResp.Request.FormValue(formErr) != errAccDeny { t.Fatal(selResp.Request.FormValue(formErr), errAccDeny) } // アカウント選択でさっきのアカウント選択券を渡す。 resp, err := testSelectAccountWithoutCheck(idpSys, cli, authResp, map[string]string{ "username": testAcc.name(), }) if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusBadRequest { util.LogResponse(level.ERR, resp, true) t.Fatal(resp.StatusCode, 
http.StatusBadRequest) } var res struct{ Error string } if data, err := ioutil.ReadAll(resp.Body); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if err := json.Unmarshal(data, &res); err != nil { util.LogResponse(level.ERR, resp, true) t.Fatal(err) } else if res.Error != errInvReq { t.Fatal(res.Error, errInvReq) } }
package neurgo import ( "testing" "github.com/couchbaselabs/go.assert" ) // create netwwork with topology capable of solving XNOR, but which // has not been trained yet func xnorNetworkUntrained() *NeuralNetwork { hidden_neuron1 := &Neuron{Bias: 0, ActivationFunction: sigmoid} hidden_neuron2 := &Neuron{Bias: 0, ActivationFunction: sigmoid} output_neuron := &Neuron{Bias: 0, ActivationFunction: sigmoid} sensor := &Sensor{} actuator := &Actuator{} // give names to network nodes sensor.Name = "sensor" hidden_neuron1.Name = "hidden_neuron1" hidden_neuron2.Name = "hidden_neuron2" output_neuron.Name = "output_neuron" actuator.Name = "actuator" // connect nodes together sensor.ConnectBidirectionalWeighted(hidden_neuron1, []float64{0, 0}) sensor.ConnectBidirectionalWeighted(hidden_neuron2, []float64{0, 0}) hidden_neuron1.ConnectBidirectionalWeighted(output_neuron, []float64{0}) hidden_neuron2.ConnectBidirectionalWeighted(output_neuron, []float64{0}) output_neuron.ConnectBidirectional(actuator) // create neural network sensors := []*Sensor{sensor} actuators := []*Actuator{actuator} neuralNet := &NeuralNetwork{sensors: sensors, actuators: actuators} // spinup node goroutines signallers := []Connector{sensor, hidden_neuron1, hidden_neuron2, output_neuron, actuator} for _, signaller := range signallers { go Run(signaller) } return neuralNet } func TestWeightTraining(t *testing.T) { // training set examples := []*TrainingSample{ // TODO: how to wrap this? 
{sampleInputs: [][]float64{[]float64{0, 1}}, expectedOutputs: [][]float64{[]float64{0}}}, {sampleInputs: [][]float64{[]float64{1, 1}}, expectedOutputs: [][]float64{[]float64{1}}}, {sampleInputs: [][]float64{[]float64{1, 0}}, expectedOutputs: [][]float64{[]float64{0}}}, {sampleInputs: [][]float64{[]float64{0, 0}}, expectedOutputs: [][]float64{[]float64{1}}}} // create netwwork with topology capable of solving XNOR neuralNet := xnorNetworkUntrained() // verify it can not yet solve the training set (since training would be useless in that case) verified := neuralNet.Verify(examples) assert.False(t, verified) // TODO - shutdown the network so we can re-use it // create stochastic hill climber trainer // train the network // verify it can now solve the training set verified = neuralNet.Verify(examples) assert.True(t, verified) } trainer test now compiles .. in last commit I said the tests were passing, was probably not true. they just compiled. package neurgo import ( "testing" "github.com/couchbaselabs/go.assert" ) // create netwwork with topology capable of solving XNOR, but which // has not been trained yet func xnorNetworkUntrained() *NeuralNetwork { // create network nodes hn1_processor := &Neuron{Bias: 0, ActivationFunction: sigmoid} hidden_neuron1 := &Node{Name: "hidden_neuron1", processor: hn1_processor} hn2_processor := &Neuron{Bias: 0, ActivationFunction: sigmoid} hidden_neuron2 := &Node{Name: "hidden_neuron2", processor: hn2_processor} outn_processor := &Neuron{Bias: 0, ActivationFunction: sigmoid} output_neuron := &Node{Name: "output_neuron", processor: outn_processor} sensor := &Node{Name: "sensor", processor: &Sensor{}} actuator := &Node{Name: "actuator", processor: &Actuator{}} // connect nodes together sensor.ConnectBidirectionalWeighted(hidden_neuron1, []float64{0, 0}) sensor.ConnectBidirectionalWeighted(hidden_neuron2, []float64{0, 0}) hidden_neuron1.ConnectBidirectionalWeighted(output_neuron, []float64{0}) 
hidden_neuron2.ConnectBidirectionalWeighted(output_neuron, []float64{0}) output_neuron.ConnectBidirectional(actuator) // create neural network sensors := []*Node{sensor} actuators := []*Node{actuator} neuralNet := &NeuralNetwork{sensors: sensors, actuators: actuators} // spinup node goroutines nodes := []*Node{sensor, hidden_neuron1, hidden_neuron2, output_neuron, actuator} for _, node := range nodes { go Run(node.processor, node) } return neuralNet } func TestWeightTraining(t *testing.T) { // training set examples := []*TrainingSample{ // TODO: how to wrap this? {sampleInputs: [][]float64{[]float64{0, 1}}, expectedOutputs: [][]float64{[]float64{0}}}, {sampleInputs: [][]float64{[]float64{1, 1}}, expectedOutputs: [][]float64{[]float64{1}}}, {sampleInputs: [][]float64{[]float64{1, 0}}, expectedOutputs: [][]float64{[]float64{0}}}, {sampleInputs: [][]float64{[]float64{0, 0}}, expectedOutputs: [][]float64{[]float64{1}}}} // create netwwork with topology capable of solving XNOR neuralNet := xnorNetworkUntrained() // verify it can not yet solve the training set (since training would be useless in that case) verified := neuralNet.Verify(examples) assert.False(t, verified) // TODO - shutdown the network so we can re-use it // create stochastic hill climber trainer // train the network // verify it can now solve the training set verified = neuralNet.Verify(examples) assert.True(t, verified) }
package main import ( "testing" "io/ioutil" ) // TestValidPayload tests if we can create a proper object from // a valid JSON payload. func TestValidPayload(t *testing.T) { json := `[{ "message" : { "reqHost" : "www.example.com", "respLen" : "276248", "cliIP" : "123.123.123.123", "status" : "503", "bytes" : "123440", "protoVer" : "1.1", "respCT" : "text/html", "UA" : "Mozilla%2f5.0%20(Macintosh%3b%20Intel%20Mac%20OS%20X%2010.9%3b%20rv%3a28.0)%20Gecko%2f20100101%20Firefox%2f28.0%20(FlipboardProxy%2f1.1%3b%20+http%3a%2f%2fflipboard.com%2fbrowserproxy)", "reqMethod" : "POST", "fwdHost" : "www.example.com", "proto" : "http", "reqPort" : "80", "reqPath" : "%2f" }, "netPerf" : { "asnum" : "8523", "downloadTime" : "1", "edgeIP" : "165.254.92.141", "lastByte" : "0", "lastMileRTT" : "102", "firstByte" : "0", "cacheStatus" : "0" }, "network" : { "asnum" : "8523", "edgeIP" : "165.254.92.141", "networkType" : "", "network" : "" }, "cp" : "123456", "id" : "915cfea5570f824cc27112-a", "version" : "1.0", "start" : "1460634188.565", "type" : "cloud_monitor", "format" : "default", "respHdr" : { "server" : "Microsoft-IIS/8.5", "contEnc" : "identity" }, "geo" : { "lat" : "59.33", "region" : "AB", "long" : "18.05", "country" : "DE", "city" : "dummy" }, "reqHdr" : { "cookie" : "drbanan%3d1" } }]` payloads, err := CreateObjects([]byte(json)) if err != nil { t.Errorf("Error while trying to decode valid JSON payload: %s", err) } if len(payloads) != 1 { t.Errorf("Unexpected number of payloads in JSON: Should be 1, is %d", len(payloads)) } payload := payloads[0] if payload.CP != "123456" { t.Errorf("CP not correct in payload. Should be 123456, is %s", payload.CP) } if payload.ID != "915cfea5570f824cc27112-a" { t.Errorf("ID not correct in payload. Should be 915cfea5570f824cc27112-a, is %s", payload.ID) } if payload.Geo.Country != "DE" { t.Errorf("Country not correct in payload. 
Should be DE, is %s", payload.Geo.Country) } } // TestMultiplePayloads tests if a correct number of payload objects are created. func TestMultiplePayloads(t *testing.T) { dat, err:= ioutil.ReadFile("tests/payload.json") payloads, err := CreateObjects([]byte(dat)) if err != nil { t.Errorf("Error while trying to decode valid JSON payload: %s", err) } // test custom number of payloads in JSON file if len(payloads) != 27 { t.Errorf("Unexpected number of payloads in JSON: Should be 7, is %d", len(payloads)) } for i := 0; i < len(payloads); i++ { if payloads[i].CP != "123456" { t.Errorf("CP not correct in payload. Should be 123456, is %s", payloads[i].CP) } } // TODO test other fields } Test changed package main import ( "io/ioutil" "testing" ) // TestValidPayload tests if we can create a proper object from // a valid JSON payload. func TestValidPayload(t *testing.T) { json := `[{ "message" : { "reqHost" : "www.example.com", "respLen" : "276248", "cliIP" : "123.123.123.123", "status" : "503", "bytes" : "123440", "protoVer" : "1.1", "respCT" : "text/html", "UA" : "Mozilla%2f5.0%20(Macintosh%3b%20Intel%20Mac%20OS%20X%2010.9%3b%20rv%3a28.0)%20Gecko%2f20100101%20Firefox%2f28.0%20(FlipboardProxy%2f1.1%3b%20+http%3a%2f%2fflipboard.com%2fbrowserproxy)", "reqMethod" : "POST", "fwdHost" : "www.example.com", "proto" : "http", "reqPort" : "80", "reqPath" : "%2f" }, "netPerf" : { "asnum" : "8523", "downloadTime" : "1", "edgeIP" : "165.254.92.141", "lastByte" : "0", "lastMileRTT" : "102", "firstByte" : "0", "cacheStatus" : "0" }, "network" : { "asnum" : "8523", "edgeIP" : "165.254.92.141", "networkType" : "", "network" : "" }, "cp" : "123456", "id" : "915cfea5570f824cc27112-a", "version" : "1.0", "start" : "1460634188.565", "type" : "cloud_monitor", "format" : "default", "respHdr" : { "server" : "Microsoft-IIS/8.5", "contEnc" : "identity" }, "geo" : { "lat" : "59.33", "region" : "AB", "long" : "18.05", "country" : "DE", "city" : "dummy" }, "reqHdr" : { "cookie" : "drbanan%3d1" } }]` 
payloads, err := CreateObjects([]byte(json)) if err != nil { t.Errorf("Error while trying to decode valid JSON payload: %s", err) } if len(payloads) != 1 { t.Errorf("Unexpected number of payloads in JSON: Should be 1, is %d", len(payloads)) } payload := payloads[0] if payload.CP != "123456" { t.Errorf("CP not correct in payload. Should be 123456, is %s", payload.CP) } if payload.ID != "915cfea5570f824cc27112-a" { t.Errorf("ID not correct in payload. Should be 915cfea5570f824cc27112-a, is %s", payload.ID) } if payload.Geo.Country != "DE" { t.Errorf("Country not correct in payload. Should be DE, is %s", payload.Geo.Country) } } // TestMultiplePayloads tests if a correct number of payload objects are created. func TestMultiplePayloads(t *testing.T) { dat, err := ioutil.ReadFile("tests/payload.json") payloads, err := CreateObjects([]byte(dat)) if err != nil { t.Errorf("Error while trying to decode valid JSON payload: %s", err) } // test custom number of payloads in JSON file if len(payloads) != 42 { t.Errorf("Unexpected number of payloads in JSON: Should be 7, is %d", len(payloads)) } for i := 0; i < len(payloads); i++ { if payloads[i].CP != "123456" { t.Errorf("CP not correct in payload. Should be 123456, is %s", payloads[i].CP) } } // TODO test other fields }
package gexec import ( "errors" "fmt" "go/build" "io/ioutil" "os" "os/exec" "path" "path/filepath" "runtime" "sync" ) var ( mu sync.Mutex tmpDir string ) /* Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. A path pointing to this binary is returned. Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`. */ func Build(packagePath string, args ...string) (compiledPath string, err error) { return doBuild(build.Default.GOPATH, packagePath, nil, args...) } /* BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time. */ func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) { return doBuild(build.Default.GOPATH, packagePath, env, args...) } /* BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). */ func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { return doBuild(gopath, packagePath, nil, args...) } func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) { tmpDir, err := temporaryDirectory() if err != nil { return "", err } if len(gopath) == 0 { return "", errors.New("$GOPATH not provided when building " + packagePath) } executable := filepath.Join(tmpDir, path.Base(packagePath)) if runtime.GOOS == "windows" { executable = executable + ".exe" } cmdArgs := append([]string{"build"}, args...) cmdArgs = append(cmdArgs, "-o", executable, packagePath) build := exec.Command("go", cmdArgs...) build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...) build.Env = append(build.Env, env...) 
output, err := build.CombinedOutput() if err != nil { return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) } return executable, nil } /* You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by gexec. In Ginkgo this is typically done in an AfterSuite callback. */ func CleanupBuildArtifacts() { mu.Lock() defer mu.Unlock() if tmpDir != "" { os.RemoveAll(tmpDir) tmpDir = "" } } func temporaryDirectory() (string, error) { var err error mu.Lock() defer mu.Unlock() if tmpDir == "" { tmpDir, err = ioutil.TempDir("", "gexec_artifacts") if err != nil { return "", err } } return ioutil.TempDir(tmpDir, "g") } Update comment package gexec import ( "errors" "fmt" "go/build" "io/ioutil" "os" "os/exec" "path" "path/filepath" "runtime" "sync" ) var ( mu sync.Mutex tmpDir string ) /* Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. A path pointing to this binary is returned. Build uses the $GOPATH set in your environment. If $GOPATH is not set and you are using Go 1.8+, $HOME/go directory is used instead. It passes the variadic args on to `go build`. */ func Build(packagePath string, args ...string) (compiledPath string, err error) { return doBuild(build.Default.GOPATH, packagePath, nil, args...) } /* BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time. */ func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) { return doBuild(build.Default.GOPATH, packagePath, env, args...) } /* BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). */ func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { return doBuild(gopath, packagePath, nil, args...) 
} func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) { tmpDir, err := temporaryDirectory() if err != nil { return "", err } if len(gopath) == 0 { return "", errors.New("$GOPATH not provided when building " + packagePath) } executable := filepath.Join(tmpDir, path.Base(packagePath)) if runtime.GOOS == "windows" { executable = executable + ".exe" } cmdArgs := append([]string{"build"}, args...) cmdArgs = append(cmdArgs, "-o", executable, packagePath) build := exec.Command("go", cmdArgs...) build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...) build.Env = append(build.Env, env...) output, err := build.CombinedOutput() if err != nil { return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) } return executable, nil } /* You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by gexec. In Ginkgo this is typically done in an AfterSuite callback. */ func CleanupBuildArtifacts() { mu.Lock() defer mu.Unlock() if tmpDir != "" { os.RemoveAll(tmpDir) tmpDir = "" } } func temporaryDirectory() (string, error) { var err error mu.Lock() defer mu.Unlock() if tmpDir == "" { tmpDir, err = ioutil.TempDir("", "gexec_artifacts") if err != nil { return "", err } } return ioutil.TempDir(tmpDir, "g") }
package main import ( "archive/zip" "os" "path" "github.com/MJKWoolnough/byteio" ) func (t Transfer) maps(name string, r *byteio.StickyReader, w *byteio.StickyWriter, f *os.File, size int64) error { zr, err := zip.NewReader(f, size) if err != nil { return err } m := t.c.NewMap() done := false go func() { if !done { t.c.RemoveMap(m.ID) } go t.c.Save() }() m.Lock() m.Name = name d := m.Path m.Unlock() err = unzip(zr, d) if err != nil { return err } mapProperties := DefaultMapSettings() pm, err := os.OpenFile(path.Join(d, "properties.map"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) defer pm.Close() if err != nil { if !os.IsExist(err) { return err } } else { err = mapProperties.WriteTo(pm) if err != nil { return err } } done = true return nil } removed redundent vars package main import ( "archive/zip" "os" "path" "github.com/MJKWoolnough/byteio" ) func (t Transfer) maps(name string, _ *byteio.StickyReader, _ *byteio.StickyWriter, f *os.File, size int64) error { zr, err := zip.NewReader(f, size) if err != nil { return err } m := t.c.NewMap() done := false go func() { if !done { t.c.RemoveMap(m.ID) } go t.c.Save() }() m.Lock() m.Name = name d := m.Path m.Unlock() err = unzip(zr, d) if err != nil { return err } mapProperties := DefaultMapSettings() pm, err := os.OpenFile(path.Join(d, "properties.map"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) defer pm.Close() if err != nil { if !os.IsExist(err) { return err } } else { err = mapProperties.WriteTo(pm) if err != nil { return err } } done = true return nil }
package git import ( "regexp" "strconv" "sync" "github.com/git-lfs/git-lfs/subprocess" "github.com/rubyist/tracerx" ) var ( gitVersion *string gitVersionMu sync.Mutex ) func Version() (string, error) { gitVersionMu.Lock() defer gitVersionMu.Unlock() if gitVersion == nil { v, err := subprocess.SimpleExec("git", "version") gitVersion = &v return v, err } return *gitVersion, nil } // IsVersionAtLeast returns whether the git version is the one specified or higher // argument is plain version string separated by '.' e.g. "2.3.1" but can omit minor/patch func IsGitVersionAtLeast(ver string) bool { gitver, err := Version() if err != nil { tracerx.Printf("Error getting git version: %v", err) return false } return IsVersionAtLeast(gitver, ver) } // IsVersionAtLeast compares 2 version strings (ok to be prefixed with 'git version', ignores) func IsVersionAtLeast(actualVersion, desiredVersion string) bool { // Capture 1-3 version digits, optionally prefixed with 'git version' and possibly // with suffixes which we'll ignore (e.g. 
unstable builds, MinGW versions) verregex := regexp.MustCompile(`(?:git version\s+)?(\d+)(?:.(\d+))?(?:.(\d+))?.*`) var atleast uint64 // Support up to 1000 in major/minor/patch digits const majorscale = 1000 * 1000 const minorscale = 1000 if match := verregex.FindStringSubmatch(desiredVersion); match != nil { // Ignore errors as regex won't match anything other than digits major, _ := strconv.Atoi(match[1]) atleast += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) atleast += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) atleast += uint64(patch) } } var actual uint64 if match := verregex.FindStringSubmatch(actualVersion); match != nil { major, _ := strconv.Atoi(match[1]) actual += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) actual += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) actual += uint64(patch) } } return actual >= atleast } git/version.go: replace sync.Mutex usage with sync.Once We use a sync.Mutex to synchronize access to the string pointer `gitVersion`, which indicates the version of Git used by the system running Git LFS. Our basic usage of the sync.Mutex is not incorrect, but we can improve the readability by instead using a sync.Once. A sync.Once determines very quickly (and in an atomic, goroutine-safe fashion) whether or not _any_ function has been run, and if it hasn't, run it. By doing this, we can--at the first request--produce a value for the result of running 'git version', and then return it later to the caller. This has the following benefits: - If 'git version' has already been run, we do not need to hold the lock for the entire duration of the function. - If 'git version' has not already been run, we only run it once, retaining the existing behavior. Only one change, which is the introduction of the `gitVersionErr` variable. 
This is a consequence of executing the 'git version' call in a closure: since we're in a new stack frame, we can't return from our parent. Instead, we retain the value for all time, and return _it_, along with whatever value we got from running 'git version' in the first place. package git import ( "regexp" "strconv" "sync" "github.com/git-lfs/git-lfs/subprocess" "github.com/rubyist/tracerx" ) var ( gitVersionOnce sync.Once gitVersion string gitVersionErr error ) func Version() (string, error) { gitVersionOnce.Do(func() { gitVersion, gitVersionErr = subprocess.SimpleExec("git", "version") }) return gitVersion, gitVersionErr } // IsVersionAtLeast returns whether the git version is the one specified or higher // argument is plain version string separated by '.' e.g. "2.3.1" but can omit minor/patch func IsGitVersionAtLeast(ver string) bool { gitver, err := Version() if err != nil { tracerx.Printf("Error getting git version: %v", err) return false } return IsVersionAtLeast(gitver, ver) } // IsVersionAtLeast compares 2 version strings (ok to be prefixed with 'git version', ignores) func IsVersionAtLeast(actualVersion, desiredVersion string) bool { // Capture 1-3 version digits, optionally prefixed with 'git version' and possibly // with suffixes which we'll ignore (e.g. 
unstable builds, MinGW versions) verregex := regexp.MustCompile(`(?:git version\s+)?(\d+)(?:.(\d+))?(?:.(\d+))?.*`) var atleast uint64 // Support up to 1000 in major/minor/patch digits const majorscale = 1000 * 1000 const minorscale = 1000 if match := verregex.FindStringSubmatch(desiredVersion); match != nil { // Ignore errors as regex won't match anything other than digits major, _ := strconv.Atoi(match[1]) atleast += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) atleast += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) atleast += uint64(patch) } } var actual uint64 if match := verregex.FindStringSubmatch(actualVersion); match != nil { major, _ := strconv.Atoi(match[1]) actual += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) actual += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) actual += uint64(patch) } } return actual >= atleast }
/* * Copyright (c) 2013 Matt Jibson <matt.jibson@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ package goapp import ( "bytes" "encoding/xml" "errors" "fmt" "html" "html/template" "image" _ "image/gif" _ "image/jpeg" _ "image/png" "io/ioutil" "math/rand" "net/http" "net/url" "strings" "time" "appengine" "appengine/blobstore" aimage "appengine/image" "appengine/urlfetch" "appengine/user" "code.google.com/p/go-charset/charset" _ "code.google.com/p/go-charset/data" "code.google.com/p/rsc/blog/atom" mpg "github.com/MiniProfiler/go/miniprofiler_gae" "github.com/mjibson/goon" "github.com/mjibson/rssgo" ) func serveError(w http.ResponseWriter, err error) { http.Error(w, err.Error(), http.StatusInternalServerError) } type Includes struct { Angular string BootstrapCss string BootstrapJs string Jquery string MiniProfiler template.HTML User *User Messages []string GoogleAnalytics string IsDev bool IsAdmin bool } var ( Angular string BootstrapCss string BootstrapJs string Jquery string isDevServer bool ) func init() { angular_ver := "1.0.5" bootstrap_ver := "2.3.1" jquery_ver := "1.9.1" isDevServer = appengine.IsDevAppServer() if appengine.IsDevAppServer() { Angular = fmt.Sprintf("/static/js/angular-%v.js", angular_ver) BootstrapCss = fmt.Sprintf("/static/css/bootstrap-combined-%v.css", bootstrap_ver) 
BootstrapJs = fmt.Sprintf("/static/js/bootstrap-%v.js", bootstrap_ver) Jquery = fmt.Sprintf("/static/js/jquery-%v.js", jquery_ver) } else { Angular = fmt.Sprintf("//ajax.googleapis.com/ajax/libs/angularjs/%v/angular.min.js", angular_ver) BootstrapCss = fmt.Sprintf("//netdna.bootstrapcdn.com/twitter-bootstrap/%v/css/bootstrap-combined.min.css", bootstrap_ver) BootstrapJs = fmt.Sprintf("//netdna.bootstrapcdn.com/twitter-bootstrap/%v/js/bootstrap.min.js", bootstrap_ver) Jquery = fmt.Sprintf("//ajax.googleapis.com/ajax/libs/jquery/%v/jquery.min.js", jquery_ver) } } func includes(c mpg.Context, r *http.Request) *Includes { i := &Includes{ Angular: Angular, BootstrapCss: BootstrapCss, BootstrapJs: BootstrapJs, Jquery: Jquery, MiniProfiler: c.Includes(r), GoogleAnalytics: GOOGLE_ANALYTICS_ID, IsDev: isDevServer, } if cu := user.Current(c); cu != nil { gn := goon.FromContext(c) user := &User{Id: cu.ID} if err := gn.Get(user); err == nil { i.User = user i.IsAdmin = cu.Admin if len(user.Messages) > 0 { i.Messages = user.Messages user.Messages = nil gn.Put(user) } } } return i } var dateFormats = []string{ "01.02.06", "02 Jan 2006 15:04:05 UT", "02 Jan 2006", "2 January 2006", "2006-01-02 15:04:05 MST", "2006-01-02", "2006-01-02T15:04+07:00", "2006-01-02T15:04:05 -0700", "2006-01-02T15:04:05", "2006-01-02T15:04:05-0700", "2006-01-02T15:04:05-07:00", "2006-1-2 15:04:05", "2006-1-2", "Jan 2, 2006 15:04:05 MST", "Jan 2, 2006 3:04:05 PM MST", "January 02, 2006 15:04:05 MST", "January 2, 2006 15:04:05 MST", "Mon, 02 2006 15:04:05 MST", "Mon, 02 Jan 2006 15:04:05 -0700", "Mon, 02 Jan 2006 15:04:05 MST", "Mon, 02 Jan 2006 15:04:05 UT", "Mon, 02 Jan 2006 15:04:05 Z", "Mon, 02 Jan 2006 15:04:05", "Mon, 02 Jan 2006", "Mon, 02 January 2006", "Mon, 2 Jan 2006 15:04:05 -0700", "Mon, 2 Jan 2006 15:04:05 MST", "Mon, 2 Jan 2006", "Mon, 2 Jan 2006, 15:04 -0700", "Mon, 2 January 2006, 15:04 -0700", "Mon, 2 January 2006, 15:04:05 MST", "Monday, 02 January 2006 15:04:05 -0700", "Monday, 2 Jan 
2006 15:04:05 -0700", "Monday, 2 January 2006 15:04:05 -0700", time.ANSIC, time.RFC1123, time.RFC1123Z, time.RFC3339, time.RFC822, time.RFC822Z, time.RFC850, time.RubyDate, time.UnixDate, } func parseDate(c appengine.Context, ds ...string) (t time.Time, err error) { for _, d := range ds { d = strings.TrimSpace(d) if d == "" { continue } if t, err = rssgo.ParseRssDate(d); err == nil { return } for _, f := range dateFormats { if t, err = time.Parse(f, d); err == nil { return } } gn := goon.FromContext(c) gn.Put(&DateFormat{Id: d}) } err = errors.New(fmt.Sprintf("could not parse date: %v", strings.Join(ds, ", "))) return } func ParseFeed(c appengine.Context, u string, b []byte) (*Feed, []*Story) { f := Feed{Url: u} var s []*Story a := atom.Feed{} var atomerr, rsserr, rdferr error d := xml.NewDecoder(bytes.NewReader(b)) d.CharsetReader = charset.NewReader if atomerr = d.Decode(&a); atomerr == nil { f.Title = a.Title if t, err := parseDate(c, string(a.Updated)); err == nil { f.Updated = t } for _, l := range a.Link { if l.Rel != "self" { f.Link = l.Href break } } for _, i := range a.Entry { st := Story{ Id: i.ID, Title: i.Title, } if t, err := parseDate(c, string(i.Updated)); err == nil { st.Updated = t } if t, err := parseDate(c, string(i.Published)); err == nil { st.Published = t } if len(i.Link) > 0 { st.Link = i.Link[0].Href } if i.Author != nil { st.Author = i.Author.Name } if i.Content != nil { st.content, st.Summary = Sanitize(i.Content.Body) } else if i.Summary != nil { st.content, st.Summary = Sanitize(i.Summary.Body) } s = append(s, &st) } return parseFix(c, &f, s) } r := rssgo.Rss{} d = xml.NewDecoder(bytes.NewReader(b)) d.CharsetReader = charset.NewReader d.DefaultSpace = "DefaultSpace" if rsserr = d.Decode(&r); rsserr == nil { f.Title = r.Title f.Link = r.Link if t, err := parseDate(c, r.LastBuildDate, r.PubDate); err == nil { f.Updated = t } else { c.Warningf("no rss feed date: %v", f.Link) } for _, i := range r.Items { st := Story{ Link: i.Link, Author: 
i.Author, } if i.Title != "" { st.Title = i.Title } else if i.Description != "" { i.Title = i.Description } if i.Content != "" { st.content, st.Summary = Sanitize(i.Content) } else if i.Title != "" && i.Description != "" { st.content, st.Summary = Sanitize(i.Description) } if i.Guid != nil { st.Id = i.Guid.Guid } if t, err := parseDate(c, i.PubDate, i.Date, i.Published); err == nil { st.Published = t st.Updated = t } s = append(s, &st) } return parseFix(c, &f, s) } rdf := RDF{} d = xml.NewDecoder(bytes.NewReader(b)) d.CharsetReader = charset.NewReader if rdferr = d.Decode(&rdf); rdferr == nil { if rdf.Channel != nil { f.Title = rdf.Channel.Title f.Link = rdf.Channel.Link if t, err := parseDate(c, rdf.Channel.Date); err == nil { f.Updated = t } } for _, i := range rdf.Item { st := Story{ Id: i.About, Title: i.Title, Link: i.Link, Author: i.Creator, } st.content, st.Summary = Sanitize(html.UnescapeString(i.Description)) if t, err := parseDate(c, i.Date); err == nil { st.Published = t st.Updated = t } s = append(s, &st) } return parseFix(c, &f, s) } c.Warningf("atom parse error: %s", atomerr.Error()) c.Warningf("xml parse error: %s", rsserr.Error()) c.Warningf("rdf parse error: %s", rdferr.Error()) return nil, nil } const UpdateTime = time.Hour func parseFix(c appengine.Context, f *Feed, ss []*Story) (*Feed, []*Story) { g := goon.FromContext(c) f.Checked = time.Now() f.NextUpdate = f.Checked.Add(UpdateTime - time.Second*time.Duration(rand.Int63n(300))) fk := g.Key(f) f.Image = loadImage(c, f) for _, s := range ss { s.Parent = fk s.Created = f.Checked if !s.Updated.IsZero() && s.Published.IsZero() { s.Published = s.Updated } if s.Published.IsZero() || f.Checked.Before(s.Published) { s.Published = f.Checked } if !s.Updated.IsZero() { s.Date = s.Updated.Unix() } else { s.Date = s.Published.Unix() } if s.Id == "" { if s.Link != "" { s.Id = s.Link } else if s.Title != "" { s.Id = s.Title } else { c.Errorf("story has no id: %v", s) return nil, nil } } // if a story doesn't 
have a link, see if its id is a URL if s.Link == "" { if u, err := url.Parse(s.Id); err == nil { s.Link = u.String() } } } return f, ss } func loadImage(c appengine.Context, f *Feed) string { s := f.Link if s == "" { s = f.Url } u, err := url.Parse(s) if err != nil { return "" } u.Path = "/favicon.ico" u.RawQuery = "" u.Fragment = "" g := goon.FromContext(c) i := &Image{Id: u.String()} if err := g.Get(i); err == nil { return i.Url } client := urlfetch.Client(c) r, err := client.Get(u.String()) if err != nil || r.StatusCode != http.StatusOK || r.ContentLength == 0 { return "" } b, err := ioutil.ReadAll(r.Body) r.Body.Close() if err != nil { return "" } buf := bytes.NewBuffer(b) _, t, err := image.Decode(buf) if err != nil { t = "application/octet-stream" } else { t = "image/" + t } w, err := blobstore.Create(c, t) if err != nil { return "" } if _, err := w.Write(b); err != nil { return "" } if w.Close() != nil { return "" } i.Blob, _ = w.Key() su, err := aimage.ServingURL(c, i.Blob, &aimage.ServingURLOptions{Size: 16}) if err != nil { return "" } i.Url = su.String() g.Put(i) return i.Url } Date formats /* * Copyright (c) 2013 Matt Jibson <matt.jibson@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ package goapp import ( "bytes" "encoding/xml" "errors" "fmt" "html" "html/template" "image" _ "image/gif" _ "image/jpeg" _ "image/png" "io/ioutil" "math/rand" "net/http" "net/url" "strings" "time" "appengine" "appengine/blobstore" aimage "appengine/image" "appengine/urlfetch" "appengine/user" "code.google.com/p/go-charset/charset" _ "code.google.com/p/go-charset/data" "code.google.com/p/rsc/blog/atom" mpg "github.com/MiniProfiler/go/miniprofiler_gae" "github.com/mjibson/goon" "github.com/mjibson/rssgo" ) func serveError(w http.ResponseWriter, err error) { http.Error(w, err.Error(), http.StatusInternalServerError) } type Includes struct { Angular string BootstrapCss string BootstrapJs string Jquery string MiniProfiler template.HTML User *User Messages []string GoogleAnalytics string IsDev bool IsAdmin bool } var ( Angular string BootstrapCss string BootstrapJs string Jquery string isDevServer bool ) func init() { angular_ver := "1.0.5" bootstrap_ver := "2.3.1" jquery_ver := "1.9.1" isDevServer = appengine.IsDevAppServer() if appengine.IsDevAppServer() { Angular = fmt.Sprintf("/static/js/angular-%v.js", angular_ver) BootstrapCss = fmt.Sprintf("/static/css/bootstrap-combined-%v.css", bootstrap_ver) BootstrapJs = fmt.Sprintf("/static/js/bootstrap-%v.js", bootstrap_ver) Jquery = fmt.Sprintf("/static/js/jquery-%v.js", jquery_ver) } else { Angular = fmt.Sprintf("//ajax.googleapis.com/ajax/libs/angularjs/%v/angular.min.js", angular_ver) BootstrapCss = fmt.Sprintf("//netdna.bootstrapcdn.com/twitter-bootstrap/%v/css/bootstrap-combined.min.css", bootstrap_ver) BootstrapJs = fmt.Sprintf("//netdna.bootstrapcdn.com/twitter-bootstrap/%v/js/bootstrap.min.js", bootstrap_ver) Jquery = fmt.Sprintf("//ajax.googleapis.com/ajax/libs/jquery/%v/jquery.min.js", jquery_ver) } } func includes(c mpg.Context, r *http.Request) *Includes { i := &Includes{ Angular: Angular, BootstrapCss: BootstrapCss, BootstrapJs: BootstrapJs, Jquery: Jquery, MiniProfiler: c.Includes(r), GoogleAnalytics: 
GOOGLE_ANALYTICS_ID, IsDev: isDevServer, } if cu := user.Current(c); cu != nil { gn := goon.FromContext(c) user := &User{Id: cu.ID} if err := gn.Get(user); err == nil { i.User = user i.IsAdmin = cu.Admin if len(user.Messages) > 0 { i.Messages = user.Messages user.Messages = nil gn.Put(user) } } } return i } var dateFormats = []string{ "01.02.06", "02 Jan 2006 15:04:05 UT", "02 Jan 2006", "2 January 2006", "2006-01-02 15:04:05 MST", "2006-01-02", "2006-01-02T15:04+07:00", "2006-01-02T15:04:05 -0700", "2006-01-02T15:04:05", "2006-01-02T15:04:05-0700", "2006-01-02T15:04:05-07:00", "2006-01-02T15:04:05Z", "2006-1-2 15:04:05", "2006-1-2", "Jan 2, 2006 15:04:05 MST", "Jan 2, 2006 3:04:05 PM MST", "January 02, 2006 15:04:05 MST", "January 2, 2006 15:04:05 MST", "Mon, 02 2006 15:04:05 MST", "Mon, 02 Jan 2006 15:04:05 -0700", "Mon, 02 Jan 2006 15:04:05 MST", "Mon, 02 Jan 2006 15:04:05 UT", "Mon, 02 Jan 2006 15:04:05 Z", "Mon, 02 Jan 2006 15:04:05", "Mon, 02 Jan 2006", "Mon, 02 January 2006", "Mon, 2 Jan 2006 15:04:05 -0700", "Mon, 2 Jan 2006 15:04:05 MST", "Mon, 2 Jan 2006", "Mon, 2 Jan 2006, 15:04 -0700", "Mon, 2 January 2006 15:04:05 MST", "Mon, 2 January 2006, 15:04 -0700", "Mon, 2 January 2006, 15:04:05 MST", "Monday, 02 January 2006 15:04:05 -0700", "Monday, 2 Jan 2006 15:04:05 -0700", "Monday, 2 January 2006 15:04:05 -0700", time.ANSIC, time.RFC1123, time.RFC1123Z, time.RFC3339, time.RFC822, time.RFC822Z, time.RFC850, time.RubyDate, time.UnixDate, } func parseDate(c appengine.Context, ds ...string) (t time.Time, err error) { for _, d := range ds { d = strings.TrimSpace(d) if d == "" { continue } if t, err = rssgo.ParseRssDate(d); err == nil { return } for _, f := range dateFormats { if t, err = time.Parse(f, d); err == nil { return } } gn := goon.FromContext(c) gn.Put(&DateFormat{Id: d}) } err = errors.New(fmt.Sprintf("could not parse date: %v", strings.Join(ds, ", "))) return } func ParseFeed(c appengine.Context, u string, b []byte) (*Feed, []*Story) { f := Feed{Url: 
u} var s []*Story a := atom.Feed{} var atomerr, rsserr, rdferr error d := xml.NewDecoder(bytes.NewReader(b)) d.CharsetReader = charset.NewReader if atomerr = d.Decode(&a); atomerr == nil { f.Title = a.Title if t, err := parseDate(c, string(a.Updated)); err == nil { f.Updated = t } for _, l := range a.Link { if l.Rel != "self" { f.Link = l.Href break } } for _, i := range a.Entry { st := Story{ Id: i.ID, Title: i.Title, } if t, err := parseDate(c, string(i.Updated)); err == nil { st.Updated = t } if t, err := parseDate(c, string(i.Published)); err == nil { st.Published = t } if len(i.Link) > 0 { st.Link = i.Link[0].Href } if i.Author != nil { st.Author = i.Author.Name } if i.Content != nil { st.content, st.Summary = Sanitize(i.Content.Body) } else if i.Summary != nil { st.content, st.Summary = Sanitize(i.Summary.Body) } s = append(s, &st) } return parseFix(c, &f, s) } r := rssgo.Rss{} d = xml.NewDecoder(bytes.NewReader(b)) d.CharsetReader = charset.NewReader d.DefaultSpace = "DefaultSpace" if rsserr = d.Decode(&r); rsserr == nil { f.Title = r.Title f.Link = r.Link if t, err := parseDate(c, r.LastBuildDate, r.PubDate); err == nil { f.Updated = t } else { c.Warningf("no rss feed date: %v", f.Link) } for _, i := range r.Items { st := Story{ Link: i.Link, Author: i.Author, } if i.Title != "" { st.Title = i.Title } else if i.Description != "" { i.Title = i.Description } if i.Content != "" { st.content, st.Summary = Sanitize(i.Content) } else if i.Title != "" && i.Description != "" { st.content, st.Summary = Sanitize(i.Description) } if i.Guid != nil { st.Id = i.Guid.Guid } if t, err := parseDate(c, i.PubDate, i.Date, i.Published); err == nil { st.Published = t st.Updated = t } s = append(s, &st) } return parseFix(c, &f, s) } rdf := RDF{} d = xml.NewDecoder(bytes.NewReader(b)) d.CharsetReader = charset.NewReader if rdferr = d.Decode(&rdf); rdferr == nil { if rdf.Channel != nil { f.Title = rdf.Channel.Title f.Link = rdf.Channel.Link if t, err := parseDate(c, 
rdf.Channel.Date); err == nil { f.Updated = t } } for _, i := range rdf.Item { st := Story{ Id: i.About, Title: i.Title, Link: i.Link, Author: i.Creator, } st.content, st.Summary = Sanitize(html.UnescapeString(i.Description)) if t, err := parseDate(c, i.Date); err == nil { st.Published = t st.Updated = t } s = append(s, &st) } return parseFix(c, &f, s) } c.Warningf("atom parse error: %s", atomerr.Error()) c.Warningf("xml parse error: %s", rsserr.Error()) c.Warningf("rdf parse error: %s", rdferr.Error()) return nil, nil } const UpdateTime = time.Hour func parseFix(c appengine.Context, f *Feed, ss []*Story) (*Feed, []*Story) { g := goon.FromContext(c) f.Checked = time.Now() f.NextUpdate = f.Checked.Add(UpdateTime - time.Second*time.Duration(rand.Int63n(300))) fk := g.Key(f) f.Image = loadImage(c, f) for _, s := range ss { s.Parent = fk s.Created = f.Checked if !s.Updated.IsZero() && s.Published.IsZero() { s.Published = s.Updated } if s.Published.IsZero() || f.Checked.Before(s.Published) { s.Published = f.Checked } if !s.Updated.IsZero() { s.Date = s.Updated.Unix() } else { s.Date = s.Published.Unix() } if s.Id == "" { if s.Link != "" { s.Id = s.Link } else if s.Title != "" { s.Id = s.Title } else { c.Errorf("story has no id: %v", s) return nil, nil } } // if a story doesn't have a link, see if its id is a URL if s.Link == "" { if u, err := url.Parse(s.Id); err == nil { s.Link = u.String() } } } return f, ss } func loadImage(c appengine.Context, f *Feed) string { s := f.Link if s == "" { s = f.Url } u, err := url.Parse(s) if err != nil { return "" } u.Path = "/favicon.ico" u.RawQuery = "" u.Fragment = "" g := goon.FromContext(c) i := &Image{Id: u.String()} if err := g.Get(i); err == nil { return i.Url } client := urlfetch.Client(c) r, err := client.Get(u.String()) if err != nil || r.StatusCode != http.StatusOK || r.ContentLength == 0 { return "" } b, err := ioutil.ReadAll(r.Body) r.Body.Close() if err != nil { return "" } buf := bytes.NewBuffer(b) _, t, err := 
image.Decode(buf) if err != nil { t = "application/octet-stream" } else { t = "image/" + t } w, err := blobstore.Create(c, t) if err != nil { return "" } if _, err := w.Write(b); err != nil { return "" } if w.Close() != nil { return "" } i.Blob, _ = w.Key() su, err := aimage.ServingURL(c, i.Blob, &aimage.ServingURLOptions{Size: 16}) if err != nil { return "" } i.Url = su.String() g.Put(i) return i.Url }
package main import ( "flag" "fmt" "io/ioutil" "log" "net/http" "os" "path" "regexp" "strconv" "strings" "github.com/shifr/imgwizard/cache" "github.com/shifr/vips" ) type Route struct { pattern *regexp.Regexp handler http.Handler } type RegexpHandler struct { routes []*Route } func (h *RegexpHandler) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) { h.routes = append(h.routes, &Route{pattern, http.HandlerFunc(handler)}) } func (h *RegexpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { for _, route := range h.routes { if route.pattern.MatchString(r.URL.Path) { route.handler.ServeHTTP(w, r) return } } http.NotFound(w, r) } type Context struct { Path string Format string CachePath string Storage string Width int Height int } type Settings struct { ListenAddr string CacheDir string Scheme string Local404Thumb string AllowedSizes []string AllowedMedia []string Directories []string UrlExp *regexp.Regexp Context Context Options vips.Options } const ( WEBP_HEADER = "image/webp" ) var ( settings Settings supportedFormats = []string{"jpg", "jpeg", "png"} listenAddr = flag.String("l", "127.0.0.1:8070", "Address to listen on") allowedMedia = flag.String("m", "", "comma separated list of allowed media server hosts") allowedSizes = flag.String("s", "", "comma separated list of allowed sizes") cacheDir = flag.String("c", "/tmp/imgwizard", "directory for cached files") dirsToSearch = flag.String("d", "", "comma separated list of directories to search requested file") local404Thumb = flag.String("thumb", "/tmp/404.jpg", "path to default image") mark = flag.String("mark", "images", "Mark for nginx") quality = flag.Int("q", 0, "image quality after resize") ) // loadSettings loads settings from settings.json // and from command-line func (s *Settings) loadSettings() { s.Scheme = "http" s.AllowedSizes = nil s.AllowedMedia = nil //defaults for vips s.Options.Crop = true s.Options.Enlarge = true s.Options.Quality = 80 s.Options.Extend = 
vips.EXTEND_WHITE s.Options.Interpolator = vips.BILINEAR s.Options.Gravity = vips.CENTRE var sizes = "[0-9]*x[0-9]*" var medias = "" var proxyMark = *mark s.ListenAddr = *listenAddr if *allowedMedia != "" { s.AllowedMedia = strings.Split(*allowedMedia, ",") } if *allowedSizes != "" { s.AllowedSizes = strings.Split(*allowedSizes, ",") } if *dirsToSearch != "" { s.Directories = strings.Split(*dirsToSearch, ",") } s.CacheDir = *cacheDir s.Local404Thumb = *local404Thumb if *quality != 0 { s.Options.Quality = *quality } if len(s.AllowedSizes) > 0 { sizes = strings.Join(s.AllowedSizes, "|") } if len(s.AllowedMedia) > 0 { medias = strings.Join(s.AllowedMedia, "|") } template := fmt.Sprintf( "/(?P<mark>%s)/(?P<storage>loc|rem)/(?P<size>%s)/(?P<path>%s.+)", proxyMark, sizes, medias) s.UrlExp, _ = regexp.Compile(template) } // makeCachePath generates cache path from resized image func (s *Settings) makeCachePath() { var subPath string var cacheImageName string pathParts := strings.Split(s.Context.Path, "/") lastIndex := len(pathParts) - 1 imageData := strings.Split(pathParts[lastIndex], ".") imageName, imageFormat := imageData[0], strings.ToLower(imageData[1]) if s.Options.Webp { cacheImageName = fmt.Sprintf( "%s_%dx%d_webp_.%s", imageName, s.Options.Width, s.Options.Height, imageFormat) } else { cacheImageName = fmt.Sprintf( "%s_%dx%d.%s", imageName, s.Options.Width, s.Options.Height, imageFormat) } switch s.Context.Storage { case "loc": subPath = strings.Join(pathParts[:lastIndex], "/") case "rem": subPath = strings.Join(pathParts[1:lastIndex], "/") } s.Context.Format = imageFormat s.Context.CachePath = fmt.Sprintf( "%s/%s/%s", s.CacheDir, subPath, cacheImageName) } // getLocalImage fetches original image from file system func getLocalImage(s *Settings) ([]byte, error) { var image []byte var filePath string var file *os.File var err error if len(s.Directories) > 0 { found := false for _, dir := range s.Directories { filePath = path.Join("/", dir, s.Context.Path) file, err 
= os.Open(filePath) if err == nil { found = true break } } if !found { file, err = os.Open(s.Local404Thumb) if err != nil { return image, err } } } else { file, err = os.Open(path.Join("/", s.Context.Path)) if err != nil { file, err = os.Open(s.Local404Thumb) if err != nil { return image, err } } } info, _ := file.Stat() image = make([]byte, info.Size()) _, err = file.Read(image) if err != nil { return image, err } return image, nil } // getRemoteImage fetches original image by http url func getRemoteImage(url string) ([]byte, error) { var image []byte resp, err := http.Get(url) if err != nil { return image, err } defer resp.Body.Close() image, err = ioutil.ReadAll(resp.Body) if err != nil { return image, err } return image, nil } // getOrCreateImage check cache path for requested image // if image doesn't exist - creates it func getOrCreateImage() []byte { sett := settings sett.makeCachePath() var c *cache.Cache var image []byte var err error if image, err = c.Get(sett.Context.CachePath); err == nil { return image } switch sett.Context.Storage { case "loc": image, err = getLocalImage(&sett) if err != nil { log.Println("Can't get orig local file, reason - ", err) } case "rem": imgUrl := fmt.Sprintf("%s://%s", sett.Scheme, sett.Context.Path) image, err = getRemoteImage(imgUrl) if err != nil { log.Println("Can't get orig remote file, reason - ", err) } } if !stringIsExists(sett.Context.Format, supportedFormats) { err = c.Set(sett.Context.CachePath, image) if err != nil { log.Println("Can't set cache, reason - ", err) } return image } buf, err := vips.Resize(image, sett.Options) if err != nil { log.Println("Can't resize image, reason - ", err) } err = c.Set(sett.Context.CachePath, buf) if err != nil { log.Println("Can't set cache, reason - ", err) } return buf } func stringIsExists(str string, list []string) bool { for _, el := range list { if el == str { return true } } return false } func parseVars(req *http.Request) map[string]string { params := 
make(map[string]string) match := settings.UrlExp.FindStringSubmatch(req.RequestURI) for i, name := range settings.UrlExp.SubexpNames() { params[name] = match[i] } return params } func fetchImage(rw http.ResponseWriter, req *http.Request) { acceptedTypes := strings.Split(req.Header["Accept"][0], ",") params := parseVars(req) sizes := strings.Split(params["size"], "x") settings.Options.Webp = stringIsExists(WEBP_HEADER, acceptedTypes) settings.Context.Storage = params["storage"] settings.Context.Path = params["path"] settings.Options.Width, _ = strconv.Atoi(sizes[0]) settings.Options.Height, _ = strconv.Atoi(sizes[1]) resultImage := getOrCreateImage() rw.Header().Set("Content-Length", strconv.Itoa(len(resultImage))) rw.Write(resultImage) } func main() { flag.Parse() settings.loadSettings() r := new(RegexpHandler) r.HandleFunc(settings.UrlExp, fetchImage) log.Printf("ImgWizard started on http://%s", settings.ListenAddr) http.ListenAndServe(settings.ListenAddr, r) } Added url unescape before finding local file package main import ( "flag" "fmt" "io/ioutil" "log" "net/http" "net/url" "os" "path" "regexp" "strconv" "strings" "github.com/shifr/imgwizard/cache" "github.com/shifr/vips" ) type Route struct { pattern *regexp.Regexp handler http.Handler } type RegexpHandler struct { routes []*Route } func (h *RegexpHandler) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) { h.routes = append(h.routes, &Route{pattern, http.HandlerFunc(handler)}) } func (h *RegexpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { for _, route := range h.routes { if route.pattern.MatchString(r.URL.Path) { route.handler.ServeHTTP(w, r) return } } http.NotFound(w, r) } type Context struct { Path string Format string CachePath string Storage string Width int Height int } type Settings struct { ListenAddr string CacheDir string Scheme string Local404Thumb string AllowedSizes []string AllowedMedia []string Directories []string UrlExp *regexp.Regexp Context 
Context Options vips.Options } const ( WEBP_HEADER = "image/webp" ) var ( settings Settings supportedFormats = []string{"jpg", "jpeg", "png"} listenAddr = flag.String("l", "127.0.0.1:8070", "Address to listen on") allowedMedia = flag.String("m", "", "comma separated list of allowed media server hosts") allowedSizes = flag.String("s", "", "comma separated list of allowed sizes") cacheDir = flag.String("c", "/tmp/imgwizard", "directory for cached files") dirsToSearch = flag.String("d", "", "comma separated list of directories to search requested file") local404Thumb = flag.String("thumb", "/tmp/404.jpg", "path to default image") mark = flag.String("mark", "images", "Mark for nginx") quality = flag.Int("q", 0, "image quality after resize") ) // loadSettings loads settings from settings.json // and from command-line func (s *Settings) loadSettings() { s.Scheme = "http" s.AllowedSizes = nil s.AllowedMedia = nil //defaults for vips s.Options.Crop = true s.Options.Enlarge = true s.Options.Quality = 80 s.Options.Extend = vips.EXTEND_WHITE s.Options.Interpolator = vips.BILINEAR s.Options.Gravity = vips.CENTRE var sizes = "[0-9]*x[0-9]*" var medias = "" var proxyMark = *mark s.ListenAddr = *listenAddr if *allowedMedia != "" { s.AllowedMedia = strings.Split(*allowedMedia, ",") } if *allowedSizes != "" { s.AllowedSizes = strings.Split(*allowedSizes, ",") } if *dirsToSearch != "" { s.Directories = strings.Split(*dirsToSearch, ",") } s.CacheDir = *cacheDir s.Local404Thumb = *local404Thumb if *quality != 0 { s.Options.Quality = *quality } if len(s.AllowedSizes) > 0 { sizes = strings.Join(s.AllowedSizes, "|") } if len(s.AllowedMedia) > 0 { medias = strings.Join(s.AllowedMedia, "|") } template := fmt.Sprintf( "/(?P<mark>%s)/(?P<storage>loc|rem)/(?P<size>%s)/(?P<path>%s.+)", proxyMark, sizes, medias) s.UrlExp, _ = regexp.Compile(template) } // makeCachePath generates cache path from resized image func (s *Settings) makeCachePath() { var subPath string var cacheImageName string 
pathParts := strings.Split(s.Context.Path, "/") lastIndex := len(pathParts) - 1 imageData := strings.Split(pathParts[lastIndex], ".") imageName, imageFormat := imageData[0], strings.ToLower(imageData[1]) if s.Options.Webp { cacheImageName = fmt.Sprintf( "%s_%dx%d_webp_.%s", imageName, s.Options.Width, s.Options.Height, imageFormat) } else { cacheImageName = fmt.Sprintf( "%s_%dx%d.%s", imageName, s.Options.Width, s.Options.Height, imageFormat) } switch s.Context.Storage { case "loc": subPath = strings.Join(pathParts[:lastIndex], "/") case "rem": subPath = strings.Join(pathParts[1:lastIndex], "/") } s.Context.Format = imageFormat s.Context.CachePath, _ = url.QueryUnescape(fmt.Sprintf( "%s/%s/%s", s.CacheDir, subPath, cacheImageName)) } // getLocalImage fetches original image from file system func getLocalImage(s *Settings) ([]byte, error) { var image []byte var filePath string var file *os.File var err error s.Context.Path, _ = url.QueryUnescape(s.Context.Path) if len(s.Directories) > 0 { found := false for _, dir := range s.Directories { filePath = path.Join("/", dir, s.Context.Path) file, err = os.Open(filePath) if err == nil { found = true break } } if !found { file, err = os.Open(s.Local404Thumb) if err != nil { return image, err } } } else { file, err = os.Open(path.Join("/", s.Context.Path)) if err != nil { file, err = os.Open(s.Local404Thumb) if err != nil { return image, err } } } info, _ := file.Stat() image = make([]byte, info.Size()) _, err = file.Read(image) if err != nil { return image, err } return image, nil } // getRemoteImage fetches original image by http url func getRemoteImage(url string) ([]byte, error) { var image []byte resp, err := http.Get(url) if err != nil { return image, err } defer resp.Body.Close() image, err = ioutil.ReadAll(resp.Body) if err != nil { return image, err } return image, nil } // getOrCreateImage check cache path for requested image // if image doesn't exist - creates it func getOrCreateImage() []byte { sett := settings 
sett.makeCachePath() var c *cache.Cache var image []byte var err error if image, err = c.Get(sett.Context.CachePath); err == nil { return image } switch sett.Context.Storage { case "loc": image, err = getLocalImage(&sett) if err != nil { log.Println("Can't get orig local file, reason - ", err) } case "rem": imgUrl := fmt.Sprintf("%s://%s", sett.Scheme, sett.Context.Path) image, err = getRemoteImage(imgUrl) if err != nil { log.Println("Can't get orig remote file, reason - ", err) } } if !stringIsExists(sett.Context.Format, supportedFormats) { err = c.Set(sett.Context.CachePath, image) if err != nil { log.Println("Can't set cache, reason - ", err) } return image } buf, err := vips.Resize(image, sett.Options) if err != nil { log.Println("Can't resize image, reason - ", err) } err = c.Set(sett.Context.CachePath, buf) if err != nil { log.Println("Can't set cache, reason - ", err) } return buf } func stringIsExists(str string, list []string) bool { for _, el := range list { if el == str { return true } } return false } func parseVars(req *http.Request) map[string]string { params := make(map[string]string) match := settings.UrlExp.FindStringSubmatch(req.RequestURI) for i, name := range settings.UrlExp.SubexpNames() { params[name] = match[i] } return params } func fetchImage(rw http.ResponseWriter, req *http.Request) { acceptedTypes := strings.Split(req.Header["Accept"][0], ",") params := parseVars(req) sizes := strings.Split(params["size"], "x") settings.Options.Webp = stringIsExists(WEBP_HEADER, acceptedTypes) settings.Context.Storage = params["storage"] settings.Context.Path = params["path"] settings.Options.Width, _ = strconv.Atoi(sizes[0]) settings.Options.Height, _ = strconv.Atoi(sizes[1]) resultImage := getOrCreateImage() rw.Header().Set("Content-Length", strconv.Itoa(len(resultImage))) rw.Write(resultImage) } func main() { flag.Parse() settings.loadSettings() r := new(RegexpHandler) r.HandleFunc(settings.UrlExp, fetchImage) log.Printf("ImgWizard started on 
http://%s", settings.ListenAddr) http.ListenAndServe(settings.ListenAddr, r) }
// Copyright 2015 Kevin Yeh. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gohunt import ( "fmt" ) type User struct { ID int `json:"id"` Name string `json:"name"` Username string `json:"username"` Headline string `json:"headline"` Created string `json:"created_at"` Image map[string]string `json:"image"` ProfileUrl string `json:"profile_url"` WebsiteUrl string `json:"website_url"` } func (u User) Summary() string { return fmt.Sprintf("user[%s: %s]", u.Name, u.Headline) } added fields // Copyright 2015 Kevin Yeh. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gohunt import ( "fmt" ) type User struct { ID int `json:"id"` Name string `json:"name"` Username string `json:"username"` Headline string `json:"headline"` Created string `json:"created_at"` ImageUrl map[string]string `json:"image_url"` ProfileUrl string `json:"profile_url"` WebsiteUrl string `json:"website_url"` Votes []Vote `json:"votes"` Posts []Post `json:"posts"` MakerOf []Post `json:"maker_of"` Followers []User `json:"followers"` Following []User `json:"followings"` } func (u User) Summary() string { return fmt.Sprintf("user[%s: %s]", u.Name, u.Headline) }
// The following enables go generate to generate the doc.go file. //go:generate go run $VEYRON_ROOT/veyron/go/src/veyron.io/lib/cmdline/testdata/gendoc.go . -help package main import ( "fmt" "regexp" "strings" "code.google.com/p/go.tools/go/loader" "code.google.com/p/go.tools/go/types" "veyron.io/lib/cmdline" ) func main() { cmdGoPkg.Main() } var cmdGoPkg = &cmdline.Command{ Run: runGoPkg, Name: "gopkg", Short: "Print information about go package(s)", Long: ` Print information about go package(s). Example of printing all top-level information about the vdl package: veyron run gopkg veyron.io/veyron/veyron2/vdl Example of printing the names of all Test* funcs from the vdl package: veyron run gopkg -test -kind=func -name_re 'Test.*' -type_re 'func\(.*testing\.T\)' -noheader -notype veyron.io/veyron/veyron2/vdl `, ArgsName: "<args>", ArgsLong: loader.FromArgsUsage, } var ( flagTest bool flagNoHeader bool flagNoName bool flagNoType bool flagKind Kinds = KindAll flagNameRE string flagTypeRE string ) func init() { cmdGoPkg.Flags.BoolVar(&flagTest, "test", false, "Load test code (*_test.go) for packages.") cmdGoPkg.Flags.BoolVar(&flagNoHeader, "noheader", false, "Don't print headers.") cmdGoPkg.Flags.BoolVar(&flagNoName, "noname", false, "Don't print identifier names.") cmdGoPkg.Flags.BoolVar(&flagNoType, "notype", false, "Don't print type descriptions.") cmdGoPkg.Flags.Var(&flagKind, "kind", "Print information for the specified kinds, in the order listed.") cmdGoPkg.Flags.StringVar(&flagNameRE, "name_re", ".*", "Filter out identifier names that don't match this regexp.") cmdGoPkg.Flags.StringVar(&flagTypeRE, "type_re", ".*", "Filter out type descriptions that don't match this regexp.") } func parseRegexp(expr string) (*regexp.Regexp, error) { // Make sure the regexp performs a full match against the target string. 
expr = strings.TrimSpace(expr) if !strings.HasPrefix(expr, "^") { expr = "^" + expr } if !strings.HasSuffix(expr, "$") { expr = expr + "$" } return regexp.Compile(expr) } func runGoPkg(cmd *cmdline.Command, args []string) error { // Parse flags. nameRE, err := parseRegexp(flagNameRE) if err != nil { return err } typeRE, err := parseRegexp(flagTypeRE) if err != nil { return err } // Load packages specified in args. config := loader.Config{ SourceImports: false, TypeCheckFuncBodies: func(string) bool { return false }, } args, err = config.FromArgs(args, flagTest) if err != nil { return cmd.UsageErrorf("failed to parse args: %v", err) } if len(args) != 0 { return cmd.UsageErrorf("unrecognized args %q", args) } prog, err := config.Load() if err != nil { return err } // Print information for each loaded package. for _, pkginfo := range prog.InitialPackages() { pkg := pkginfo.Pkg if !flagNoHeader { fmt.Fprintf(cmd.Stdout(), "%s (%s)\n", pkg.Path(), pkg.Name()) } scope := pkg.Scope() data := make(map[Kind][]NameType) for _, name := range scope.Names() { if !nameRE.MatchString(name) { continue } kind, nt := NameTypeFromObject(scope.Lookup(name)) if !typeRE.MatchString(nt.Type) { continue } data[kind] = append(data[kind], nt) } for _, kind := range flagKind { if !flagNoHeader { fmt.Fprintf(cmd.Stdout(), "%ss\n", strings.Title(kind.String())) } for _, nt := range data[kind] { var line string if !flagNoName { line += " " + nt.Name } if !flagNoType { line += " " + nt.Type } line = strings.TrimSpace(line) if line != "" { fmt.Fprintf(cmd.Stdout(), " %s\n", line) } } } } return nil } // NameType holds the name and type of a top-level declaration. 
type NameType struct { Name string Type string } func NameTypeFromObject(obj types.Object) (Kind, NameType) { var kind Kind switch obj.(type) { case *types.Const: kind = Const case *types.Var: kind = Var case *types.Func: kind = Func case *types.TypeName: kind = Type default: panic(fmt.Errorf("unhandled types.Object %#v", obj)) } return kind, NameType{obj.Name(), obj.Type().String()} } // Kind describes the kind of a top-level declaration, usable as a flag. type Kind int // Kinds holds a slice of Kind, usable as a flag. type Kinds []Kind const ( Const Kind = iota // Top-level const declaration. Var // Top-level var declaration. Func // Top-level func declaration. Type // Top-level type declaration. ) var KindAll = Kinds{Const, Var, Func, Type} func KindFromString(s string) (k Kind, err error) { err = k.Set(s) return } func (k *Kind) Set(s string) error { switch s { case "const": *k = Const return nil case "var": *k = Var return nil case "func": *k = Func return nil case "type": *k = Type return nil default: *k = -1 return fmt.Errorf("unknown Kind %q", s) } } func (k Kind) String() string { switch k { case Const: return "const" case Var: return "var" case Func: return "func" case Type: return "type" default: return fmt.Sprintf("Kind(%d)", k) } } func (kinds *Kinds) Set(s string) error { *kinds = nil seen := make(map[Kind]bool) for _, kindstr := range strings.Split(s, ",") { if kindstr == "" { continue } k, err := KindFromString(kindstr) if err != nil { return err } if !seen[k] { seen[k] = true *kinds = append(*kinds, k) } } return nil } func (kinds Kinds) String() string { var strs []string for _, k := range kinds { strs = append(strs, k.String()) } return strings.Join(strs, ",") } tools/gopkg: changing the Go 1.3 import path to Go 1.4 import path Change-Id: I34581edd754ad76e404ad11f0de2f8d34319b095 // The following enables go generate to generate the doc.go file. //go:generate go run $VEYRON_ROOT/veyron/go/src/veyron.io/lib/cmdline/testdata/gendoc.go . 
-help package main import ( "fmt" "regexp" "strings" "golang.org/x/tools/go/loader" "golang.org/x/tools/go/types" "veyron.io/lib/cmdline" ) func main() { cmdGoPkg.Main() } var cmdGoPkg = &cmdline.Command{ Run: runGoPkg, Name: "gopkg", Short: "Print information about go package(s)", Long: ` Print information about go package(s). Example of printing all top-level information about the vdl package: veyron run gopkg veyron.io/veyron/veyron2/vdl Example of printing the names of all Test* funcs from the vdl package: veyron run gopkg -test -kind=func -name_re 'Test.*' -type_re 'func\(.*testing\.T\)' -noheader -notype veyron.io/veyron/veyron2/vdl `, ArgsName: "<args>", ArgsLong: loader.FromArgsUsage, } var ( flagTest bool flagNoHeader bool flagNoName bool flagNoType bool flagKind Kinds = KindAll flagNameRE string flagTypeRE string ) func init() { cmdGoPkg.Flags.BoolVar(&flagTest, "test", false, "Load test code (*_test.go) for packages.") cmdGoPkg.Flags.BoolVar(&flagNoHeader, "noheader", false, "Don't print headers.") cmdGoPkg.Flags.BoolVar(&flagNoName, "noname", false, "Don't print identifier names.") cmdGoPkg.Flags.BoolVar(&flagNoType, "notype", false, "Don't print type descriptions.") cmdGoPkg.Flags.Var(&flagKind, "kind", "Print information for the specified kinds, in the order listed.") cmdGoPkg.Flags.StringVar(&flagNameRE, "name_re", ".*", "Filter out identifier names that don't match this regexp.") cmdGoPkg.Flags.StringVar(&flagTypeRE, "type_re", ".*", "Filter out type descriptions that don't match this regexp.") } func parseRegexp(expr string) (*regexp.Regexp, error) { // Make sure the regexp performs a full match against the target string. expr = strings.TrimSpace(expr) if !strings.HasPrefix(expr, "^") { expr = "^" + expr } if !strings.HasSuffix(expr, "$") { expr = expr + "$" } return regexp.Compile(expr) } func runGoPkg(cmd *cmdline.Command, args []string) error { // Parse flags. 
nameRE, err := parseRegexp(flagNameRE) if err != nil { return err } typeRE, err := parseRegexp(flagTypeRE) if err != nil { return err } // Load packages specified in args. config := loader.Config{ SourceImports: false, TypeCheckFuncBodies: func(string) bool { return false }, } args, err = config.FromArgs(args, flagTest) if err != nil { return cmd.UsageErrorf("failed to parse args: %v", err) } if len(args) != 0 { return cmd.UsageErrorf("unrecognized args %q", args) } prog, err := config.Load() if err != nil { return err } // Print information for each loaded package. for _, pkginfo := range prog.InitialPackages() { pkg := pkginfo.Pkg if !flagNoHeader { fmt.Fprintf(cmd.Stdout(), "%s (%s)\n", pkg.Path(), pkg.Name()) } scope := pkg.Scope() data := make(map[Kind][]NameType) for _, name := range scope.Names() { if !nameRE.MatchString(name) { continue } kind, nt := NameTypeFromObject(scope.Lookup(name)) if !typeRE.MatchString(nt.Type) { continue } data[kind] = append(data[kind], nt) } for _, kind := range flagKind { if !flagNoHeader { fmt.Fprintf(cmd.Stdout(), "%ss\n", strings.Title(kind.String())) } for _, nt := range data[kind] { var line string if !flagNoName { line += " " + nt.Name } if !flagNoType { line += " " + nt.Type } line = strings.TrimSpace(line) if line != "" { fmt.Fprintf(cmd.Stdout(), " %s\n", line) } } } } return nil } // NameType holds the name and type of a top-level declaration. type NameType struct { Name string Type string } func NameTypeFromObject(obj types.Object) (Kind, NameType) { var kind Kind switch obj.(type) { case *types.Const: kind = Const case *types.Var: kind = Var case *types.Func: kind = Func case *types.TypeName: kind = Type default: panic(fmt.Errorf("unhandled types.Object %#v", obj)) } return kind, NameType{obj.Name(), obj.Type().String()} } // Kind describes the kind of a top-level declaration, usable as a flag. type Kind int // Kinds holds a slice of Kind, usable as a flag. 
type Kinds []Kind const ( Const Kind = iota // Top-level const declaration. Var // Top-level var declaration. Func // Top-level func declaration. Type // Top-level type declaration. ) var KindAll = Kinds{Const, Var, Func, Type} func KindFromString(s string) (k Kind, err error) { err = k.Set(s) return } func (k *Kind) Set(s string) error { switch s { case "const": *k = Const return nil case "var": *k = Var return nil case "func": *k = Func return nil case "type": *k = Type return nil default: *k = -1 return fmt.Errorf("unknown Kind %q", s) } } func (k Kind) String() string { switch k { case Const: return "const" case Var: return "var" case Func: return "func" case Type: return "type" default: return fmt.Sprintf("Kind(%d)", k) } } func (kinds *Kinds) Set(s string) error { *kinds = nil seen := make(map[Kind]bool) for _, kindstr := range strings.Split(s, ",") { if kindstr == "" { continue } k, err := KindFromString(kindstr) if err != nil { return err } if !seen[k] { seen[k] = true *kinds = append(*kinds, k) } } return nil } func (kinds Kinds) String() string { var strs []string for _, k := range kinds { strs = append(strs, k.String()) } return strings.Join(strs, ",") }
// Package goroon tests: exercise the SOAP client against recorded XML
// fixtures served through gock.
//
// NOTE: this chunk contained two complete copies of the file (before and
// after migrating the gock import from gopkg.in to github.com), separated
// by a commit-message fragment; the duplicate declarations could not
// compile, so only the post-image is kept.
package goroon

import (
	"strings"
	"testing"
	"time"

	"github.com/h2non/gock"
)

// TestScheduleGetEventsByTarget checks decoding of a schedule fetched for
// a specific user.
func TestScheduleGetEventsByTarget(t *testing.T) {
	defer gock.Off()
	gock.New("https://garoon.com").
		Post("/cbpapi/schedule/api").
		Reply(200).File("./test/fixtures/schedule/get_events_by_target.xml")

	client := NewClient("https://garoon.com")
	client.Username = "username"
	client.Password = "password"

	tm := time.Now()
	req := Parameters{
		Start: XmlDateTime{tm},
		End:   XmlDateTime{tm},
		User: User{
			Id: 1234,
		},
	}
	res, err := client.ScheduleGetEventsByTarget(&req)
	if err != nil {
		t.Fatalf("error is occured. %s", err.Error())
	}
	assert(t, len(res.ScheduleEvents), 1)
	ev := res.ScheduleEvents[0]
	assert(t, ev.Id, 123)
	assert(t, ev.Detail, "fugafuga")
	assert(t, ev.Description, "hogehoge")
	assert(t, len(ev.Members.Member), 1)
	member := ev.Members.Member[0]
	assert(t, member.User.Id, 1)
	assert(t, member.User.Name, "aaa")
}

// TestScheduleGetEvents checks decoding of a schedule range, including
// the repeat-condition fields.
func TestScheduleGetEvents(t *testing.T) {
	defer gock.Off()
	gock.New("https://garoon.com").
		Post("/cbpapi/schedule/api").
		Reply(200).File("./test/fixtures/schedule/get_events.xml")

	client := NewClient("https://garoon.com")
	client.Username = "username"
	client.Password = "password"

	tm := time.Now()
	req := Parameters{
		Start: XmlDateTime{tm},
		End:   XmlDateTime{tm},
	}
	res, err := client.ScheduleGetEvents(&req)
	if err != nil {
		t.Fatalf("error is occured. %s", err.Error())
	}
	assert(t, len(res.ScheduleEvents), 1)
	ev := res.ScheduleEvents[0]
	assert(t, ev.Id, 123)
	assert(t, ev.Detail, "fugafuga")
	assert(t, ev.Description, "hogehoge")
	assert(t, len(ev.Members.Member), 1)
	member := ev.Members.Member[0]
	assert(t, member.User.Id, 1)
	assert(t, member.User.Name, "aaa")
	assert(t, ev.RepeatInfo.Condition.StartDate, XmlDate{time.Date(2016, 11, 22, 0, 0, 0, 0, time.UTC)})
	assert(t, ev.RepeatInfo.Condition.EndDate, XmlDate{time.Date(2017, 4, 1, 0, 0, 0, 0, time.UTC)})
	assert(t, ev.RepeatInfo.Condition.StartTime, "14:00:00")
	assert(t, ev.RepeatInfo.Condition.EndTime, "14:30:00")
	assert(t, ev.RepeatInfo.Condition.Day, 20)
	assert(t, ev.RepeatInfo.Condition.Week, 2)
	assert(t, ev.RepeatInfo.Condition.Type, "week")
}

// TestBaseGetUserByLoginName checks decoding of the user-lookup response.
func TestBaseGetUserByLoginName(t *testing.T) {
	defer gock.Off()
	gock.New("https://garoon.com").
		Post("/cbpapi/base/api").
		Reply(200).File("./test/fixtures/base/get_user_by_login_name.xml")

	client := NewClient("https://garoon.com")
	client.Username = "username"
	client.Password = "password"

	req := Parameters{
		LoginName: []string{"hoge"},
	}
	res, err := client.BaseGetUserByLoginName(&req)
	if err != nil {
		t.Fatalf("error is occured. %s", err.Error())
	}
	assert(t, len(res.User), 2)
	adm := res.User[0]
	assert(t, adm.Key, 1)
	assert(t, adm.Version, 1245376338)
	assert(t, adm.Name, "Administrator")
	assert(t, adm.Status, 0)
	u1 := res.User[1]
	assert(t, u1.Key, 2)
	assert(t, u1.Version, 1245919830)
	assert(t, u1.Name, "u1")
	assert(t, u1.Status, 0)
	assert(t, u1.Phone, "9180xxxxxx")
	assert(t, u1.Description, "user1 is ...")
	assert(t, u1.Title, "test test")
}

// TestBulletinGetFollows checks decoding of bulletin-board follows.
func TestBulletinGetFollows(t *testing.T) {
	defer gock.Off()
	gock.New("https://garoon.com").
		Post("/cbpapi/bulletin/api").
		Reply(200).File("./test/fixtures/bulletin/get_follows.xml")

	client := NewClient("https://garoon.com")
	client.Username = "username"
	client.Password = "password"

	req := Parameters{
		TopicId: 123,
		Offset:  0,
		Limit:   20,
	}
	res, err := client.BulletinGetFollows(&req)
	if err != nil {
		t.Fatalf("error is occured. %s", err.Error())
	}
	assert(t, len(res.Follow), 4)
	assert(t, res.Follow[0].Creator.Name, "huy")
}

// TestUtilLogin checks the login call and the session cookie it returns.
func TestUtilLogin(t *testing.T) {
	defer gock.Off()
	gock.New("https://garoon.com").
		Post("/util_api/util/api").
		Reply(200).File("./test/fixtures/util_api/util_login.xml")

	client := NewClient("https://garoon.com")
	client.Username = "username"
	client.Password = "password"

	req := Parameters{
		LoginName: []string{"username"},
		Password:  "password",
	}
	res, err := client.UtilLogin(&req)
	if err != nil {
		t.Fatalf("error is occured. %s", err.Error())
	}
	if !strings.Contains(res.Cookie, "CBSESSID=C735B4069ccf104Ce0f2bf12a7cc62f115db9e676f6e72f2;") {
		t.Fatalf("expect %v, get %v", "", res.Cookie)
	}
	assert(t, res.LoginName, "Administrator")
	assert(t, res.Status, "Login")
}

// assert fails the test when expect != actual (compared via interface
// equality).
func assert(t *testing.T, expect interface{}, actual interface{}) {
	if expect != actual {
		t.Fatalf("expect %v, get %v", expect, actual)
	}
}
// Copyright 2012 Andreas Louca. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Example client for gosnmp.
//
// NOTE: this chunk contained two complete copies of the example (a GetNext
// walk and a GetBulk variant) separated by a commit-message fragment;
// duplicate package/main declarations cannot build, so only the later
// GetBulk version is kept.
package main

import (
	"fmt"

	"github.com/alouca/gosnmp"
)

func main() {
	const cmdTimeout = 10
	const cmdOid = ".1.3.6.1.4.1.12356.101.4.1.1" // Device Version
	//const cmdOid = ".1.3.6.1.4.1.12356.101.4.1.4.0" // MemUsage

	s, err := gosnmp.NewGoSNMP("10.168.1.1", "public", gosnmp.Version2c, cmdTimeout)
	if err != nil {
		fmt.Printf("Error creating SNMP instance: %s\n", err.Error())
		return
	}
	//s.SetDebug(true)
	//s.SetVerbose(true)

	// OIDs to request in one bulk operation; all but the first are kept
	// commented out, as in the original example.
	oids := []string{
		".1.3.6.1.4.1.12356.101.4.1.1",
		//".1.3.6.1.4.1.12356.101.4.1.2.0",
		// ".1.3.6.1.4.1.12356.101.4.1.3.0",
		// ".1.3.6.1.4.1.12356.101.4.1.4.0",
		// ".1.3.6.1.4.1.12356.101.4.1.5.0",
		// ".1.3.6.1.4.1.12356.101.4.1.6.0",
		// ".1.3.6.1.4.1.12356.101.4.1.7.0",
		// ".1.3.6.1.4.1.12356.101.4.1.8.0",
		// ".1.3.6.1.4.1.12356.101.4.1.9.0",
	}
	s.SetTimeout(cmdTimeout)
	fmt.Printf("Getting %s\n", oids)
	resp, err := s.GetBulk(0, 50, oids...)
	if err != nil {
		fmt.Printf("Error getting response: %s\n", err.Error())
	} else {
		for _, v := range resp.Variables {
			fmt.Printf("%s -> ", v.Name)
			switch v.Type {
			case gosnmp.OctetString:
				if s, ok := v.Value.(string); ok {
					fmt.Printf("%s\n", s)
				} else {
					fmt.Printf("Response is not a string\n")
				}
			default:
				fmt.Printf("Type: %s(%#x) - Value: %v\n", v.Type, int(v.Type), v.Value)
			}
		}
	}
}
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Command gover manages saved versions of the Go tree. // // gover saves builds of the Go source tree and runs commands using // saved Go versions. For example, // // cd $GOROOT // git checkout go1.5.1 // gover build 1.5.1 // // will checkout Go 1.5.1, build the source tree, and save it under // the name "1.5.1", as well as its commit hash (f2e4c8b). You can // then later run commands with Go 1.5.1. For example, the following // will run "go install" using Go 1.5.1: // // gover 1.5.1 install package main import ( "bytes" "crypto/sha1" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "os/user" "path/filepath" "regexp" "runtime" "sort" "strconv" "strings" "syscall" "time" ) var ( verbose = flag.Bool("v", false, "print commands being run") verDir = flag.String("dir", defaultVerDir(), "`directory` of saved Go roots") noDedup = flag.Bool("no-dedup", false, "disable deduplication of saved trees") gorootFlag = flag.String("C", defaultGoroot(), "use `dir` as the root of the Go tree for save and build") ) var binTools = []string{"go", "godoc", "gofmt"} func defaultVerDir() string { cache := os.Getenv("XDG_CACHE_HOME") if cache == "" { home := os.Getenv("HOME") if home == "" { u, err := user.Current() if err != nil { home = u.HomeDir } } cache = filepath.Join(home, ".cache") } return filepath.Join(cache, "gover") } func defaultGoroot() string { c := exec.Command("git", "rev-parse", "--show-cdup") output, err := c.Output() if err != nil { return "" } goroot := strings.TrimSpace(string(output)) if goroot == "" { // The empty string is --show-cdup's helpful way of // saying "the current directory". goroot = "." } if !isGoroot(goroot) { return "" } return goroot } // isGoroot returns true if path is the root of a Go tree. It is // somewhat heuristic. 
func isGoroot(path string) bool { st, err := os.Stat(filepath.Join(path, "src", "cmd", "go")) return err == nil && st.IsDir() } func main() { log.SetFlags(0) flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage:\n") fmt.Fprintf(os.Stderr, " %s [flags] save [name] - save current build\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] <name> <args>... - run go <args> using build <name>\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] run <name> <command>... - run <command> using PATH and GOROOT for build <name>\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] env <name> - print the environment for build <name> as shell code\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] build [name] - build and save current version\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] list - list saved builds\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] gc - clean the deduplication cache", os.Args[0]) fmt.Fprintf(os.Stderr, "\nFlags:\n") flag.PrintDefaults() } flag.Parse() if flag.NArg() < 1 { flag.Usage() os.Exit(2) } // Make gorootFlag absolute. if *gorootFlag != "" { abs, err := filepath.Abs(*gorootFlag) if err != nil { *gorootFlag = abs } } switch flag.Arg(0) { case "save", "build": if flag.NArg() > 2 { flag.Usage() os.Exit(2) } hash, diff := getHash() name := "" if flag.NArg() >= 2 { name = flag.Arg(1) if name == hash { name = "" } } // Validate paths. 
savePath, hashExists := getSavePath(hash) nameExists, nameRight := false, true if name != "" { st2, err := os.Stat(filepath.Join(*verDir, name)) nameExists = err == nil && st2.IsDir() if nameExists { st, _ := os.Stat(savePath) nameRight = os.SameFile(st, st2) } } if flag.Arg(0) == "build" { if hashExists { if !nameRight { log.Fatalf("name `%s' exists and refers to another build", name) } msg := fmt.Sprintf("saved build `%s' already exists", hash) if !nameExists { doLink(hash, name) msg += fmt.Sprintf("; added name `%s'", name) } fmt.Fprintln(os.Stderr, msg) os.Exit(0) } doBuild() } else { if hashExists { log.Fatalf("saved build `%s' already exists", hash) } if nameExists { log.Fatalf("saved build `%s' already exists", name) } } doSave(hash, diff) doLink(hash, name) if name == "" { fmt.Fprintf(os.Stderr, "saved build as `%s'\n", hash) } else { fmt.Fprintf(os.Stderr, "saved build as `%s' and `%s'\n", hash, name) } case "list": if flag.NArg() > 1 { flag.Usage() os.Exit(2) } doList() case "run": if flag.NArg() < 3 { flag.Usage() os.Exit(2) } doRun(flag.Arg(1), flag.Args()[2:]) case "env": if flag.NArg() != 2 { flag.Usage() os.Exit(2) } doEnv(flag.Arg(1)) case "gc": if flag.NArg() > 1 { flag.Usage() os.Exit(2) } doGC() default: if flag.NArg() < 2 { flag.Usage() os.Exit(2) } if _, ok := getSavePath(flag.Arg(0)); !ok { log.Fatalf("unknown name or subcommand `%s'", flag.Arg(0)) } doRun(flag.Arg(0), append([]string{"go"}, flag.Args()[1:]...)) } } func goroot() string { if *gorootFlag == "" { log.Fatal("not a git repository") } return *gorootFlag } func gitCmd(cmd string, args ...string) string { args = append([]string{"-C", goroot(), cmd}, args...) c := exec.Command("git", args...) 
c.Stderr = os.Stderr output, err := c.Output() if err != nil { log.Fatalf("error executing git %s: %s", strings.Join(args, " "), err) } return string(output) } func getSavePath(name string) (string, bool) { savePath := filepath.Join(*verDir, name) st, err := os.Stat(savePath) return savePath, err == nil && st.IsDir() } func getHash() (string, []byte) { rev := strings.TrimSpace(string(gitCmd("rev-parse", "--short", "HEAD"))) diff := []byte(gitCmd("diff", "HEAD")) if len(bytes.TrimSpace(diff)) > 0 { diffHash := fmt.Sprintf("%x", sha1.Sum(diff)) return rev + "+" + diffHash[:10], diff } return rev, nil } func doBuild() { c := exec.Command("./make.bash") c.Dir = filepath.Join(goroot(), "src") c.Stdout = os.Stdout c.Stderr = os.Stderr if err := c.Run(); err != nil { log.Fatalf("error executing make.bash: %s", err) os.Exit(1) } } func doSave(hash string, diff []byte) { // Create a minimal GOROOT at $GOROOT/gover/hash. savePath, _ := getSavePath(hash) goos, goarch := runtime.GOOS, runtime.GOARCH if x := os.Getenv("GOOS"); x != "" { goos = x } if x := os.Getenv("GOARCH"); x != "" { goarch = x } osArch := goos + "_" + goarch goroot := goroot() for _, binTool := range binTools { src := filepath.Join(goroot, "bin", binTool) if _, err := os.Stat(src); err == nil { cp(src, filepath.Join(savePath, "bin", binTool)) } } cpR(filepath.Join(goroot, "pkg", osArch), filepath.Join(savePath, "pkg", osArch)) cpR(filepath.Join(goroot, "pkg", "tool", osArch), filepath.Join(savePath, "pkg", "tool", osArch)) cpR(filepath.Join(goroot, "pkg", "include"), filepath.Join(savePath, "pkg", "include")) cpR(filepath.Join(goroot, "src"), filepath.Join(savePath, "src")) if diff != nil { if err := ioutil.WriteFile(filepath.Join(savePath, "diff"), diff, 0666); err != nil { log.Fatal(err) } } // Save commit object. 
commit := gitCmd("cat-file", "commit", "HEAD") if err := ioutil.WriteFile(filepath.Join(savePath, "commit"), []byte(commit), 0666); err != nil { log.Fatal(err) } } func doLink(hash, name string) { if name != "" && name != hash { savePath, _ := getSavePath(name) err := os.Symlink(hash, savePath) if err != nil { log.Fatal(err) } } } type commit struct { authorDate time.Time topLine string } func parseCommit(obj []byte) commit { out := commit{} lines := strings.Split(string(obj), "\n") for i, line := range lines { if strings.HasPrefix(line, "author ") { fs := strings.Fields(line) secs, err := strconv.ParseInt(fs[len(fs)-2], 10, 64) if err != nil { log.Fatalf("malformed author in commit: %s", err) } out.authorDate = time.Unix(secs, 0) } if len(line) == 0 { out.topLine = lines[i+1] break } } return out } type saveInfo struct { base string names []string commit commit } type saveInfoSorter []*saveInfo func (s saveInfoSorter) Len() int { return len(s) } func (s saveInfoSorter) Less(i, j int) bool { return s[i].commit.authorDate.Before(s[j].commit.authorDate) } func (s saveInfoSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func doList() { files, err := ioutil.ReadDir(*verDir) if os.IsNotExist(err) { return } else if err != nil { log.Fatal(err) } baseMap := make(map[string]*saveInfo) bases := []*saveInfo{} for _, file := range files { if !file.IsDir() || file.Name() == "_dedup" { continue } info := &saveInfo{base: file.Name(), names: []string{}} baseMap[file.Name()] = info bases = append(bases, info) commit, err := ioutil.ReadFile(filepath.Join(*verDir, file.Name(), "commit")) if os.IsNotExist(err) { continue } info.commit = parseCommit(commit) } for _, file := range files { if file.Mode()&os.ModeType == os.ModeSymlink { base, err := os.Readlink(filepath.Join(*verDir, file.Name())) if err != nil { continue } if info, ok := baseMap[base]; ok { info.names = append(info.names, file.Name()) } } } sort.Sort(saveInfoSorter(bases)) for _, info := range bases { 
fmt.Print(info.base) if !info.commit.authorDate.IsZero() { fmt.Printf(" %s", info.commit.authorDate.Local().Format("2006-01-02T15:04:05")) } if len(info.names) > 0 { fmt.Printf(" %s", info.names) } if info.commit.topLine != "" { fmt.Printf(" %s", info.commit.topLine) } fmt.Println() } } func doRun(name string, cmd []string) { savePath, ok := getSavePath(name) if !ok { log.Fatalf("unknown name `%s'", name) } c := exec.Command(cmd[0], cmd[1:]...) // Build the command environment. for _, env := range os.Environ() { if strings.HasPrefix(env, "GOROOT=") || strings.HasPrefix(env, "PATH=") { continue } c.Env = append(c.Env, env) } goroot, path := getEnv(savePath) c.Env = append(c.Env, "GOROOT="+goroot, "PATH="+path) // Run command. c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr if err := c.Run(); err != nil { fmt.Printf("command failed: %s\n", err) os.Exit(1) } } func doEnv(name string) { savePath, ok := getSavePath(name) if !ok { log.Fatalf("unknown name `%s'", name) } goroot, path := getEnv(savePath) fmt.Printf("PATH=%s;\n", shellEscape(path)) fmt.Printf("GOROOT=%s;\n", shellEscape(goroot)) fmt.Printf("export GOROOT;\n") } // getEnv returns the GOROOT and PATH for the Go tree rooted at savePath. func getEnv(savePath string) (goroot, path string) { p := []string{filepath.Join(savePath, "bin")} // Strip existing Go tree from PATH. for _, dir := range filepath.SplitList(os.Getenv("PATH")) { if isGoroot(filepath.Join(dir, "..")) { continue } p = append(p, dir) } return savePath, strings.Join(p, string(filepath.ListSeparator)) } var goodDedupPath = regexp.MustCompile("/[0-9a-f]{2}/[0-9a-f]{38}$") func doGC() { removed := 0 filepath.Walk(filepath.Join(*verDir, "_dedup"), func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } if st, err := os.Stat(path); err == nil { st, ok := st.Sys().(*syscall.Stat_t) if !ok || st.Nlink != 1 { return nil } if !goodDedupPath.MatchString(path) { // Be paranoid about removing files. 
log.Printf("unexpected file in dedup cache: %s\n", path) return nil } if err := os.Remove(path); err != nil { log.Printf("failed to remove %s: %v", path, err) } else { removed++ } } return nil }) fmt.Printf("removed %d unused file(s)\n", removed) } func cp(src, dst string) { data, err := ioutil.ReadFile(src) if err != nil { log.Fatal(err) } writeFile, xdst := true, dst if !*noDedup { hash := fmt.Sprintf("%x", sha1.Sum(data)) xdst = filepath.Join(*verDir, "_dedup", hash[:2], hash[2:]) if _, err := os.Stat(xdst); err == nil { writeFile = false } } if writeFile { if *verbose { fmt.Printf("cp %s %s\n", src, xdst) } st, err := os.Stat(src) if err != nil { log.Fatal(err) } if err := os.MkdirAll(filepath.Dir(xdst), 0777); err != nil { log.Fatal(err) } if err := ioutil.WriteFile(xdst, data, st.Mode()); err != nil { log.Fatal(err) } if err := os.Chtimes(xdst, st.ModTime(), st.ModTime()); err != nil { log.Fatal(err) } } if dst != xdst { if *verbose { fmt.Printf("ln %s %s\n", xdst, dst) } if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { log.Fatal(err) } if err := os.Link(xdst, dst); err != nil { log.Fatal(err) } } } func cpR(src, dst string) { filepath.Walk(src, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } base := filepath.Base(path) if base == "core" || strings.HasSuffix(base, ".test") { return nil } cp(path, dst+path[len(src):]) return nil }) } gover: fix run for go commands 86a2394 made run work for arbitrary commands, but in the process it broke running commands from the Go tree bin. In particular, the command was resolved using the current PATH, even though it was run with the augmented PATH. As a result, if you tried to run the go command it would be taken from whatever was in your PATH (even though it would ultimately run the right compiler by consulting GOROOT). If there was no go binary in your PATH, it would simply fail. Fix this by setting gover's PATH before resolving the run command. 
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Command gover manages saved versions of the Go tree. // // gover saves builds of the Go source tree and runs commands using // saved Go versions. For example, // // cd $GOROOT // git checkout go1.5.1 // gover build 1.5.1 // // will checkout Go 1.5.1, build the source tree, and save it under // the name "1.5.1", as well as its commit hash (f2e4c8b). You can // then later run commands with Go 1.5.1. For example, the following // will run "go install" using Go 1.5.1: // // gover 1.5.1 install package main import ( "bytes" "crypto/sha1" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "os/user" "path/filepath" "regexp" "runtime" "sort" "strconv" "strings" "syscall" "time" ) var ( verbose = flag.Bool("v", false, "print commands being run") verDir = flag.String("dir", defaultVerDir(), "`directory` of saved Go roots") noDedup = flag.Bool("no-dedup", false, "disable deduplication of saved trees") gorootFlag = flag.String("C", defaultGoroot(), "use `dir` as the root of the Go tree for save and build") ) var binTools = []string{"go", "godoc", "gofmt"} func defaultVerDir() string { cache := os.Getenv("XDG_CACHE_HOME") if cache == "" { home := os.Getenv("HOME") if home == "" { u, err := user.Current() if err != nil { home = u.HomeDir } } cache = filepath.Join(home, ".cache") } return filepath.Join(cache, "gover") } func defaultGoroot() string { c := exec.Command("git", "rev-parse", "--show-cdup") output, err := c.Output() if err != nil { return "" } goroot := strings.TrimSpace(string(output)) if goroot == "" { // The empty string is --show-cdup's helpful way of // saying "the current directory". goroot = "." } if !isGoroot(goroot) { return "" } return goroot } // isGoroot returns true if path is the root of a Go tree. It is // somewhat heuristic. 
func isGoroot(path string) bool { st, err := os.Stat(filepath.Join(path, "src", "cmd", "go")) return err == nil && st.IsDir() } func main() { log.SetFlags(0) flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage:\n") fmt.Fprintf(os.Stderr, " %s [flags] save [name] - save current build\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] <name> <args>... - run go <args> using build <name>\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] run <name> <command>... - run <command> using PATH and GOROOT for build <name>\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] env <name> - print the environment for build <name> as shell code\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] build [name] - build and save current version\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] list - list saved builds\n", os.Args[0]) fmt.Fprintf(os.Stderr, " %s [flags] gc - clean the deduplication cache", os.Args[0]) fmt.Fprintf(os.Stderr, "\nFlags:\n") flag.PrintDefaults() } flag.Parse() if flag.NArg() < 1 { flag.Usage() os.Exit(2) } // Make gorootFlag absolute. if *gorootFlag != "" { abs, err := filepath.Abs(*gorootFlag) if err != nil { *gorootFlag = abs } } switch flag.Arg(0) { case "save", "build": if flag.NArg() > 2 { flag.Usage() os.Exit(2) } hash, diff := getHash() name := "" if flag.NArg() >= 2 { name = flag.Arg(1) if name == hash { name = "" } } // Validate paths. 
savePath, hashExists := getSavePath(hash) nameExists, nameRight := false, true if name != "" { st2, err := os.Stat(filepath.Join(*verDir, name)) nameExists = err == nil && st2.IsDir() if nameExists { st, _ := os.Stat(savePath) nameRight = os.SameFile(st, st2) } } if flag.Arg(0) == "build" { if hashExists { if !nameRight { log.Fatalf("name `%s' exists and refers to another build", name) } msg := fmt.Sprintf("saved build `%s' already exists", hash) if !nameExists { doLink(hash, name) msg += fmt.Sprintf("; added name `%s'", name) } fmt.Fprintln(os.Stderr, msg) os.Exit(0) } doBuild() } else { if hashExists { log.Fatalf("saved build `%s' already exists", hash) } if nameExists { log.Fatalf("saved build `%s' already exists", name) } } doSave(hash, diff) doLink(hash, name) if name == "" { fmt.Fprintf(os.Stderr, "saved build as `%s'\n", hash) } else { fmt.Fprintf(os.Stderr, "saved build as `%s' and `%s'\n", hash, name) } case "list": if flag.NArg() > 1 { flag.Usage() os.Exit(2) } doList() case "run": if flag.NArg() < 3 { flag.Usage() os.Exit(2) } doRun(flag.Arg(1), flag.Args()[2:]) case "env": if flag.NArg() != 2 { flag.Usage() os.Exit(2) } doEnv(flag.Arg(1)) case "gc": if flag.NArg() > 1 { flag.Usage() os.Exit(2) } doGC() default: if flag.NArg() < 2 { flag.Usage() os.Exit(2) } if _, ok := getSavePath(flag.Arg(0)); !ok { log.Fatalf("unknown name or subcommand `%s'", flag.Arg(0)) } doRun(flag.Arg(0), append([]string{"go"}, flag.Args()[1:]...)) } } func goroot() string { if *gorootFlag == "" { log.Fatal("not a git repository") } return *gorootFlag } func gitCmd(cmd string, args ...string) string { args = append([]string{"-C", goroot(), cmd}, args...) c := exec.Command("git", args...) 
c.Stderr = os.Stderr output, err := c.Output() if err != nil { log.Fatalf("error executing git %s: %s", strings.Join(args, " "), err) } return string(output) } func getSavePath(name string) (string, bool) { savePath := filepath.Join(*verDir, name) st, err := os.Stat(savePath) return savePath, err == nil && st.IsDir() } func getHash() (string, []byte) { rev := strings.TrimSpace(string(gitCmd("rev-parse", "--short", "HEAD"))) diff := []byte(gitCmd("diff", "HEAD")) if len(bytes.TrimSpace(diff)) > 0 { diffHash := fmt.Sprintf("%x", sha1.Sum(diff)) return rev + "+" + diffHash[:10], diff } return rev, nil } func doBuild() { c := exec.Command("./make.bash") c.Dir = filepath.Join(goroot(), "src") c.Stdout = os.Stdout c.Stderr = os.Stderr if err := c.Run(); err != nil { log.Fatalf("error executing make.bash: %s", err) os.Exit(1) } } func doSave(hash string, diff []byte) { // Create a minimal GOROOT at $GOROOT/gover/hash. savePath, _ := getSavePath(hash) goos, goarch := runtime.GOOS, runtime.GOARCH if x := os.Getenv("GOOS"); x != "" { goos = x } if x := os.Getenv("GOARCH"); x != "" { goarch = x } osArch := goos + "_" + goarch goroot := goroot() for _, binTool := range binTools { src := filepath.Join(goroot, "bin", binTool) if _, err := os.Stat(src); err == nil { cp(src, filepath.Join(savePath, "bin", binTool)) } } cpR(filepath.Join(goroot, "pkg", osArch), filepath.Join(savePath, "pkg", osArch)) cpR(filepath.Join(goroot, "pkg", "tool", osArch), filepath.Join(savePath, "pkg", "tool", osArch)) cpR(filepath.Join(goroot, "pkg", "include"), filepath.Join(savePath, "pkg", "include")) cpR(filepath.Join(goroot, "src"), filepath.Join(savePath, "src")) if diff != nil { if err := ioutil.WriteFile(filepath.Join(savePath, "diff"), diff, 0666); err != nil { log.Fatal(err) } } // Save commit object. 
commit := gitCmd("cat-file", "commit", "HEAD") if err := ioutil.WriteFile(filepath.Join(savePath, "commit"), []byte(commit), 0666); err != nil { log.Fatal(err) } } func doLink(hash, name string) { if name != "" && name != hash { savePath, _ := getSavePath(name) err := os.Symlink(hash, savePath) if err != nil { log.Fatal(err) } } } type commit struct { authorDate time.Time topLine string } func parseCommit(obj []byte) commit { out := commit{} lines := strings.Split(string(obj), "\n") for i, line := range lines { if strings.HasPrefix(line, "author ") { fs := strings.Fields(line) secs, err := strconv.ParseInt(fs[len(fs)-2], 10, 64) if err != nil { log.Fatalf("malformed author in commit: %s", err) } out.authorDate = time.Unix(secs, 0) } if len(line) == 0 { out.topLine = lines[i+1] break } } return out } type saveInfo struct { base string names []string commit commit } type saveInfoSorter []*saveInfo func (s saveInfoSorter) Len() int { return len(s) } func (s saveInfoSorter) Less(i, j int) bool { return s[i].commit.authorDate.Before(s[j].commit.authorDate) } func (s saveInfoSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func doList() { files, err := ioutil.ReadDir(*verDir) if os.IsNotExist(err) { return } else if err != nil { log.Fatal(err) } baseMap := make(map[string]*saveInfo) bases := []*saveInfo{} for _, file := range files { if !file.IsDir() || file.Name() == "_dedup" { continue } info := &saveInfo{base: file.Name(), names: []string{}} baseMap[file.Name()] = info bases = append(bases, info) commit, err := ioutil.ReadFile(filepath.Join(*verDir, file.Name(), "commit")) if os.IsNotExist(err) { continue } info.commit = parseCommit(commit) } for _, file := range files { if file.Mode()&os.ModeType == os.ModeSymlink { base, err := os.Readlink(filepath.Join(*verDir, file.Name())) if err != nil { continue } if info, ok := baseMap[base]; ok { info.names = append(info.names, file.Name()) } } } sort.Sort(saveInfoSorter(bases)) for _, info := range bases { 
fmt.Print(info.base) if !info.commit.authorDate.IsZero() { fmt.Printf(" %s", info.commit.authorDate.Local().Format("2006-01-02T15:04:05")) } if len(info.names) > 0 { fmt.Printf(" %s", info.names) } if info.commit.topLine != "" { fmt.Printf(" %s", info.commit.topLine) } fmt.Println() } } func doRun(name string, cmd []string) { savePath, ok := resolveName(name) if !ok { log.Fatalf("unknown name `%s'", name) } goroot, path := getEnv(savePath) // exec.Command looks up the command in this process' PATH. // Unfortunately, this is a rather complex process and there's // no way to provide a different PATH, so set the process' // PATH. os.Setenv("PATH", path) c := exec.Command(cmd[0], cmd[1:]...) // Build the rest of the command environment. for _, env := range os.Environ() { if strings.HasPrefix(env, "GOROOT=") { continue } c.Env = append(c.Env, env) } c.Env = append(c.Env, "GOROOT="+goroot) // Run command. c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr if err := c.Run(); err != nil { fmt.Printf("command failed: %s\n", err) os.Exit(1) } } func doEnv(name string) { savePath, ok := getSavePath(name) if !ok { log.Fatalf("unknown name `%s'", name) } goroot, path := getEnv(savePath) fmt.Printf("PATH=%s;\n", shellEscape(path)) fmt.Printf("GOROOT=%s;\n", shellEscape(goroot)) fmt.Printf("export GOROOT;\n") } // getEnv returns the GOROOT and PATH for the Go tree rooted at savePath. func getEnv(savePath string) (goroot, path string) { p := []string{filepath.Join(savePath, "bin")} // Strip existing Go tree from PATH. 
for _, dir := range filepath.SplitList(os.Getenv("PATH")) { if isGoroot(filepath.Join(dir, "..")) { continue } p = append(p, dir) } return savePath, strings.Join(p, string(filepath.ListSeparator)) } var goodDedupPath = regexp.MustCompile("/[0-9a-f]{2}/[0-9a-f]{38}$") func doGC() { removed := 0 filepath.Walk(filepath.Join(*verDir, "_dedup"), func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } if st, err := os.Stat(path); err == nil { st, ok := st.Sys().(*syscall.Stat_t) if !ok || st.Nlink != 1 { return nil } if !goodDedupPath.MatchString(path) { // Be paranoid about removing files. log.Printf("unexpected file in dedup cache: %s\n", path) return nil } if err := os.Remove(path); err != nil { log.Printf("failed to remove %s: %v", path, err) } else { removed++ } } return nil }) fmt.Printf("removed %d unused file(s)\n", removed) } func cp(src, dst string) { data, err := ioutil.ReadFile(src) if err != nil { log.Fatal(err) } writeFile, xdst := true, dst if !*noDedup { hash := fmt.Sprintf("%x", sha1.Sum(data)) xdst = filepath.Join(*verDir, "_dedup", hash[:2], hash[2:]) if _, err := os.Stat(xdst); err == nil { writeFile = false } } if writeFile { if *verbose { fmt.Printf("cp %s %s\n", src, xdst) } st, err := os.Stat(src) if err != nil { log.Fatal(err) } if err := os.MkdirAll(filepath.Dir(xdst), 0777); err != nil { log.Fatal(err) } if err := ioutil.WriteFile(xdst, data, st.Mode()); err != nil { log.Fatal(err) } if err := os.Chtimes(xdst, st.ModTime(), st.ModTime()); err != nil { log.Fatal(err) } } if dst != xdst { if *verbose { fmt.Printf("ln %s %s\n", xdst, dst) } if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { log.Fatal(err) } if err := os.Link(xdst, dst); err != nil { log.Fatal(err) } } } func cpR(src, dst string) { filepath.Walk(src, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } base := filepath.Base(path) if base == "core" || strings.HasSuffix(base, ".test") { return nil } cp(path, 
dst+path[len(src):]) return nil }) }
// Package graph provides basic graph data structures.
package graph

// Graph is a collection of vertices and the edges connecting them.
type Graph struct {
	vertices []int
	edges    []Edge
}

// Edge joins two vertices, referenced by pointer.
type Edge struct {
	v1, v2 *int
}
// Copyright 2014 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. See the AUTHORS file // for names of contributors. // // Author: Jiang-Ming Yang (jiangming.yang@gmail.com) package engine import ( "bytes" "math" "testing" gogoproto "code.google.com/p/gogoprotobuf/proto" "github.com/cockroachdb/cockroach/proto" "github.com/cockroachdb/cockroach/util/encoding" ) // Constants for system-reserved keys in the KV map. var ( testKey01 = Key("/db1") testKey02 = Key("/db2") testKey03 = Key("/db3") testKey04 = Key("/db4") txn01 = []byte("Txn01") txn02 = []byte("Txn02") value01 = proto.Value{Bytes: []byte("testValue01")} value02 = proto.Value{Bytes: []byte("testValue02")} value03 = proto.Value{Bytes: []byte("testValue03")} value04 = proto.Value{Bytes: []byte("testValue04")} valueEmpty = proto.Value{} ) // createTestMVCC creates a new MVCC instance with the given engine. func createTestMVCC(t *testing.T) *MVCC { return &MVCC{ engine: NewInMem(proto.Attributes{}, 1<<20), } } // makeTS creates a new hybrid logical timestamp. 
func makeTS(nanos int64, logical int32) proto.Timestamp { return proto.Timestamp{ WallTime: nanos, Logical: logical, } } func TestMVCCGetNotExist(t *testing.T) { mvcc := createTestMVCC(t) value, err := mvcc.Get(testKey01, makeTS(0, 0), nil) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } } func TestMVCCPutWithBadValue(t *testing.T) { mvcc := createTestMVCC(t) badValue := proto.Value{Bytes: []byte("a"), Integer: gogoproto.Int64(1)} err := mvcc.Put(testKey01, makeTS(0, 0), badValue, nil) if err == nil { t.Fatal("expected an error putting a value with both byte slice and integer components") } } func TestMVCCPutWithTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } value, err := mvcc.Get(testKey01, makeTS(1, 0), txn01) if err != nil { t.Fatal(err) } if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCPutWithoutTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, nil) if err != nil { t.Fatal(err) } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCUpdateExistingKey(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, nil) if err != nil { t.Fatal(err) } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } err = mvcc.Put(testKey01, makeTS(2, 0), value02, nil) if err != nil { t.Fatal(err) } // Read the latest version. 
value, err = mvcc.Get(testKey01, makeTS(3, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value02.Bytes, value.Bytes) } // Read the old version. value, err = mvcc.Get(testKey01, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCUpdateExistingKeyOldVersion(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 1), value01, nil) if err != nil { t.Fatal(err) } // Earlier walltime. err = mvcc.Put(testKey01, makeTS(0, 0), value02, nil) if err == nil { t.Fatal("expected error on old version") } // Earlier logical time. err = mvcc.Put(testKey01, makeTS(1, 0), value02, nil) if err == nil { t.Fatal("expected error on old version") } } func TestMVCCUpdateExistingKeyInTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } err = mvcc.Put(testKey01, makeTS(1, 0), value01, txn01) if err != nil { t.Fatal(err) } } func TestMVCCUpdateExistingKeyDiffTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } err = mvcc.Put(testKey01, makeTS(1, 0), value02, txn02) if err == nil { t.Fatal("expected error on uncommitted write intent") } } func TestMVCCGetNoMoreOldVersion(t *testing.T) { // Need to handle the case here where the scan takes us to the // next key, which may not match the key we're looking for. In // other words, if we're looking for a<T=2>, and we have the // following keys: // // a: MVCCMetadata(a) // a<T=3> // b: MVCCMetadata(b) // b<T=1> // // If we search for a<T=2>, the scan should not return "b". 
mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(3, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) value, err := mvcc.Get(testKey01, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } } func TestMVCCGetAndDelete(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) value, err := mvcc.Get(testKey01, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } err = mvcc.Delete(testKey01, makeTS(3, 0), nil) if err != nil { t.Fatal(err) } // Read the latest version which should be deleted. value, err = mvcc.Get(testKey01, makeTS(4, 0), nil) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } // Read the old version which should still exist. for _, logical := range []int32{0, math.MaxInt32} { value, err = mvcc.Get(testKey01, makeTS(2, logical), nil) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } } } func TestMVCCGetAndDeleteInTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, txn01) value, err := mvcc.Get(testKey01, makeTS(2, 0), txn01) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } err = mvcc.Delete(testKey01, makeTS(3, 0), txn01) if err != nil { t.Fatal(err) } // Read the latest version which should be deleted. value, err = mvcc.Get(testKey01, makeTS(4, 0), txn01) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } // Read the old version which should still exist. 
value, err = mvcc.Get(testKey01, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } } func TestMVCCGetWriteIntentError(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } _, err = mvcc.Get(testKey01, makeTS(1, 0), nil) if err == nil { t.Fatal("cannot read the value of a write intent without TxnID") } _, err = mvcc.Get(testKey01, makeTS(1, 0), txn02) if err == nil { t.Fatal("cannot read the value of a write intent from a different TxnID") } } func TestMVCCScan(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey01, makeTS(2, 0), value04, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey02, makeTS(3, 0), value03, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, nil) err = mvcc.Put(testKey03, makeTS(4, 0), value02, nil) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) err = mvcc.Put(testKey04, makeTS(5, 0), value01, nil) kvs, _, err := mvcc.Scan(testKey02, testKey04, 0, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[1].Key, testKey03) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value03.Bytes) { t.Fatal("the value should not be empty") } kvs, _, err = mvcc.Scan(testKey02, testKey04, 0, makeTS(4, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[1].Key, testKey03) || !bytes.Equal(kvs[0].Value.Bytes, value03.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value02.Bytes) { t.Fatal("the value should not be empty") } kvs, _, err = mvcc.Scan(testKey04, KeyMax, 0, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey04) || !bytes.Equal(kvs[0].Value.Bytes, value04.Bytes) { t.Fatal("the value should not be 
empty") } _, err = mvcc.Get(testKey01, makeTS(1, 0), txn02) kvs, _, err = mvcc.Scan(KeyMin, testKey02, 0, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey01) || !bytes.Equal(kvs[0].Value.Bytes, value01.Bytes) { t.Fatal("the value should not be empty") } } func TestMVCCScanMaxNum(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, nil) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) kvs, _, err := mvcc.Scan(testKey02, testKey04, 1, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) { t.Fatal("the value should not be empty") } } func TestMVCCScanWithKeyPrefix(t *testing.T) { mvcc := createTestMVCC(t) // Let's say you have: // a // a<T=2> // a<T=1> // aa // aa<T=3> // aa<T=2> // b // b<T=5> // In this case, if we scan from "a"-"b", we wish to skip // a<T=2> and a<T=1> and find "aa'. 
err := mvcc.Put(Key(encoding.EncodeString([]byte{}, "/a")), makeTS(1, 0), value01, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/a")), makeTS(2, 0), value02, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/aa")), makeTS(2, 0), value02, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/aa")), makeTS(3, 0), value03, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/b")), makeTS(1, 0), value03, nil) kvs, _, err := mvcc.Scan(Key(encoding.EncodeString([]byte{}, "/a")), Key(encoding.EncodeString([]byte{}, "/b")), 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, Key(encoding.EncodeString([]byte{}, "/a"))) || !bytes.Equal(kvs[1].Key, Key(encoding.EncodeString([]byte{}, "/aa"))) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value02.Bytes) { t.Fatal("the value should not be empty") } } func TestMVCCScanInTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, txn01) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) kvs, _, err := mvcc.Scan(testKey02, testKey04, 0, makeTS(1, 0), txn01) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[1].Key, testKey03) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value03.Bytes) { t.Fatal("the value should not be empty") } kvs, _, err = mvcc.Scan(testKey02, testKey04, 0, makeTS(1, 0), nil) if err == nil { t.Fatal("expected error on uncommitted write intent") } } func TestMVCCDeleteRange(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, nil) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) num, err := 
mvcc.DeleteRange(testKey02, testKey04, 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if num != 2 { t.Fatal("the value should not be empty") } kvs, _, _ := mvcc.Scan(KeyMin, KeyMax, 0, makeTS(2, 0), nil) if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey01) || !bytes.Equal(kvs[1].Key, testKey04) || !bytes.Equal(kvs[0].Value.Bytes, value01.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value04.Bytes) { t.Fatal("the value should not be empty") } num, err = mvcc.DeleteRange(testKey04, KeyMax, 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if num != 1 { t.Fatal("the value should not be empty") } kvs, _, _ = mvcc.Scan(KeyMin, KeyMax, 0, makeTS(2, 0), nil) if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey01) || !bytes.Equal(kvs[0].Value.Bytes, value01.Bytes) { t.Fatal("the value should not be empty") } num, err = mvcc.DeleteRange(KeyMin, testKey02, 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if num != 1 { t.Fatal("the value should not be empty") } kvs, _, _ = mvcc.Scan(KeyMin, KeyMax, 0, makeTS(2, 0), nil) if len(kvs) != 0 { t.Fatal("the value should be empty") } } func TestMVCCDeleteRangeFailed(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, txn01) err = mvcc.Put(testKey03, makeTS(1, 0), value03, txn01) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) _, err = mvcc.DeleteRange(testKey02, testKey04, 0, makeTS(1, 0), nil) if err == nil { t.Fatal("expected error on uncommitted write intent") } _, err = mvcc.DeleteRange(testKey02, testKey04, 0, makeTS(1, 0), txn01) if err != nil { t.Fatal(err) } } func TestMVCCDeleteRangeConcurrentTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, txn01) err = mvcc.Put(testKey03, makeTS(2, 0), value03, txn02) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) _, err = mvcc.DeleteRange(testKey02, testKey04, 0, 
makeTS(1, 0), txn01) if err == nil { t.Fatal("expected error on uncommitted write intent") } } func TestMVCCConditionalPut(t *testing.T) { mvcc := createTestMVCC(t) actualVal, err := mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, &value02, nil) if err == nil { t.Fatal("expected error on key not exists") } if actualVal != nil { t.Fatalf("expected missing actual value: %v", actualVal) } // Verify the difference between missing value and empty value. actualVal, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, &valueEmpty, nil) if err == nil { t.Fatal("expected error on key not exists") } if actualVal != nil { t.Fatalf("expected missing actual value: %v", actualVal) } // Do a conditional put with expectation that the value is completely missing; will succeed. _, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, nil, nil) if err != nil { t.Fatalf("expected success with condition that key doesn't yet exist: %v", err) } // Another conditional put expecting value missing will fail, now that value01 is written. actualVal, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, nil, nil) if err == nil { t.Fatal("expected error on key already exists") } if !bytes.Equal(actualVal.Bytes, value01.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", actualVal.Bytes, value01.Bytes) } // Conditional put expecting wrong value02, will fail. actualVal, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, &value02, nil) if err == nil { t.Fatal("expected error on key does not match") } if !bytes.Equal(actualVal.Bytes, value01.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", actualVal.Bytes, value01.Bytes) } // Move to a empty value. Will succeed. _, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), valueEmpty, &value01, nil) if err != nil { t.Fatal(err) } // Now move to value02 from expected empty value. 
_, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value02, &valueEmpty, nil) if err != nil { t.Fatal(err) } // Verify we get value02 as expected. value, err := mvcc.Get(testKey01, makeTS(0, 0), nil) if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCResolveTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) value, err := mvcc.Get(testKey01, makeTS(1, 0), txn01) if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } err = mvcc.ResolveWriteIntent(testKey01, txn01, true) if err != nil { t.Fatal(err) } value, err = mvcc.Get(testKey01, makeTS(1, 0), nil) if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCAbortTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) err = mvcc.ResolveWriteIntent(testKey01, txn01, false) if err != nil { t.Fatal(err) } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if value != nil { t.Fatalf("the value should be empty") } meta, err := mvcc.engine.Get(encoding.EncodeBinary(nil, testKey01)) if err != nil { t.Fatal(err) } if len(meta) != 0 { t.Fatalf("expected no more MVCCMetadata") } } func TestMVCCAbortTxnWithPreviousVersion(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, nil) err = mvcc.Put(testKey01, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey01, makeTS(2, 0), value03, txn01) err = mvcc.ResolveWriteIntent(testKey01, txn01, false) meta, err := mvcc.engine.Get(encoding.EncodeBinary(nil, testKey01)) if err != nil { t.Fatal(err) } if len(meta) == 0 { t.Fatalf("expected the MVCCMetadata") } value, err := mvcc.Get(testKey01, makeTS(3, 0), nil) if err != nil { 
t.Fatal(err) } if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value.Bytes, value02.Bytes) } } func TestMVCCResolveTxnFailure(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.ResolveWriteIntent(testKey01, txn01, true) if err == nil { t.Fatal("expected error on key not exist") } err = mvcc.Put(testKey01, makeTS(0, 0), value01, nil) err = mvcc.ResolveWriteIntent(testKey01, txn02, true) if err == nil { t.Fatal("expected error on write intent not exist") } err = mvcc.Put(testKey01, makeTS(1, 0), value02, txn01) err = mvcc.ResolveWriteIntent(testKey01, txn02, true) if err == nil { t.Fatal("expected error due to other txn") } } func TestMVCCResolveTxnRange(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) err = mvcc.Put(testKey02, makeTS(0, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(0, 0), value03, txn02) err = mvcc.Put(testKey04, makeTS(0, 0), value04, txn01) num, err := mvcc.ResolveWriteIntentRange(testKey01, testKey04, 0, txn01, true) if err != nil { t.Fatal(err) } if num != 1 { t.Fatal("expected only one key to be committed") } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } value, err = mvcc.Get(testKey02, makeTS(1, 0), nil) if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value02.Bytes, value.Bytes) } value, err = mvcc.Get(testKey03, makeTS(1, 0), txn02) if !bytes.Equal(value03.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value03.Bytes, value.Bytes) } value, err = mvcc.Get(testKey04, makeTS(1, 0), txn01) if !bytes.Equal(value04.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value04.Bytes, 
value.Bytes) } } Added a test to verify sort ordering for binary encoded MVCC metadata keys and versioned value keys. // Copyright 2014 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. See the AUTHORS file // for names of contributors. // // Author: Jiang-Ming Yang (jiangming.yang@gmail.com) package engine import ( "bytes" "math" "reflect" "sort" "testing" gogoproto "code.google.com/p/gogoprotobuf/proto" "github.com/cockroachdb/cockroach/proto" "github.com/cockroachdb/cockroach/util/encoding" ) // Constants for system-reserved keys in the KV map. var ( testKey01 = Key("/db1") testKey02 = Key("/db2") testKey03 = Key("/db3") testKey04 = Key("/db4") txn01 = []byte("Txn01") txn02 = []byte("Txn02") value01 = proto.Value{Bytes: []byte("testValue01")} value02 = proto.Value{Bytes: []byte("testValue02")} value03 = proto.Value{Bytes: []byte("testValue03")} value04 = proto.Value{Bytes: []byte("testValue04")} valueEmpty = proto.Value{} ) // createTestMVCC creates a new MVCC instance with the given engine. func createTestMVCC(t *testing.T) *MVCC { return &MVCC{ engine: NewInMem(proto.Attributes{}, 1<<20), } } // makeTS creates a new hybrid logical timestamp. func makeTS(nanos int64, logical int32) proto.Timestamp { return proto.Timestamp{ WallTime: nanos, Logical: logical, } } // Verify the sort ordering of successive keys with metadata and // versioned values. 
In particular, the following sequence of keys /
// versions:
//
// a
// a<t=1>
// a<t=0>
// a\x00
// a\x00<t=1>
// a\x00<t=0>
func TestMVCCKeys(t *testing.T) {
	aBinKey := encoding.EncodeBinary(nil, []byte("a"))
	a0BinKey := encoding.EncodeBinary(nil, []byte("a\x00"))
	keys := []string{
		string(aBinKey),
		string(mvccEncodeKey(aBinKey, makeTS(1, 0))),
		string(mvccEncodeKey(aBinKey, makeTS(0, 0))),
		string(a0BinKey),
		string(mvccEncodeKey(a0BinKey, makeTS(1, 0))),
		string(mvccEncodeKey(a0BinKey, makeTS(0, 0))),
	}
	sortKeys := make([]string, len(keys))
	copy(sortKeys, keys)
	sort.Strings(sortKeys)
	if !reflect.DeepEqual(sortKeys, keys) {
		// Fix: t.Error does not interpret printf directives; the %s
		// verbs require t.Errorf (flagged by go vet's printf check).
		t.Errorf("expected keys to sort in order %s, but got %s", keys, sortKeys)
	}
}

func TestMVCCGetNotExist(t *testing.T) {
	mvcc := createTestMVCC(t)
	value, err := mvcc.Get(testKey01, makeTS(0, 0), nil)
	if err != nil {
		t.Fatal(err)
	}
	if value != nil {
		t.Fatal("the value should be empty")
	}
}

func TestMVCCPutWithBadValue(t *testing.T) {
	mvcc := createTestMVCC(t)
	badValue := proto.Value{Bytes: []byte("a"), Integer: gogoproto.Int64(1)}
	err := mvcc.Put(testKey01, makeTS(0, 0), badValue, nil)
	if err == nil {
		t.Fatal("expected an error putting a value with both byte slice and integer components")
	}
}

func TestMVCCPutWithTxn(t *testing.T) {
	mvcc := createTestMVCC(t)
	err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01)
	if err != nil {
		t.Fatal(err)
	}
	value, err := mvcc.Get(testKey01, makeTS(1, 0), txn01)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(value01.Bytes, value.Bytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes)
	}
}

func TestMVCCPutWithoutTxn(t *testing.T) {
	mvcc := createTestMVCC(t)
	err := mvcc.Put(testKey01, makeTS(0, 0), value01, nil)
	if err != nil {
		t.Fatal(err)
	}
	value, err := mvcc.Get(testKey01, makeTS(1, 0), nil)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(value01.Bytes, value.Bytes) {
		t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes,
value.Bytes) } } func TestMVCCUpdateExistingKey(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, nil) if err != nil { t.Fatal(err) } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } err = mvcc.Put(testKey01, makeTS(2, 0), value02, nil) if err != nil { t.Fatal(err) } // Read the latest version. value, err = mvcc.Get(testKey01, makeTS(3, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value02.Bytes, value.Bytes) } // Read the old version. value, err = mvcc.Get(testKey01, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCUpdateExistingKeyOldVersion(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 1), value01, nil) if err != nil { t.Fatal(err) } // Earlier walltime. err = mvcc.Put(testKey01, makeTS(0, 0), value02, nil) if err == nil { t.Fatal("expected error on old version") } // Earlier logical time. 
err = mvcc.Put(testKey01, makeTS(1, 0), value02, nil) if err == nil { t.Fatal("expected error on old version") } } func TestMVCCUpdateExistingKeyInTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } err = mvcc.Put(testKey01, makeTS(1, 0), value01, txn01) if err != nil { t.Fatal(err) } } func TestMVCCUpdateExistingKeyDiffTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } err = mvcc.Put(testKey01, makeTS(1, 0), value02, txn02) if err == nil { t.Fatal("expected error on uncommitted write intent") } } func TestMVCCGetNoMoreOldVersion(t *testing.T) { // Need to handle the case here where the scan takes us to the // next key, which may not match the key we're looking for. In // other words, if we're looking for a<T=2>, and we have the // following keys: // // a: MVCCMetadata(a) // a<T=3> // b: MVCCMetadata(b) // b<T=1> // // If we search for a<T=2>, the scan should not return "b". mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(3, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) value, err := mvcc.Get(testKey01, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } } func TestMVCCGetAndDelete(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) value, err := mvcc.Get(testKey01, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } err = mvcc.Delete(testKey01, makeTS(3, 0), nil) if err != nil { t.Fatal(err) } // Read the latest version which should be deleted. value, err = mvcc.Get(testKey01, makeTS(4, 0), nil) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } // Read the old version which should still exist. 
for _, logical := range []int32{0, math.MaxInt32} { value, err = mvcc.Get(testKey01, makeTS(2, logical), nil) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } } } func TestMVCCGetAndDeleteInTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, txn01) value, err := mvcc.Get(testKey01, makeTS(2, 0), txn01) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } err = mvcc.Delete(testKey01, makeTS(3, 0), txn01) if err != nil { t.Fatal(err) } // Read the latest version which should be deleted. value, err = mvcc.Get(testKey01, makeTS(4, 0), txn01) if err != nil { t.Fatal(err) } if value != nil { t.Fatal("the value should be empty") } // Read the old version which should still exist. value, err = mvcc.Get(testKey01, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if value == nil { t.Fatal("the value should not be empty") } } func TestMVCCGetWriteIntentError(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) if err != nil { t.Fatal(err) } _, err = mvcc.Get(testKey01, makeTS(1, 0), nil) if err == nil { t.Fatal("cannot read the value of a write intent without TxnID") } _, err = mvcc.Get(testKey01, makeTS(1, 0), txn02) if err == nil { t.Fatal("cannot read the value of a write intent from a different TxnID") } } func TestMVCCScan(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey01, makeTS(2, 0), value04, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey02, makeTS(3, 0), value03, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, nil) err = mvcc.Put(testKey03, makeTS(4, 0), value02, nil) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) err = mvcc.Put(testKey04, makeTS(5, 0), value01, nil) kvs, _, err := mvcc.Scan(testKey02, testKey04, 0, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 
2 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[1].Key, testKey03) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value03.Bytes) { t.Fatal("the value should not be empty") } kvs, _, err = mvcc.Scan(testKey02, testKey04, 0, makeTS(4, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[1].Key, testKey03) || !bytes.Equal(kvs[0].Value.Bytes, value03.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value02.Bytes) { t.Fatal("the value should not be empty") } kvs, _, err = mvcc.Scan(testKey04, KeyMax, 0, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey04) || !bytes.Equal(kvs[0].Value.Bytes, value04.Bytes) { t.Fatal("the value should not be empty") } _, err = mvcc.Get(testKey01, makeTS(1, 0), txn02) kvs, _, err = mvcc.Scan(KeyMin, testKey02, 0, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey01) || !bytes.Equal(kvs[0].Value.Bytes, value01.Bytes) { t.Fatal("the value should not be empty") } } func TestMVCCScanMaxNum(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, nil) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) kvs, _, err := mvcc.Scan(testKey02, testKey04, 1, makeTS(1, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) { t.Fatal("the value should not be empty") } } func TestMVCCScanWithKeyPrefix(t *testing.T) { mvcc := createTestMVCC(t) // Let's say you have: // a // a<T=2> // a<T=1> // aa // aa<T=3> // aa<T=2> // b // b<T=5> // In this case, if we scan from "a"-"b", we wish to skip // a<T=2> and a<T=1> and find "aa'. 
err := mvcc.Put(Key(encoding.EncodeString([]byte{}, "/a")), makeTS(1, 0), value01, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/a")), makeTS(2, 0), value02, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/aa")), makeTS(2, 0), value02, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/aa")), makeTS(3, 0), value03, nil) err = mvcc.Put(Key(encoding.EncodeString([]byte{}, "/b")), makeTS(1, 0), value03, nil) kvs, _, err := mvcc.Scan(Key(encoding.EncodeString([]byte{}, "/a")), Key(encoding.EncodeString([]byte{}, "/b")), 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, Key(encoding.EncodeString([]byte{}, "/a"))) || !bytes.Equal(kvs[1].Key, Key(encoding.EncodeString([]byte{}, "/aa"))) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value02.Bytes) { t.Fatal("the value should not be empty") } } func TestMVCCScanInTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, txn01) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) kvs, _, err := mvcc.Scan(testKey02, testKey04, 0, makeTS(1, 0), txn01) if err != nil { t.Fatal(err) } if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey02) || !bytes.Equal(kvs[1].Key, testKey03) || !bytes.Equal(kvs[0].Value.Bytes, value02.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value03.Bytes) { t.Fatal("the value should not be empty") } kvs, _, err = mvcc.Scan(testKey02, testKey04, 0, makeTS(1, 0), nil) if err == nil { t.Fatal("expected error on uncommitted write intent") } } func TestMVCCDeleteRange(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(1, 0), value03, nil) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) num, err := 
mvcc.DeleteRange(testKey02, testKey04, 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if num != 2 { t.Fatal("the value should not be empty") } kvs, _, _ := mvcc.Scan(KeyMin, KeyMax, 0, makeTS(2, 0), nil) if len(kvs) != 2 || !bytes.Equal(kvs[0].Key, testKey01) || !bytes.Equal(kvs[1].Key, testKey04) || !bytes.Equal(kvs[0].Value.Bytes, value01.Bytes) || !bytes.Equal(kvs[1].Value.Bytes, value04.Bytes) { t.Fatal("the value should not be empty") } num, err = mvcc.DeleteRange(testKey04, KeyMax, 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if num != 1 { t.Fatal("the value should not be empty") } kvs, _, _ = mvcc.Scan(KeyMin, KeyMax, 0, makeTS(2, 0), nil) if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey01) || !bytes.Equal(kvs[0].Value.Bytes, value01.Bytes) { t.Fatal("the value should not be empty") } num, err = mvcc.DeleteRange(KeyMin, testKey02, 0, makeTS(2, 0), nil) if err != nil { t.Fatal(err) } if num != 1 { t.Fatal("the value should not be empty") } kvs, _, _ = mvcc.Scan(KeyMin, KeyMax, 0, makeTS(2, 0), nil) if len(kvs) != 0 { t.Fatal("the value should be empty") } } func TestMVCCDeleteRangeFailed(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, txn01) err = mvcc.Put(testKey03, makeTS(1, 0), value03, txn01) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) _, err = mvcc.DeleteRange(testKey02, testKey04, 0, makeTS(1, 0), nil) if err == nil { t.Fatal("expected error on uncommitted write intent") } _, err = mvcc.DeleteRange(testKey02, testKey04, 0, makeTS(1, 0), txn01) if err != nil { t.Fatal(err) } } func TestMVCCDeleteRangeConcurrentTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(1, 0), value01, nil) err = mvcc.Put(testKey02, makeTS(1, 0), value02, txn01) err = mvcc.Put(testKey03, makeTS(2, 0), value03, txn02) err = mvcc.Put(testKey04, makeTS(1, 0), value04, nil) _, err = mvcc.DeleteRange(testKey02, testKey04, 0, 
makeTS(1, 0), txn01) if err == nil { t.Fatal("expected error on uncommitted write intent") } } func TestMVCCConditionalPut(t *testing.T) { mvcc := createTestMVCC(t) actualVal, err := mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, &value02, nil) if err == nil { t.Fatal("expected error on key not exists") } if actualVal != nil { t.Fatalf("expected missing actual value: %v", actualVal) } // Verify the difference between missing value and empty value. actualVal, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, &valueEmpty, nil) if err == nil { t.Fatal("expected error on key not exists") } if actualVal != nil { t.Fatalf("expected missing actual value: %v", actualVal) } // Do a conditional put with expectation that the value is completely missing; will succeed. _, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, nil, nil) if err != nil { t.Fatalf("expected success with condition that key doesn't yet exist: %v", err) } // Another conditional put expecting value missing will fail, now that value01 is written. actualVal, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, nil, nil) if err == nil { t.Fatal("expected error on key already exists") } if !bytes.Equal(actualVal.Bytes, value01.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", actualVal.Bytes, value01.Bytes) } // Conditional put expecting wrong value02, will fail. actualVal, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value01, &value02, nil) if err == nil { t.Fatal("expected error on key does not match") } if !bytes.Equal(actualVal.Bytes, value01.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", actualVal.Bytes, value01.Bytes) } // Move to a empty value. Will succeed. _, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), valueEmpty, &value01, nil) if err != nil { t.Fatal(err) } // Now move to value02 from expected empty value. 
_, err = mvcc.ConditionalPut(testKey01, makeTS(0, 0), value02, &valueEmpty, nil) if err != nil { t.Fatal(err) } // Verify we get value02 as expected. value, err := mvcc.Get(testKey01, makeTS(0, 0), nil) if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCResolveTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) value, err := mvcc.Get(testKey01, makeTS(1, 0), txn01) if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } err = mvcc.ResolveWriteIntent(testKey01, txn01, true) if err != nil { t.Fatal(err) } value, err = mvcc.Get(testKey01, makeTS(1, 0), nil) if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } } func TestMVCCAbortTxn(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) err = mvcc.ResolveWriteIntent(testKey01, txn01, false) if err != nil { t.Fatal(err) } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if value != nil { t.Fatalf("the value should be empty") } meta, err := mvcc.engine.Get(encoding.EncodeBinary(nil, testKey01)) if err != nil { t.Fatal(err) } if len(meta) != 0 { t.Fatalf("expected no more MVCCMetadata") } } func TestMVCCAbortTxnWithPreviousVersion(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, nil) err = mvcc.Put(testKey01, makeTS(1, 0), value02, nil) err = mvcc.Put(testKey01, makeTS(2, 0), value03, txn01) err = mvcc.ResolveWriteIntent(testKey01, txn01, false) meta, err := mvcc.engine.Get(encoding.EncodeBinary(nil, testKey01)) if err != nil { t.Fatal(err) } if len(meta) == 0 { t.Fatalf("expected the MVCCMetadata") } value, err := mvcc.Get(testKey01, makeTS(3, 0), nil) if err != nil { 
t.Fatal(err) } if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value.Bytes, value02.Bytes) } } func TestMVCCResolveTxnFailure(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.ResolveWriteIntent(testKey01, txn01, true) if err == nil { t.Fatal("expected error on key not exist") } err = mvcc.Put(testKey01, makeTS(0, 0), value01, nil) err = mvcc.ResolveWriteIntent(testKey01, txn02, true) if err == nil { t.Fatal("expected error on write intent not exist") } err = mvcc.Put(testKey01, makeTS(1, 0), value02, txn01) err = mvcc.ResolveWriteIntent(testKey01, txn02, true) if err == nil { t.Fatal("expected error due to other txn") } } func TestMVCCResolveTxnRange(t *testing.T) { mvcc := createTestMVCC(t) err := mvcc.Put(testKey01, makeTS(0, 0), value01, txn01) err = mvcc.Put(testKey02, makeTS(0, 0), value02, nil) err = mvcc.Put(testKey03, makeTS(0, 0), value03, txn02) err = mvcc.Put(testKey04, makeTS(0, 0), value04, txn01) num, err := mvcc.ResolveWriteIntentRange(testKey01, testKey04, 0, txn01, true) if err != nil { t.Fatal(err) } if num != 1 { t.Fatal("expected only one key to be committed") } value, err := mvcc.Get(testKey01, makeTS(1, 0), nil) if !bytes.Equal(value01.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value01.Bytes, value.Bytes) } value, err = mvcc.Get(testKey02, makeTS(1, 0), nil) if !bytes.Equal(value02.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value02.Bytes, value.Bytes) } value, err = mvcc.Get(testKey03, makeTS(1, 0), txn02) if !bytes.Equal(value03.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value03.Bytes, value.Bytes) } value, err = mvcc.Get(testKey04, makeTS(1, 0), txn01) if !bytes.Equal(value04.Bytes, value.Bytes) { t.Fatalf("the value %s in get result does not match the value %s in request", value04.Bytes, 
value.Bytes) } }
package kubernetes

import (
	"strings"
	"time"

	jose "gopkg.in/square/go-jose.v2"

	"github.com/dexidp/dex/storage"
	"github.com/dexidp/dex/storage/kubernetes/k8sapi"
)

// crdMeta is the shared TypeMeta stamped onto every CRD this package defines.
var crdMeta = k8sapi.TypeMeta{
	APIVersion: "apiextensions.k8s.io/v1beta1",
	Kind:       "CustomResourceDefinition",
}

// apiGroup is the API group under which all dex custom resources live.
const apiGroup = "dex.coreos.com"

// The set of custom resource definitions required by the storage. These are managed by
// the storage so it can migrate itself by creating new resources.
var customResourceDefinitions = []k8sapi.CustomResourceDefinition{
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "authcodes.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "authcodes",
				Singular: "authcode",
				Kind:     "AuthCode",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "authrequests.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "authrequests",
				Singular: "authrequest",
				Kind:     "AuthRequest",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "oauth2clients.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "oauth2clients",
				Singular: "oauth2client",
				Kind:     "OAuth2Client",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "signingkeies.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				// `signingkeies` is an artifact from the old TPR pluralization.
				// Users don't directly interact with this value, hence leaving it
				// as is.
				Plural:   "signingkeies",
				Singular: "signingkey",
				Kind:     "SigningKey",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "refreshtokens.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "refreshtokens",
				Singular: "refreshtoken",
				Kind:     "RefreshToken",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "passwords.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "passwords",
				Singular: "password",
				Kind:     "Password",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "offlinesessionses.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "offlinesessionses",
				Singular: "offlinesessions",
				Kind:     "OfflineSessions",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "connectors.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "connectors",
				Singular: "connector",
				Kind:     "Connector",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "devicerequests.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "devicerequests",
				Singular: "devicerequest",
				Kind:     "DeviceRequest",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "devicetokens.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "devicetokens",
				Singular: "devicetoken",
				Kind:     "DeviceToken",
			},
		},
	},
}

// There will only ever be a single keys resource. Maintain this by setting a
// common name.
const keysName = "openid-connect-keys"

// Client is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type Client struct {
	// Name is a hash of the ID.
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// ID is immutable, since it's a primary key and should not be changed.
	ID string `json:"id,omitempty"`

	Secret       string   `json:"secret,omitempty"`
	RedirectURIs []string `json:"redirectURIs,omitempty"`
	TrustedPeers []string `json:"trustedPeers,omitempty"`

	Public bool `json:"public"`

	Name    string `json:"name,omitempty"`
	LogoURL string `json:"logoURL,omitempty"`
}

// ClientList is a list of Clients.
type ClientList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	Clients         []Client `json:"items"`
}

// fromStorageClient converts a storage.Client into its Kubernetes resource
// form; the object name is derived by hashing the client ID via idToName.
func (cli *client) fromStorageClient(c storage.Client) Client {
	return Client{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindClient,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      cli.idToName(c.ID),
			Namespace: cli.namespace,
		},
		ID:           c.ID,
		Secret:       c.Secret,
		RedirectURIs: c.RedirectURIs,
		TrustedPeers: c.TrustedPeers,
		Public:       c.Public,
		Name:         c.Name,
		LogoURL:      c.LogoURL,
	}
}

// toStorageClient converts a Kubernetes Client resource back to the storage
// form, dropping the Kubernetes metadata.
func toStorageClient(c Client) storage.Client {
	return storage.Client{
		ID:           c.ID,
		Secret:       c.Secret,
		RedirectURIs: c.RedirectURIs,
		TrustedPeers: c.TrustedPeers,
		Public:       c.Public,
		Name:         c.Name,
		LogoURL:      c.LogoURL,
	}
}

// Claims is a mirrored struct from storage with JSON struct tags.
type Claims struct {
	UserID            string   `json:"userID"`
	Username          string   `json:"username"`
	PreferredUsername string   `json:"preferredUsername"`
	Email             string   `json:"email"`
	EmailVerified     bool     `json:"emailVerified"`
	Groups            []string `json:"groups,omitempty"`
}

// fromStorageClaims converts storage.Claims to the JSON-taggable mirror type.
func fromStorageClaims(i storage.Claims) Claims {
	return Claims{
		UserID:            i.UserID,
		Username:          i.Username,
		PreferredUsername: i.PreferredUsername,
		Email:             i.Email,
		EmailVerified:     i.EmailVerified,
		Groups:            i.Groups,
	}
}

// toStorageClaims converts the mirror Claims type back to storage.Claims.
func toStorageClaims(i Claims) storage.Claims {
	return storage.Claims{
		UserID:            i.UserID,
		Username:          i.Username,
		PreferredUsername: i.PreferredUsername,
		Email:             i.Email,
		EmailVerified:     i.EmailVerified,
		Groups:            i.Groups,
	}
}

// AuthRequest is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type AuthRequest struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	ClientID      string   `json:"clientID"`
	ResponseTypes []string `json:"responseTypes,omitempty"`
	Scopes        []string `json:"scopes,omitempty"`
	RedirectURI   string   `json:"redirectURI"`

	Nonce string `json:"nonce,omitempty"`
	State string `json:"state,omitempty"`

	// The client has indicated that the end user must be shown an approval prompt
	// on all requests. The server cannot cache their initial action for subsequent
	// attempts.
	ForceApprovalPrompt bool `json:"forceApprovalPrompt,omitempty"`

	LoggedIn bool `json:"loggedIn"`

	// The identity of the end user. Generally nil until the user authenticates
	// with a backend.
	Claims Claims `json:"claims,omitempty"`

	// The connector used to login the user. Set when the user authenticates.
	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`

	Expiry time.Time `json:"expiry"`

	CodeChallenge       string `json:"code_challenge,omitempty"`
	CodeChallengeMethod string `json:"code_challenge_method,omitempty"`
}

// AuthRequestList is a list of AuthRequests.
type AuthRequestList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	AuthRequests    []AuthRequest `json:"items"`
}

// toStorageAuthRequest converts a Kubernetes AuthRequest resource back to the
// storage form; the request ID is recovered from the object name.
func toStorageAuthRequest(req AuthRequest) storage.AuthRequest {
	a := storage.AuthRequest{
		ID:                  req.ObjectMeta.Name,
		ClientID:            req.ClientID,
		ResponseTypes:       req.ResponseTypes,
		Scopes:              req.Scopes,
		RedirectURI:         req.RedirectURI,
		Nonce:               req.Nonce,
		State:               req.State,
		ForceApprovalPrompt: req.ForceApprovalPrompt,
		LoggedIn:            req.LoggedIn,
		ConnectorID:         req.ConnectorID,
		ConnectorData:       req.ConnectorData,
		Expiry:              req.Expiry,
		Claims:              toStorageClaims(req.Claims),
		PKCE: storage.PKCE{
			CodeChallenge:       req.CodeChallenge,
			CodeChallengeMethod: req.CodeChallengeMethod,
		},
	}
	return a
}

// fromStorageAuthRequest converts a storage.AuthRequest into its Kubernetes
// resource form, using the request ID as the object name.
func (cli *client) fromStorageAuthRequest(a storage.AuthRequest) AuthRequest {
	req := AuthRequest{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindAuthRequest,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      a.ID,
			Namespace: cli.namespace,
		},
		ClientID:            a.ClientID,
		ResponseTypes:       a.ResponseTypes,
		Scopes:              a.Scopes,
		RedirectURI:         a.RedirectURI,
		Nonce:               a.Nonce,
		State:               a.State,
		LoggedIn:            a.LoggedIn,
		ForceApprovalPrompt: a.ForceApprovalPrompt,
		ConnectorID:         a.ConnectorID,
		ConnectorData:       a.ConnectorData,
		Expiry:              a.Expiry,
		Claims:              fromStorageClaims(a.Claims),
		CodeChallenge:       a.PKCE.CodeChallenge,
		CodeChallengeMethod: a.PKCE.CodeChallengeMethod,
	}
	return req
}

// Password is a mirrored struct from the storage with JSON struct tags and
// Kubernetes type metadata.
type Password struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// The Kubernetes name is actually an encoded version of this value.
	//
	// This field is IMMUTABLE. Do not change.
	Email string `json:"email,omitempty"`

	Hash     []byte `json:"hash,omitempty"`
	Username string `json:"username,omitempty"`
	UserID   string `json:"userID,omitempty"`
}

// PasswordList is a list of Passwords.
type PasswordList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	Passwords       []Password `json:"items"`
}

// fromStoragePassword converts a storage.Password into its Kubernetes
// resource form. The email is lowercased and encoded (via idToName) to form
// the object name.
func (cli *client) fromStoragePassword(p storage.Password) Password {
	email := strings.ToLower(p.Email)
	return Password{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindPassword,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      cli.idToName(email),
			Namespace: cli.namespace,
		},
		Email:    email,
		Hash:     p.Hash,
		Username: p.Username,
		UserID:   p.UserID,
	}
}

// toStoragePassword converts a Kubernetes Password resource back to the
// storage form.
func toStoragePassword(p Password) storage.Password {
	return storage.Password{
		Email:    p.Email,
		Hash:     p.Hash,
		Username: p.Username,
		UserID:   p.UserID,
	}
}

// AuthCode is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type AuthCode struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	ClientID    string   `json:"clientID"`
	Scopes      []string `json:"scopes,omitempty"`
	RedirectURI string   `json:"redirectURI"`

	Nonce string `json:"nonce,omitempty"`
	State string `json:"state,omitempty"`

	Claims Claims `json:"claims,omitempty"`

	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`

	Expiry time.Time `json:"expiry"`

	CodeChallenge       string `json:"code_challenge,omitempty"`
	CodeChallengeMethod string `json:"code_challenge_method,omitempty"`
}

// AuthCodeList is a list of AuthCodes.
type AuthCodeList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	AuthCodes       []AuthCode `json:"items"`
}

// fromStorageAuthCode converts a storage.AuthCode into its Kubernetes
// resource form, using the code ID as the object name.
func (cli *client) fromStorageAuthCode(a storage.AuthCode) AuthCode {
	return AuthCode{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindAuthCode,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      a.ID,
			Namespace: cli.namespace,
		},
		ClientID:            a.ClientID,
		RedirectURI:         a.RedirectURI,
		ConnectorID:         a.ConnectorID,
		ConnectorData:       a.ConnectorData,
		Nonce:               a.Nonce,
		Scopes:              a.Scopes,
		Claims:              fromStorageClaims(a.Claims),
		Expiry:              a.Expiry,
		CodeChallenge:       a.PKCE.CodeChallenge,
		CodeChallengeMethod: a.PKCE.CodeChallengeMethod,
	}
}

// toStorageAuthCode converts a Kubernetes AuthCode resource back to the
// storage form; the ID is recovered from the object name.
func toStorageAuthCode(a AuthCode) storage.AuthCode {
	return storage.AuthCode{
		ID:            a.ObjectMeta.Name,
		ClientID:      a.ClientID,
		RedirectURI:   a.RedirectURI,
		ConnectorID:   a.ConnectorID,
		ConnectorData: a.ConnectorData,
		Nonce:         a.Nonce,
		Scopes:        a.Scopes,
		Claims:        toStorageClaims(a.Claims),
		Expiry:        a.Expiry,
		PKCE: storage.PKCE{
			CodeChallenge:       a.CodeChallenge,
			CodeChallengeMethod: a.CodeChallengeMethod,
		},
	}
}

// RefreshToken is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type RefreshToken struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// NOTE(review): these two fields carry no JSON tags, so they serialize
	// under their Go names — presumably intentional for compatibility with
	// existing stored objects; confirm before adding tags.
	CreatedAt time.Time
	LastUsed  time.Time

	ClientID string   `json:"clientID"`
	Scopes   []string `json:"scopes,omitempty"`

	Token string `json:"token,omitempty"`

	Nonce string `json:"nonce,omitempty"`

	Claims Claims `json:"claims,omitempty"`

	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`
}

// RefreshList is a list of refresh tokens.
type RefreshList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	RefreshTokens   []RefreshToken `json:"items"`
}

// toStorageRefreshToken converts a Kubernetes RefreshToken resource back to
// the storage form; the ID is recovered from the object name.
func toStorageRefreshToken(r RefreshToken) storage.RefreshToken {
	return storage.RefreshToken{
		ID:            r.ObjectMeta.Name,
		Token:         r.Token,
		CreatedAt:     r.CreatedAt,
		LastUsed:      r.LastUsed,
		ClientID:      r.ClientID,
		ConnectorID:   r.ConnectorID,
		ConnectorData: r.ConnectorData,
		Scopes:        r.Scopes,
		Nonce:         r.Nonce,
		Claims:        toStorageClaims(r.Claims),
	}
}

// fromStorageRefreshToken converts a storage.RefreshToken into its Kubernetes
// resource form, using the token ID as the object name.
func (cli *client) fromStorageRefreshToken(r storage.RefreshToken) RefreshToken {
	return RefreshToken{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindRefreshToken,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      r.ID,
			Namespace: cli.namespace,
		},
		Token:         r.Token,
		CreatedAt:     r.CreatedAt,
		LastUsed:      r.LastUsed,
		ClientID:      r.ClientID,
		ConnectorID:   r.ConnectorID,
		ConnectorData: r.ConnectorData,
		Scopes:        r.Scopes,
		Nonce:         r.Nonce,
		Claims:        fromStorageClaims(r.Claims),
	}
}

// Keys is a mirrored struct from storage with JSON struct tags and Kubernetes
// type metadata.
type Keys struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// Key for creating and verifying signatures. These may be nil.
	SigningKey    *jose.JSONWebKey `json:"signingKey,omitempty"`
	SigningKeyPub *jose.JSONWebKey `json:"signingKeyPub,omitempty"`

	// Old signing keys which have been rotated but can still be used to validate
	// existing signatures.
	VerificationKeys []storage.VerificationKey `json:"verificationKeys,omitempty"`

	// The next time the signing key will rotate.
	//
	// For caching purposes, implementations MUST NOT update keys before this time.
	NextRotation time.Time `json:"nextRotation"`
}

// fromStorageKeys converts storage.Keys into its Kubernetes resource form.
// All Keys objects share the fixed name keysName — there is only ever one.
func (cli *client) fromStorageKeys(keys storage.Keys) Keys {
	return Keys{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindKeys,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      keysName,
			Namespace: cli.namespace,
		},
		SigningKey:       keys.SigningKey,
		SigningKeyPub:    keys.SigningKeyPub,
		VerificationKeys: keys.VerificationKeys,
		NextRotation:     keys.NextRotation,
	}
}

// toStorageKeys converts a Kubernetes Keys resource back to the storage form.
func toStorageKeys(keys Keys) storage.Keys {
	return storage.Keys{
		SigningKey:       keys.SigningKey,
		SigningKeyPub:    keys.SigningKeyPub,
		VerificationKeys: keys.VerificationKeys,
		NextRotation:     keys.NextRotation,
	}
}

// OfflineSessions is a mirrored struct from storage with JSON struct tags and Kubernetes
// type metadata.
type OfflineSessions struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	UserID        string                              `json:"userID,omitempty"`
	ConnID        string                              `json:"connID,omitempty"`
	Refresh       map[string]*storage.RefreshTokenRef `json:"refresh,omitempty"`
	ConnectorData []byte                              `json:"connectorData,omitempty"`
}

// fromStorageOfflineSessions converts storage.OfflineSessions into its
// Kubernetes resource form; the object name combines the user and connector IDs.
func (cli *client) fromStorageOfflineSessions(o storage.OfflineSessions) OfflineSessions {
	return OfflineSessions{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindOfflineSessions,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      cli.offlineTokenName(o.UserID, o.ConnID),
			Namespace: cli.namespace,
		},
		UserID:        o.UserID,
		ConnID:        o.ConnID,
		Refresh:       o.Refresh,
		ConnectorData: o.ConnectorData,
	}
}

// toStorageOfflineSessions converts a Kubernetes OfflineSessions resource
// back to the storage form, guaranteeing a non-nil Refresh map.
func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions {
	s := storage.OfflineSessions{
		UserID:        o.UserID,
		ConnID:        o.ConnID,
		Refresh:       o.Refresh,
		ConnectorData: o.ConnectorData,
	}
	if s.Refresh == nil {
		// Server code assumes this will be non-nil.
		s.Refresh = make(map[string]*storage.RefreshTokenRef)
	}
	return s
}

// Connector is a mirrored struct from storage with JSON struct tags and Kubernetes
// type metadata.
type Connector struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	ID   string `json:"id,omitempty"`
	Type string `json:"type,omitempty"`
	Name string `json:"name,omitempty"`
	// Config holds connector specific configuration information
	Config []byte `json:"config,omitempty"`
}

// fromStorageConnector converts a storage.Connector into its Kubernetes
// resource form, using the connector ID as the object name.
func (cli *client) fromStorageConnector(c storage.Connector) Connector {
	return Connector{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindConnector,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      c.ID,
			Namespace: cli.namespace,
		},
		ID:     c.ID,
		Type:   c.Type,
		Name:   c.Name,
		Config: c.Config,
	}
}

// toStorageConnector converts a Kubernetes Connector resource back to the
// storage form, carrying the Kubernetes ResourceVersion for optimistic updates.
func toStorageConnector(c Connector) storage.Connector {
	return storage.Connector{
		ID:              c.ID,
		Type:            c.Type,
		Name:            c.Name,
		ResourceVersion: c.ObjectMeta.ResourceVersion,
		Config:          c.Config,
	}
}

// ConnectorList is a list of Connectors.
type ConnectorList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	Connectors      []Connector `json:"items"`
}

// DeviceRequest is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type DeviceRequest struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	DeviceCode   string   `json:"device_code,omitempty"`
	ClientID     string   `json:"client_id,omitempty"`
	ClientSecret string   `json:"client_secret,omitempty"`
	Scopes       []string `json:"scopes,omitempty"`
	Expiry       time.Time `json:"expiry"`
}

// DeviceRequestList is a list of DeviceRequests.
type DeviceRequestList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	DeviceRequests  []DeviceRequest `json:"items"`
}

// fromStorageDeviceRequest converts a storage.DeviceRequest into its
// Kubernetes resource form. The user code is lowercased for the object name
// (Kubernetes names must be lowercase); toStorageDeviceRequest uppercases it
// on the way back.
func (cli *client) fromStorageDeviceRequest(a storage.DeviceRequest) DeviceRequest {
	req := DeviceRequest{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindDeviceRequest,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      strings.ToLower(a.UserCode),
			Namespace: cli.namespace,
		},
		DeviceCode:   a.DeviceCode,
		ClientID:     a.ClientID,
		ClientSecret: a.ClientSecret,
		Scopes:       a.Scopes,
		Expiry:       a.Expiry,
	}
	return req
}

// toStorageDeviceRequest converts a Kubernetes DeviceRequest resource back to
// the storage form, recovering the (uppercase) user code from the object name.
func toStorageDeviceRequest(req DeviceRequest) storage.DeviceRequest {
	return storage.DeviceRequest{
		UserCode:     strings.ToUpper(req.ObjectMeta.Name),
		DeviceCode:   req.DeviceCode,
		ClientID:     req.ClientID,
		ClientSecret: req.ClientSecret,
		Scopes:       req.Scopes,
		Expiry:       req.Expiry,
	}
}

// DeviceToken is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type DeviceToken struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	Status              string    `json:"status,omitempty"`
	Token               string    `json:"token,omitempty"`
	Expiry              time.Time `json:"expiry"`
	LastRequestTime     time.Time `json:"last_request"`
	PollIntervalSeconds int       `json:"poll_interval"`
}

// DeviceTokenList is a list of DeviceTokens.
type DeviceTokenList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	DeviceTokens    []DeviceToken `json:"items"`
}

// fromStorageDeviceToken converts a storage.DeviceToken into its Kubernetes
// resource form, using the device code as the object name.
func (cli *client) fromStorageDeviceToken(t storage.DeviceToken) DeviceToken {
	req := DeviceToken{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindDeviceToken,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      t.DeviceCode,
			Namespace: cli.namespace,
		},
		Status:              t.Status,
		Token:               t.Token,
		Expiry:              t.Expiry,
		LastRequestTime:     t.LastRequestTime,
		PollIntervalSeconds: t.PollIntervalSeconds,
	}
	return req
}

// toStorageDeviceToken converts a Kubernetes DeviceToken resource back to the
// storage form, recovering the device code from the object name.
func toStorageDeviceToken(t DeviceToken) storage.DeviceToken {
	return storage.DeviceToken{
		DeviceCode:          t.ObjectMeta.Name,
		Status:              t.Status,
		Token:               t.Token,
		Expiry:              t.Expiry,
		LastRequestTime:     t.LastRequestTime,
		PollIntervalSeconds: t.PollIntervalSeconds,
	}
}

// NOTE(review): everything from here to the end of the file appears to be a
// concatenation artifact — an embedded git commit message followed by a
// duplicated copy of the top of this file. It is not valid Go and the file
// cannot compile with it present; confirm and remove.

spelling: storage

Signed-off-by: Josh Soref <dc510c92cc1794ea84000fde88becdce67bf7624@users.noreply.github.com>

package kubernetes

import (
	"strings"
	"time"

	jose "gopkg.in/square/go-jose.v2"

	"github.com/dexidp/dex/storage"
	"github.com/dexidp/dex/storage/kubernetes/k8sapi"
)

var crdMeta = k8sapi.TypeMeta{
	APIVersion: "apiextensions.k8s.io/v1beta1",
	Kind:       "CustomResourceDefinition",
}

const apiGroup = "dex.coreos.com"

// The set of custom resource definitions required by the storage. These are managed by
// the storage so it can migrate itself by creating new resources.
// customResourceDefinitions enumerates every CRD the storage requires; the
// storage creates these itself on startup. Names, plurals, and kinds are part
// of the stored-data contract and must not be changed casually.
var customResourceDefinitions = []k8sapi.CustomResourceDefinition{
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "authcodes.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "authcodes",
				Singular: "authcode",
				Kind:     "AuthCode",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "authrequests.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "authrequests",
				Singular: "authrequest",
				Kind:     "AuthRequest",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "oauth2clients.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "oauth2clients",
				Singular: "oauth2client",
				Kind:     "OAuth2Client",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "signingkeies.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				// `signingkeies` is an artifact from the old TPR pluralization.
				// Users don't directly interact with this value, hence leaving it
				// as is.
				Plural:   "signingkeies",
				Singular: "signingkey",
				Kind:     "SigningKey",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "refreshtokens.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "refreshtokens",
				Singular: "refreshtoken",
				Kind:     "RefreshToken",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "passwords.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "passwords",
				Singular: "password",
				Kind:     "Password",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			// Note the nonstandard double plural; kept for data compatibility.
			Name: "offlinesessionses.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "offlinesessionses",
				Singular: "offlinesessions",
				Kind:     "OfflineSessions",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "connectors.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "connectors",
				Singular: "connector",
				Kind:     "Connector",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "devicerequests.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "devicerequests",
				Singular: "devicerequest",
				Kind:     "DeviceRequest",
			},
		},
	},
	{
		ObjectMeta: k8sapi.ObjectMeta{
			Name: "devicetokens.dex.coreos.com",
		},
		TypeMeta: crdMeta,
		Spec: k8sapi.CustomResourceDefinitionSpec{
			Group:   apiGroup,
			Version: "v1",
			Names: k8sapi.CustomResourceDefinitionNames{
				Plural:   "devicetokens",
				Singular: "devicetoken",
				Kind:     "DeviceToken",
			},
		},
	},
}

// There will only ever be a single keys resource. Maintain this by setting a
// common name.
const keysName = "openid-connect-keys" // Client is a mirrored struct from storage with JSON struct tags and // Kubernetes type metadata. type Client struct { // Name is a hash of the ID. k8sapi.TypeMeta `json:",inline"` k8sapi.ObjectMeta `json:"metadata,omitempty"` // ID is immutable, since it's a primary key and should not be changed. ID string `json:"id,omitempty"` Secret string `json:"secret,omitempty"` RedirectURIs []string `json:"redirectURIs,omitempty"` TrustedPeers []string `json:"trustedPeers,omitempty"` Public bool `json:"public"` Name string `json:"name,omitempty"` LogoURL string `json:"logoURL,omitempty"` } // ClientList is a list of Clients. type ClientList struct { k8sapi.TypeMeta `json:",inline"` k8sapi.ListMeta `json:"metadata,omitempty"` Clients []Client `json:"items"` } func (cli *client) fromStorageClient(c storage.Client) Client { return Client{ TypeMeta: k8sapi.TypeMeta{ Kind: kindClient, APIVersion: cli.apiVersion, }, ObjectMeta: k8sapi.ObjectMeta{ Name: cli.idToName(c.ID), Namespace: cli.namespace, }, ID: c.ID, Secret: c.Secret, RedirectURIs: c.RedirectURIs, TrustedPeers: c.TrustedPeers, Public: c.Public, Name: c.Name, LogoURL: c.LogoURL, } } func toStorageClient(c Client) storage.Client { return storage.Client{ ID: c.ID, Secret: c.Secret, RedirectURIs: c.RedirectURIs, TrustedPeers: c.TrustedPeers, Public: c.Public, Name: c.Name, LogoURL: c.LogoURL, } } // Claims is a mirrored struct from storage with JSON struct tags. 
type Claims struct {
	UserID            string   `json:"userID"`
	Username          string   `json:"username"`
	PreferredUsername string   `json:"preferredUsername"`
	Email             string   `json:"email"`
	EmailVerified     bool     `json:"emailVerified"`
	Groups            []string `json:"groups,omitempty"`
}

// fromStorageClaims copies identity claims from the storage form into the
// JSON-tagged mirror.
func fromStorageClaims(i storage.Claims) Claims {
	return Claims{
		UserID:            i.UserID,
		Username:          i.Username,
		PreferredUsername: i.PreferredUsername,
		Email:             i.Email,
		EmailVerified:     i.EmailVerified,
		Groups:            i.Groups,
	}
}

// toStorageClaims is the inverse of fromStorageClaims.
func toStorageClaims(i Claims) storage.Claims {
	return storage.Claims{
		UserID:            i.UserID,
		Username:          i.Username,
		PreferredUsername: i.PreferredUsername,
		Email:             i.Email,
		EmailVerified:     i.EmailVerified,
		Groups:            i.Groups,
	}
}

// AuthRequest is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type AuthRequest struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	ClientID      string   `json:"clientID"`
	ResponseTypes []string `json:"responseTypes,omitempty"`
	Scopes        []string `json:"scopes,omitempty"`
	RedirectURI   string   `json:"redirectURI"`

	Nonce string `json:"nonce,omitempty"`
	State string `json:"state,omitempty"`

	// The client has indicated that the end user must be shown an approval prompt
	// on all requests. The server cannot cache their initial action for subsequent
	// attempts.
	ForceApprovalPrompt bool `json:"forceApprovalPrompt,omitempty"`

	LoggedIn bool `json:"loggedIn"`

	// The identity of the end user. Generally nil until the user authenticates
	// with a backend.
	Claims Claims `json:"claims,omitempty"`

	// The connector used to login the user. Set when the user authenticates.
	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`

	Expiry time.Time `json:"expiry"`

	CodeChallenge       string `json:"code_challenge,omitempty"`
	CodeChallengeMethod string `json:"code_challenge_method,omitempty"`
}

// AuthRequestList is a list of AuthRequests.
type AuthRequestList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	AuthRequests    []AuthRequest `json:"items"`
}

// toStorageAuthRequest converts the custom-resource form back to the storage
// form. The request ID is the Kubernetes object name; the PKCE fields are
// folded into the storage.PKCE sub-struct.
func toStorageAuthRequest(req AuthRequest) storage.AuthRequest {
	a := storage.AuthRequest{
		ID:                  req.ObjectMeta.Name,
		ClientID:            req.ClientID,
		ResponseTypes:       req.ResponseTypes,
		Scopes:              req.Scopes,
		RedirectURI:         req.RedirectURI,
		Nonce:               req.Nonce,
		State:               req.State,
		ForceApprovalPrompt: req.ForceApprovalPrompt,
		LoggedIn:            req.LoggedIn,
		ConnectorID:         req.ConnectorID,
		ConnectorData:       req.ConnectorData,
		Expiry:              req.Expiry,
		Claims:              toStorageClaims(req.Claims),
		PKCE: storage.PKCE{
			CodeChallenge:       req.CodeChallenge,
			CodeChallengeMethod: req.CodeChallengeMethod,
		},
	}
	return a
}

// fromStorageAuthRequest converts a storage auth request into its
// custom-resource form (inverse of toStorageAuthRequest).
func (cli *client) fromStorageAuthRequest(a storage.AuthRequest) AuthRequest {
	req := AuthRequest{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindAuthRequest,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      a.ID,
			Namespace: cli.namespace,
		},
		ClientID:            a.ClientID,
		ResponseTypes:       a.ResponseTypes,
		Scopes:              a.Scopes,
		RedirectURI:         a.RedirectURI,
		Nonce:               a.Nonce,
		State:               a.State,
		LoggedIn:            a.LoggedIn,
		ForceApprovalPrompt: a.ForceApprovalPrompt,
		ConnectorID:         a.ConnectorID,
		ConnectorData:       a.ConnectorData,
		Expiry:              a.Expiry,
		Claims:              fromStorageClaims(a.Claims),
		CodeChallenge:       a.PKCE.CodeChallenge,
		CodeChallengeMethod: a.PKCE.CodeChallengeMethod,
	}
	return req
}

// Password is a mirrored struct from the storage with JSON struct tags and
// Kubernetes type metadata.
type Password struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// The Kubernetes name is actually an encoded version of this value.
	//
	// This field is IMMUTABLE. Do not change.
	Email string `json:"email,omitempty"`

	Hash     []byte `json:"hash,omitempty"`
	Username string `json:"username,omitempty"`
	UserID   string `json:"userID,omitempty"`
}

// PasswordList is a list of Passwords.
type PasswordList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	Passwords       []Password `json:"items"`
}

// fromStoragePassword converts a storage password into its custom-resource
// form. The email is lower-cased and the Kubernetes name is an encoding of
// that lower-cased email (via idToName).
func (cli *client) fromStoragePassword(p storage.Password) Password {
	email := strings.ToLower(p.Email)
	return Password{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindPassword,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      cli.idToName(email),
			Namespace: cli.namespace,
		},
		Email:    email,
		Hash:     p.Hash,
		Username: p.Username,
		UserID:   p.UserID,
	}
}

// toStoragePassword converts the custom-resource form back to the storage
// form, dropping the Kubernetes metadata.
func toStoragePassword(p Password) storage.Password {
	return storage.Password{
		Email:    p.Email,
		Hash:     p.Hash,
		Username: p.Username,
		UserID:   p.UserID,
	}
}

// AuthCode is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type AuthCode struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	ClientID    string   `json:"clientID"`
	Scopes      []string `json:"scopes,omitempty"`
	RedirectURI string   `json:"redirectURI"`

	Nonce string `json:"nonce,omitempty"`
	State string `json:"state,omitempty"`

	Claims Claims `json:"claims,omitempty"`

	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`

	Expiry time.Time `json:"expiry"`

	CodeChallenge       string `json:"code_challenge,omitempty"`
	CodeChallengeMethod string `json:"code_challenge_method,omitempty"`
}

// AuthCodeList is a list of AuthCodes.
type AuthCodeList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	AuthCodes       []AuthCode `json:"items"`
}

// fromStorageAuthCode converts a storage auth code into its custom-resource
// form; the code's ID becomes the Kubernetes object name.
func (cli *client) fromStorageAuthCode(a storage.AuthCode) AuthCode {
	return AuthCode{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindAuthCode,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      a.ID,
			Namespace: cli.namespace,
		},
		ClientID:            a.ClientID,
		RedirectURI:         a.RedirectURI,
		ConnectorID:         a.ConnectorID,
		ConnectorData:       a.ConnectorData,
		Nonce:               a.Nonce,
		Scopes:              a.Scopes,
		Claims:              fromStorageClaims(a.Claims),
		Expiry:              a.Expiry,
		CodeChallenge:       a.PKCE.CodeChallenge,
		CodeChallengeMethod: a.PKCE.CodeChallengeMethod,
	}
}

// toStorageAuthCode is the inverse of fromStorageAuthCode; the PKCE fields
// are folded back into the storage.PKCE sub-struct.
func toStorageAuthCode(a AuthCode) storage.AuthCode {
	return storage.AuthCode{
		ID:            a.ObjectMeta.Name,
		ClientID:      a.ClientID,
		RedirectURI:   a.RedirectURI,
		ConnectorID:   a.ConnectorID,
		ConnectorData: a.ConnectorData,
		Nonce:         a.Nonce,
		Scopes:        a.Scopes,
		Claims:        toStorageClaims(a.Claims),
		Expiry:        a.Expiry,
		PKCE: storage.PKCE{
			CodeChallenge:       a.CodeChallenge,
			CodeChallengeMethod: a.CodeChallengeMethod,
		},
	}
}

// RefreshToken is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type RefreshToken struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// NOTE(review): these two fields have no JSON tags, so they serialize as
	// "CreatedAt"/"LastUsed", unlike every other field here. Adding tags now
	// would break decoding of objects already stored — leave as is.
	CreatedAt time.Time
	LastUsed  time.Time

	ClientID string   `json:"clientID"`
	Scopes   []string `json:"scopes,omitempty"`

	Token string `json:"token,omitempty"`
	Nonce string `json:"nonce,omitempty"`

	Claims Claims `json:"claims,omitempty"`

	ConnectorID   string `json:"connectorID,omitempty"`
	ConnectorData []byte `json:"connectorData,omitempty"`
}

// RefreshList is a list of refresh tokens.
type RefreshList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	RefreshTokens   []RefreshToken `json:"items"`
}

// toStorageRefreshToken converts the custom-resource form back to the
// storage form; the token ID is the Kubernetes object name.
func toStorageRefreshToken(r RefreshToken) storage.RefreshToken {
	return storage.RefreshToken{
		ID:            r.ObjectMeta.Name,
		Token:         r.Token,
		CreatedAt:     r.CreatedAt,
		LastUsed:      r.LastUsed,
		ClientID:      r.ClientID,
		ConnectorID:   r.ConnectorID,
		ConnectorData: r.ConnectorData,
		Scopes:        r.Scopes,
		Nonce:         r.Nonce,
		Claims:        toStorageClaims(r.Claims),
	}
}

// fromStorageRefreshToken converts a storage refresh token into its
// custom-resource form (inverse of toStorageRefreshToken).
func (cli *client) fromStorageRefreshToken(r storage.RefreshToken) RefreshToken {
	return RefreshToken{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindRefreshToken,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      r.ID,
			Namespace: cli.namespace,
		},
		Token:         r.Token,
		CreatedAt:     r.CreatedAt,
		LastUsed:      r.LastUsed,
		ClientID:      r.ClientID,
		ConnectorID:   r.ConnectorID,
		ConnectorData: r.ConnectorData,
		Scopes:        r.Scopes,
		Nonce:         r.Nonce,
		Claims:        fromStorageClaims(r.Claims),
	}
}

// Keys is a mirrored struct from storage with JSON struct tags and Kubernetes
// type metadata.
type Keys struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	// Key for creating and verifying signatures. These may be nil.
	SigningKey    *jose.JSONWebKey `json:"signingKey,omitempty"`
	SigningKeyPub *jose.JSONWebKey `json:"signingKeyPub,omitempty"`

	// Old signing keys which have been rotated but can still be used to validate
	// existing signatures.
	VerificationKeys []storage.VerificationKey `json:"verificationKeys,omitempty"`

	// The next time the signing key will rotate.
	//
	// For caching purposes, implementations MUST NOT update keys before this time.
	NextRotation time.Time `json:"nextRotation"`
}

// fromStorageKeys converts the storage key set into its custom-resource form.
// All key sets share the fixed object name keysName (there is only one).
func (cli *client) fromStorageKeys(keys storage.Keys) Keys {
	return Keys{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindKeys,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      keysName,
			Namespace: cli.namespace,
		},
		SigningKey:       keys.SigningKey,
		SigningKeyPub:    keys.SigningKeyPub,
		VerificationKeys: keys.VerificationKeys,
		NextRotation:     keys.NextRotation,
	}
}

// toStorageKeys is the inverse of fromStorageKeys; the fixed object name is
// dropped.
func toStorageKeys(keys Keys) storage.Keys {
	return storage.Keys{
		SigningKey:       keys.SigningKey,
		SigningKeyPub:    keys.SigningKeyPub,
		VerificationKeys: keys.VerificationKeys,
		NextRotation:     keys.NextRotation,
	}
}

// OfflineSessions is a mirrored struct from storage with JSON struct tags and Kubernetes
// type metadata.
type OfflineSessions struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	UserID        string                              `json:"userID,omitempty"`
	ConnID        string                              `json:"connID,omitempty"`
	Refresh       map[string]*storage.RefreshTokenRef `json:"refresh,omitempty"`
	ConnectorData []byte                              `json:"connectorData,omitempty"`
}

// fromStorageOfflineSessions converts storage offline sessions into their
// custom-resource form; the object name is derived from (userID, connID).
func (cli *client) fromStorageOfflineSessions(o storage.OfflineSessions) OfflineSessions {
	return OfflineSessions{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindOfflineSessions,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      cli.offlineTokenName(o.UserID, o.ConnID),
			Namespace: cli.namespace,
		},
		UserID:        o.UserID,
		ConnID:        o.ConnID,
		Refresh:       o.Refresh,
		ConnectorData: o.ConnectorData,
	}
}

// toStorageOfflineSessions is the inverse of fromStorageOfflineSessions. It
// guarantees a non-nil Refresh map because server code assumes one.
func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions {
	s := storage.OfflineSessions{
		UserID:        o.UserID,
		ConnID:        o.ConnID,
		Refresh:       o.Refresh,
		ConnectorData: o.ConnectorData,
	}
	if s.Refresh == nil {
		// Server code assumes this will be non-nil.
		s.Refresh = make(map[string]*storage.RefreshTokenRef)
	}
	return s
}

// Connector is a mirrored struct from storage with JSON struct tags and Kubernetes
// type metadata.
type Connector struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	ID   string `json:"id,omitempty"`
	Type string `json:"type,omitempty"`
	Name string `json:"name,omitempty"`

	// Config holds connector specific configuration information
	Config []byte `json:"config,omitempty"`
}

// fromStorageConnector converts a storage connector into its custom-resource
// form; the connector ID doubles as the Kubernetes object name.
func (cli *client) fromStorageConnector(c storage.Connector) Connector {
	return Connector{
		TypeMeta: k8sapi.TypeMeta{
			Kind:       kindConnector,
			APIVersion: cli.apiVersion,
		},
		ObjectMeta: k8sapi.ObjectMeta{
			Name:      c.ID,
			Namespace: cli.namespace,
		},
		ID:     c.ID,
		Type:   c.Type,
		Name:   c.Name,
		Config: c.Config,
	}
}

// toStorageConnector converts back to the storage form. Note the asymmetry:
// the Kubernetes ResourceVersion is carried into the storage struct here,
// while fromStorageConnector does not set it.
func toStorageConnector(c Connector) storage.Connector {
	return storage.Connector{
		ID:              c.ID,
		Type:            c.Type,
		Name:            c.Name,
		ResourceVersion: c.ObjectMeta.ResourceVersion,
		Config:          c.Config,
	}
}

// ConnectorList is a list of Connectors.
type ConnectorList struct {
	k8sapi.TypeMeta `json:",inline"`
	k8sapi.ListMeta `json:"metadata,omitempty"`
	Connectors      []Connector `json:"items"`
}

// DeviceRequest is a mirrored struct from storage with JSON struct tags and
// Kubernetes type metadata.
type DeviceRequest struct {
	k8sapi.TypeMeta   `json:",inline"`
	k8sapi.ObjectMeta `json:"metadata,omitempty"`

	DeviceCode   string    `json:"device_code,omitempty"`
	ClientID     string    `json:"client_id,omitempty"`
	ClientSecret string    `json:"client_secret,omitempty"`
	Scopes       []string  `json:"scopes,omitempty"`
	Expiry       time.Time `json:"expiry"`
}

// DeviceRequestList is a list of DeviceRequests.
type DeviceRequestList struct { k8sapi.TypeMeta `json:",inline"` k8sapi.ListMeta `json:"metadata,omitempty"` DeviceRequests []DeviceRequest `json:"items"` } func (cli *client) fromStorageDeviceRequest(a storage.DeviceRequest) DeviceRequest { req := DeviceRequest{ TypeMeta: k8sapi.TypeMeta{ Kind: kindDeviceRequest, APIVersion: cli.apiVersion, }, ObjectMeta: k8sapi.ObjectMeta{ Name: strings.ToLower(a.UserCode), Namespace: cli.namespace, }, DeviceCode: a.DeviceCode, ClientID: a.ClientID, ClientSecret: a.ClientSecret, Scopes: a.Scopes, Expiry: a.Expiry, } return req } func toStorageDeviceRequest(req DeviceRequest) storage.DeviceRequest { return storage.DeviceRequest{ UserCode: strings.ToUpper(req.ObjectMeta.Name), DeviceCode: req.DeviceCode, ClientID: req.ClientID, ClientSecret: req.ClientSecret, Scopes: req.Scopes, Expiry: req.Expiry, } } // DeviceToken is a mirrored struct from storage with JSON struct tags and // Kubernetes type metadata. type DeviceToken struct { k8sapi.TypeMeta `json:",inline"` k8sapi.ObjectMeta `json:"metadata,omitempty"` Status string `json:"status,omitempty"` Token string `json:"token,omitempty"` Expiry time.Time `json:"expiry"` LastRequestTime time.Time `json:"last_request"` PollIntervalSeconds int `json:"poll_interval"` } // DeviceTokenList is a list of DeviceTokens. 
type DeviceTokenList struct { k8sapi.TypeMeta `json:",inline"` k8sapi.ListMeta `json:"metadata,omitempty"` DeviceTokens []DeviceToken `json:"items"` } func (cli *client) fromStorageDeviceToken(t storage.DeviceToken) DeviceToken { req := DeviceToken{ TypeMeta: k8sapi.TypeMeta{ Kind: kindDeviceToken, APIVersion: cli.apiVersion, }, ObjectMeta: k8sapi.ObjectMeta{ Name: t.DeviceCode, Namespace: cli.namespace, }, Status: t.Status, Token: t.Token, Expiry: t.Expiry, LastRequestTime: t.LastRequestTime, PollIntervalSeconds: t.PollIntervalSeconds, } return req } func toStorageDeviceToken(t DeviceToken) storage.DeviceToken { return storage.DeviceToken{ DeviceCode: t.ObjectMeta.Name, Status: t.Status, Token: t.Token, Expiry: t.Expiry, LastRequestTime: t.LastRequestTime, PollIntervalSeconds: t.PollIntervalSeconds, } }
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package helper_test

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
	"testing"
	"time"

	"github.com/gorilla/mux"
	. "github.com/pingcap/check"
	"github.com/pingcap/log"
	"github.com/pingcap/parser/model"
	"github.com/pingcap/tidb/store/helper"
	"github.com/pingcap/tidb/store/mockstore"
	"github.com/pingcap/tidb/store/mockstore/mocktikv"
	"github.com/pingcap/tidb/store/tikv"
	"github.com/pingcap/tidb/util/pdapi"
	"go.uber.org/zap"
)

// HelperTestSuite exercises the PD/TiKV helper against a mock store and a
// mock PD HTTP server.
type HelperTestSuite struct {
	store tikv.Storage
}

var _ = Suite(new(HelperTestSuite))

// TestT hooks gocheck into "go test".
func TestT(t *testing.T) {
	CustomVerboseFlag = true
	TestingT(t)
}

// mockStore wraps a mock TiKV storage and overrides the PD endpoints to
// point at the in-process mock PD server.
type mockStore struct {
	tikv.Storage
	pdAddrs []string
}

func (s *mockStore) EtcdAddrs() []string {
	return s.pdAddrs
}

func (s *mockStore) StartGCWorker() error {
	panic("not implemented")
}

func (s *mockStore) TLSConfig() *tls.Config {
	panic("not implemented")
}

// SetUpSuite starts the mock PD HTTP server and builds the mock store.
// NOTE(review): the 100ms sleep is a race-prone way to wait for the server
// to start listening.
func (s *HelperTestSuite) SetUpSuite(c *C) {
	go s.mockPDHTTPServer(c)
	time.Sleep(100 * time.Millisecond)
	mvccStore := mocktikv.MustNewMVCCStore()
	mockTikvStore, err := mockstore.NewMockTikvStore(mockstore.WithMVCCStore(mvccStore))
	s.store = &mockStore{
		mockTikvStore.(tikv.Storage),
		[]string{"127.0.0.1:10100/"},
	}
	c.Assert(err, IsNil)
}

// TestHotRegion fetches hot-region metrics from the mock PD server.
// NOTE(review): the local variable `helper` shadows the imported package of
// the same name; the later revision of this file renames it to `h`.
func (s *HelperTestSuite) TestHotRegion(c *C) {
	helper := helper.Helper{
		Store:       s.store,
		RegionCache: s.store.GetRegionCache(),
	}
	regionMetric, err := helper.FetchHotRegion(pdapi.HotRead)
	c.Assert(err, IsNil, Commentf("err: %+v", err))
	c.Assert(fmt.Sprintf("%v", regionMetric), Equals, "map[1:{100 1 0}]")
	dbInfo := &model.DBInfo{
		Name: model.NewCIStr("test"),
	}
	c.Assert(err, IsNil)
	_, err = helper.FetchRegionTableIndex(regionMetric, []*model.DBInfo{dbInfo})
	c.Assert(err, IsNil, Commentf("err: %+v", err))
}

// TestGetRegionsTableInfo checks region-to-table mapping against a fixture.
func (s *HelperTestSuite) TestGetRegionsTableInfo(c *C) {
	h := helper.NewHelper(s.store)
	regionsInfo := getMockTiKVRegionsInfo()
	schemas := getMockRegionsTableInfoSchema()
	tableInfos := h.GetRegionsTableInfo(regionsInfo, schemas)
	ans := getRegionsTableInfoAns(schemas)
	c.Assert(fmt.Sprintf("%v", tableInfos), Equals, fmt.Sprintf("%v", ans))
}

// TestTiKVRegionsInfo round-trips the mock regions through the PD API.
func (s *HelperTestSuite) TestTiKVRegionsInfo(c *C) {
	h := helper.Helper{
		Store:       s.store,
		RegionCache: s.store.GetRegionCache(),
	}
	regionsInfo, err := h.GetRegionsInfo()
	c.Assert(err, IsNil, Commentf("err: %+v", err))
	c.Assert(fmt.Sprintf("%v", regionsInfo), Equals, fmt.Sprintf("%v", getMockTiKVRegionsInfo()))
}

// TestTiKVStoresStat checks the JSON serialization of store stats.
func (s *HelperTestSuite) TestTiKVStoresStat(c *C) {
	h := helper.Helper{
		Store:       s.store,
		RegionCache: s.store.GetRegionCache(),
	}
	stat, err := h.GetStoresStat()
	c.Assert(err, IsNil, Commentf("err: %+v", err))
	data, err := json.Marshal(stat)
	c.Assert(err, IsNil)
	c.Assert(fmt.Sprintf("%s", data), Equals, "{\"count\":1,\"stores\":[{\"store\":{\"id\":1,\"address\":\"127.0.0.1:20160\",\"state\":0,\"state_name\":\"Up\",\"version\":\"3.0.0-beta\",\"labels\":[{\"key\":\"test\",\"value\":\"test\"}]},\"status\":{\"capacity\":\"60 GiB\",\"available\":\"100 GiB\",\"leader_count\":10,\"leader_weight\":1,\"leader_score\":1000,\"leader_size\":1000,\"region_count\":200,\"region_weight\":1,\"region_score\":1000,\"region_size\":1000,\"start_ts\":\"2019-04-23T19:30:30+08:00\",\"last_heartbeat_ts\":\"2019-04-23T19:31:30+08:00\",\"uptime\":\"1h30m\"}}]}")
}

// mockPDHTTPServer serves the three PD endpoints the helper queries.
func (s *HelperTestSuite) mockPDHTTPServer(c *C) {
	router := mux.NewRouter()
	router.HandleFunc(pdapi.HotRead, s.mockHotRegionResponse)
	router.HandleFunc(pdapi.Regions, s.mockTiKVRegionsInfoResponse)
	router.HandleFunc(pdapi.Stores, s.mockStoreStatResponse)
	serverMux := http.NewServeMux()
	serverMux.Handle("/", router)
	server := &http.Server{Addr: "127.0.0.1:10100", Handler: serverMux}
	err := server.ListenAndServe()
	c.Assert(err, IsNil)
}

// mockHotRegionResponse returns a fixed hot-region payload.
func (s *HelperTestSuite) mockHotRegionResponse(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	regionsStat := helper.HotRegionsStat{
		RegionsStat: []helper.RegionStat{
			{
				FlowBytes: 100,
				RegionID:  1,
				HotDegree: 1,
			},
		},
	}
	resp := helper.StoreHotRegionInfos{
		AsLeader: make(map[uint64]*helper.HotRegionsStat),
	}
	resp.AsLeader[0] = &regionsStat
	data, err := json.MarshalIndent(resp, "", "	")
	if err != nil {
		log.Panic("json marshal failed", zap.Error(err))
	}
	_, err = w.Write(data)
	if err != nil {
		log.Panic("write http response failed", zap.Error(err))
	}
}

// getMockRegionsTableInfoSchema builds the schema fixture: three tables with
// 1, 2, and 3 indices respectively.
func getMockRegionsTableInfoSchema() []*model.DBInfo {
	return []*model.DBInfo{
		{
			Name: model.NewCIStr("test"),
			Tables: []*model.TableInfo{
				{
					ID:      41,
					Indices: []*model.IndexInfo{{ID: 1}},
				},
				{
					ID:      63,
					Indices: []*model.IndexInfo{{ID: 1}, {ID: 2}},
				},
				{
					ID:      66,
					Indices: []*model.IndexInfo{{ID: 1}, {ID: 2}, {ID: 3}},
				},
			},
		},
	}
}

// getRegionsTableInfoAns is the expected region-id -> table-info mapping for
// the fixture regions below.
func getRegionsTableInfoAns(dbs []*model.DBInfo) map[uint64][]helper.TableInfo {
	ans := make(map[uint64][]helper.TableInfo)
	db := dbs[0]
	ans[1] = []helper.TableInfo{}
	ans[2] = []helper.TableInfo{
		{db, db.Tables[0], true, db.Tables[0].Indices[0]},
		{db, db.Tables[0], false, nil},
	}
	ans[3] = []helper.TableInfo{
		{db, db.Tables[1], true, db.Tables[1].Indices[0]},
		{db, db.Tables[1], true, db.Tables[1].Indices[1]},
		{db, db.Tables[1], false, nil},
	}
	ans[4] = []helper.TableInfo{
		{db, db.Tables[2], false, nil},
	}
	ans[5] = []helper.TableInfo{
		{db, db.Tables[2], true, db.Tables[2].Indices[2]},
		{db, db.Tables[2], false, nil},
	}
	ans[6] = []helper.TableInfo{
		{db, db.Tables[2], true, db.Tables[2].Indices[0]},
	}
	ans[7] = []helper.TableInfo{
		{db, db.Tables[2], true, db.Tables[2].Indices[1]},
	}
	ans[8] = []helper.TableInfo{
		{db, db.Tables[2], true, db.Tables[2].Indices[1]},
		{db, db.Tables[2], true, db.Tables[2].Indices[2]},
		{db, db.Tables[2], false, nil},
	}
	return ans
}

// getMockTiKVRegionsInfo builds the region fixture covering record ranges,
// index ranges, and a merged region.
func getMockTiKVRegionsInfo() *helper.RegionsInfo {
	regions := []helper.RegionInfo{
		{
			ID:       1,
			StartKey: "",
			EndKey:   "12341234",
			Epoch: helper.RegionEpoch{
				ConfVer: 1,
				Version: 1,
			},
			Peers: []helper.RegionPeer{
				{ID: 2, StoreID: 1},
				{ID: 15, StoreID: 51},
				{ID: 66, StoreID: 99, IsLearner: true},
				{ID: 123, StoreID: 111, IsLearner: true},
			},
			Leader: helper.RegionPeer{
				ID:      2,
				StoreID: 1,
			},
			DownPeers: []helper.RegionPeerStat{
				{
					helper.RegionPeer{ID: 66, StoreID: 99, IsLearner: true},
					120,
				},
			},
			PendingPeers: []helper.RegionPeer{
				{ID: 15, StoreID: 51},
			},
			WrittenBytes:    100,
			ReadBytes:       1000,
			ApproximateKeys: 200,
			ApproximateSize: 500,
		},
		// table: 41, record + index: 1
		{
			ID:       2,
			StartKey: "7480000000000000FF295F698000000000FF0000010000000000FA",
			EndKey:   "7480000000000000FF2B5F698000000000FF0000010000000000FA",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 3, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 3, StoreID: 1},
		},
		// table: 63, record + index: 1, 2
		{
			ID:       3,
			StartKey: "7480000000000000FF3F5F698000000000FF0000010000000000FA",
			EndKey:   "7480000000000000FF425F698000000000FF0000010000000000FA",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 4, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 4, StoreID: 1},
		},
		// table: 66, record
		{
			ID:       4,
			StartKey: "7480000000000000FF425F72C000000000FF0000000000000000FA",
			EndKey:   "",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 5, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 5, StoreID: 1},
		},
		// table: 66, record + index: 3
		{
			ID:       5,
			StartKey: "7480000000000000FF425F698000000000FF0000030000000000FA",
			EndKey:   "7480000000000000FF425F72C000000000FF0000000000000000FA",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 6, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 6, StoreID: 1},
		},
		// table: 66, index: 1
		{
			ID:       6,
			StartKey: "7480000000000000FF425F698000000000FF0000010000000000FA",
			EndKey:   "7480000000000000FF425F698000000000FF0000020000000000FA",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 7, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 7, StoreID: 1},
		},
		// table: 66, index: 2
		{
			ID:       7,
			StartKey: "7480000000000000FF425F698000000000FF0000020000000000FA",
			EndKey:   "7480000000000000FF425F698000000000FF0000030000000000FA",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 8, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 8, StoreID: 1},
		},
		// merge region 7, 5
		{
			ID:       8,
			StartKey: "7480000000000000FF425F698000000000FF0000020000000000FA",
			EndKey:   "7480000000000000FF425F72C000000000FF0000000000000000FA",
			Epoch:    helper.RegionEpoch{ConfVer: 1, Version: 1},
			Peers:    []helper.RegionPeer{{ID: 9, StoreID: 1}},
			Leader:   helper.RegionPeer{ID: 9, StoreID: 1},
		},
	}
	return &helper.RegionsInfo{
		Count:   int64(len(regions)),
		Regions: regions,
	}
}

// mockTiKVRegionsInfoResponse serves the region fixture as JSON.
func (s *HelperTestSuite) mockTiKVRegionsInfoResponse(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	resp := getMockTiKVRegionsInfo()
	data, err := json.MarshalIndent(resp, "", "	")
	if err != nil {
		log.Panic("json marshal failed", zap.Error(err))
	}
	_, err = w.Write(data)
	if err != nil {
		log.Panic("write http response failed", zap.Error(err))
	}
}

// mockStoreStatResponse serves a single-store stats payload as JSON.
func (s *HelperTestSuite) mockStoreStatResponse(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	startTs, err := time.Parse(time.RFC3339, "2019-04-23T19:30:30+08:00")
	if err != nil {
		log.Panic("mock tikv store api response failed", zap.Error(err))
	}
	lastHeartbeatTs, err := time.Parse(time.RFC3339, "2019-04-23T19:31:30+08:00")
	if err != nil {
		log.Panic("mock tikv store api response failed", zap.Error(err))
	}
	storesStat := helper.StoresStat{
		Count: 1,
		Stores: []helper.StoreStat{
			{
				Store: helper.StoreBaseStat{
					ID:        1,
					Address:   "127.0.0.1:20160",
					State:     0,
					StateName: "Up",
					Version:   "3.0.0-beta",
					Labels: []helper.StoreLabel{
						{
							Key:   "test",
							Value: "test",
						},
					},
				},
				Status: helper.StoreDetailStat{
					Capacity:        "60 GiB",
					Available:       "100 GiB",
					LeaderCount:     10,
					LeaderWeight:    1,
					LeaderScore:     1000,
					LeaderSize:      1000,
					RegionCount:     200,
					RegionWeight:    1,
					RegionScore:     1000,
					RegionSize:      1000,
					StartTs:         startTs,
					LastHeartbeatTs: lastHeartbeatTs,
					Uptime:          "1h30m",
				},
			},
		},
	}
	data, err := json.MarshalIndent(storesStat, "", "	")
	if err != nil {
		log.Panic("json marshal failed", zap.Error(err))
	}
	_, err = w.Write(data)
	if err != nil {
		log.Panic("write http response failed", zap.Error(err))
	}
}

// NOTE(review): the following is commit-message residue plus the beginning of
// the post-commit revision of this file (its import block continues on the
// next chunk line); left as-is.
improve asserts oftest cases in `helper_test.go` and fix ci (#11553)

// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package helper_test

import (
	"crypto/tls"
	"encoding/json"
	"net/http"
	"testing"
	"time"

	"github.com/gorilla/mux"
	.
"github.com/pingcap/check" "github.com/pingcap/log" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/mockstore/mocktikv" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/pdapi" "go.uber.org/zap" ) type HelperTestSuite struct { store tikv.Storage } var _ = Suite(new(HelperTestSuite)) func TestT(t *testing.T) { CustomVerboseFlag = true TestingT(t) } type mockStore struct { tikv.Storage pdAddrs []string } func (s *mockStore) EtcdAddrs() []string { return s.pdAddrs } func (s *mockStore) StartGCWorker() error { panic("not implemented") } func (s *mockStore) TLSConfig() *tls.Config { panic("not implemented") } func (s *HelperTestSuite) SetUpSuite(c *C) { go s.mockPDHTTPServer(c) time.Sleep(100 * time.Millisecond) mvccStore := mocktikv.MustNewMVCCStore() mockTikvStore, err := mockstore.NewMockTikvStore(mockstore.WithMVCCStore(mvccStore)) s.store = &mockStore{ mockTikvStore.(tikv.Storage), []string{"127.0.0.1:10100/"}, } c.Assert(err, IsNil) } func (s *HelperTestSuite) TestHotRegion(c *C) { h := helper.Helper{ Store: s.store, RegionCache: s.store.GetRegionCache(), } regionMetric, err := h.FetchHotRegion(pdapi.HotRead) c.Assert(err, IsNil, Commentf("err: %+v", err)) expected := make(map[uint64]helper.RegionMetric) expected[1] = helper.RegionMetric{ FlowBytes: 100, MaxHotDegree: 1, Count: 0, } c.Assert(regionMetric, DeepEquals, expected) dbInfo := &model.DBInfo{ Name: model.NewCIStr("test"), } c.Assert(err, IsNil) _, err = h.FetchRegionTableIndex(regionMetric, []*model.DBInfo{dbInfo}) c.Assert(err, IsNil, Commentf("err: %+v", err)) } func (s *HelperTestSuite) TestGetRegionsTableInfo(c *C) { h := helper.NewHelper(s.store) regionsInfo := getMockTiKVRegionsInfo() schemas := getMockRegionsTableInfoSchema() tableInfos := h.GetRegionsTableInfo(regionsInfo, schemas) c.Assert(tableInfos, DeepEquals, getRegionsTableInfoAns(schemas)) } func (s *HelperTestSuite) 
TestTiKVRegionsInfo(c *C) { h := helper.Helper{ Store: s.store, RegionCache: s.store.GetRegionCache(), } regionsInfo, err := h.GetRegionsInfo() c.Assert(err, IsNil, Commentf("err: %+v", err)) c.Assert(regionsInfo, DeepEquals, getMockTiKVRegionsInfo()) } func (s *HelperTestSuite) TestTiKVStoresStat(c *C) { h := helper.Helper{ Store: s.store, RegionCache: s.store.GetRegionCache(), } stat, err := h.GetStoresStat() c.Assert(err, IsNil, Commentf("err: %+v", err)) data, err := json.Marshal(stat) c.Assert(err, IsNil) c.Assert(string(data), Equals, `{"count":1,"stores":[{"store":{"id":1,"address":"127.0.0.1:20160","state":0,"state_name":"Up","version":"3.0.0-beta","labels":[{"key":"test","value":"test"}]},"status":{"capacity":"60 GiB","available":"100 GiB","leader_count":10,"leader_weight":1,"leader_score":1000,"leader_size":1000,"region_count":200,"region_weight":1,"region_score":1000,"region_size":1000,"start_ts":"2019-04-23T19:30:30+08:00","last_heartbeat_ts":"2019-04-23T19:31:30+08:00","uptime":"1h30m"}}]}`) } func (s *HelperTestSuite) mockPDHTTPServer(c *C) { router := mux.NewRouter() router.HandleFunc(pdapi.HotRead, s.mockHotRegionResponse) router.HandleFunc(pdapi.Regions, s.mockTiKVRegionsInfoResponse) router.HandleFunc(pdapi.Stores, s.mockStoreStatResponse) serverMux := http.NewServeMux() serverMux.Handle("/", router) server := &http.Server{Addr: "127.0.0.1:10100", Handler: serverMux} err := server.ListenAndServe() c.Assert(err, IsNil) } func (s *HelperTestSuite) mockHotRegionResponse(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) regionsStat := helper.HotRegionsStat{ RegionsStat: []helper.RegionStat{ { FlowBytes: 100, RegionID: 1, HotDegree: 1, }, }, } resp := helper.StoreHotRegionInfos{ AsLeader: make(map[uint64]*helper.HotRegionsStat), } resp.AsLeader[0] = &regionsStat data, err := json.MarshalIndent(resp, "", " ") if err != nil { log.Panic("json marshal failed", zap.Error(err)) } _, 
err = w.Write(data) if err != nil { log.Panic("write http response failed", zap.Error(err)) } } func getMockRegionsTableInfoSchema() []*model.DBInfo { return []*model.DBInfo{ { Name: model.NewCIStr("test"), Tables: []*model.TableInfo{ { ID: 41, Indices: []*model.IndexInfo{{ID: 1}}, }, { ID: 63, Indices: []*model.IndexInfo{{ID: 1}, {ID: 2}}, }, { ID: 66, Indices: []*model.IndexInfo{{ID: 1}, {ID: 2}, {ID: 3}}, }, }, }, } } func getRegionsTableInfoAns(dbs []*model.DBInfo) map[int64][]helper.TableInfo { ans := make(map[int64][]helper.TableInfo) db := dbs[0] ans[1] = []helper.TableInfo{} ans[2] = []helper.TableInfo{ {db, db.Tables[0], true, db.Tables[0].Indices[0]}, {db, db.Tables[0], false, nil}, } ans[3] = []helper.TableInfo{ {db, db.Tables[1], true, db.Tables[1].Indices[0]}, {db, db.Tables[1], true, db.Tables[1].Indices[1]}, {db, db.Tables[1], false, nil}, } ans[4] = []helper.TableInfo{ {db, db.Tables[2], false, nil}, } ans[5] = []helper.TableInfo{ {db, db.Tables[2], true, db.Tables[2].Indices[2]}, {db, db.Tables[2], false, nil}, } ans[6] = []helper.TableInfo{ {db, db.Tables[2], true, db.Tables[2].Indices[0]}, } ans[7] = []helper.TableInfo{ {db, db.Tables[2], true, db.Tables[2].Indices[1]}, } ans[8] = []helper.TableInfo{ {db, db.Tables[2], true, db.Tables[2].Indices[1]}, {db, db.Tables[2], true, db.Tables[2].Indices[2]}, {db, db.Tables[2], false, nil}, } return ans } func getMockTiKVRegionsInfo() *helper.RegionsInfo { regions := []helper.RegionInfo{ { ID: 1, StartKey: "", EndKey: "12341234", Epoch: helper.RegionEpoch{ ConfVer: 1, Version: 1, }, Peers: []helper.RegionPeer{ {ID: 2, StoreID: 1}, {ID: 15, StoreID: 51}, {ID: 66, StoreID: 99, IsLearner: true}, {ID: 123, StoreID: 111, IsLearner: true}, }, Leader: helper.RegionPeer{ ID: 2, StoreID: 1, }, DownPeers: []helper.RegionPeerStat{ { helper.RegionPeer{ID: 66, StoreID: 99, IsLearner: true}, 120, }, }, PendingPeers: []helper.RegionPeer{ {ID: 15, StoreID: 51}, }, WrittenBytes: 100, ReadBytes: 1000, ApproximateKeys: 200, 
ApproximateSize: 500, }, // table: 41, record + index: 1 { ID: 2, StartKey: "7480000000000000FF295F698000000000FF0000010000000000FA", EndKey: "7480000000000000FF2B5F698000000000FF0000010000000000FA", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 3, StoreID: 1}}, Leader: helper.RegionPeer{ID: 3, StoreID: 1}, }, // table: 63, record + index: 1, 2 { ID: 3, StartKey: "7480000000000000FF3F5F698000000000FF0000010000000000FA", EndKey: "7480000000000000FF425F698000000000FF0000010000000000FA", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 4, StoreID: 1}}, Leader: helper.RegionPeer{ID: 4, StoreID: 1}, }, // table: 66, record { ID: 4, StartKey: "7480000000000000FF425F72C000000000FF0000000000000000FA", EndKey: "", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 5, StoreID: 1}}, Leader: helper.RegionPeer{ID: 5, StoreID: 1}, }, // table: 66, record + index: 3 { ID: 5, StartKey: "7480000000000000FF425F698000000000FF0000030000000000FA", EndKey: "7480000000000000FF425F72C000000000FF0000000000000000FA", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 6, StoreID: 1}}, Leader: helper.RegionPeer{ID: 6, StoreID: 1}, }, // table: 66, index: 1 { ID: 6, StartKey: "7480000000000000FF425F698000000000FF0000010000000000FA", EndKey: "7480000000000000FF425F698000000000FF0000020000000000FA", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 7, StoreID: 1}}, Leader: helper.RegionPeer{ID: 7, StoreID: 1}, }, // table: 66, index: 2 { ID: 7, StartKey: "7480000000000000FF425F698000000000FF0000020000000000FA", EndKey: "7480000000000000FF425F698000000000FF0000030000000000FA", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 8, StoreID: 1}}, Leader: helper.RegionPeer{ID: 8, StoreID: 1}, }, // merge region 7, 5 { ID: 8, StartKey: "7480000000000000FF425F698000000000FF0000020000000000FA", EndKey: 
"7480000000000000FF425F72C000000000FF0000000000000000FA", Epoch: helper.RegionEpoch{ConfVer: 1, Version: 1}, Peers: []helper.RegionPeer{{ID: 9, StoreID: 1}}, Leader: helper.RegionPeer{ID: 9, StoreID: 1}, }, } return &helper.RegionsInfo{ Count: int64(len(regions)), Regions: regions, } } func (s *HelperTestSuite) mockTiKVRegionsInfoResponse(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) resp := getMockTiKVRegionsInfo() data, err := json.MarshalIndent(resp, "", " ") if err != nil { log.Panic("json marshal failed", zap.Error(err)) } _, err = w.Write(data) if err != nil { log.Panic("write http response failed", zap.Error(err)) } } func (s *HelperTestSuite) mockStoreStatResponse(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) startTs, err := time.Parse(time.RFC3339, "2019-04-23T19:30:30+08:00") if err != nil { log.Panic("mock tikv store api response failed", zap.Error(err)) } lastHeartbeatTs, err := time.Parse(time.RFC3339, "2019-04-23T19:31:30+08:00") if err != nil { log.Panic("mock tikv store api response failed", zap.Error(err)) } storesStat := helper.StoresStat{ Count: 1, Stores: []helper.StoreStat{ { Store: helper.StoreBaseStat{ ID: 1, Address: "127.0.0.1:20160", State: 0, StateName: "Up", Version: "3.0.0-beta", Labels: []helper.StoreLabel{ { Key: "test", Value: "test", }, }, }, Status: helper.StoreDetailStat{ Capacity: "60 GiB", Available: "100 GiB", LeaderCount: 10, LeaderWeight: 1, LeaderScore: 1000, LeaderSize: 1000, RegionCount: 200, RegionWeight: 1, RegionScore: 1000, RegionSize: 1000, StartTs: startTs, LastHeartbeatTs: lastHeartbeatTs, Uptime: "1h30m", }, }, }, } data, err := json.MarshalIndent(storesStat, "", " ") if err != nil { log.Panic("json marshal failed", zap.Error(err)) } _, err = w.Write(data) if err != nil { log.Panic("write http response failed", zap.Error(err)) } }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package tikv import ( "bytes" "container/list" "context" "fmt" "math" "sync" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/util/logutil" "go.uber.org/zap" ) // ResolvedCacheSize is max number of cached txn status. const ResolvedCacheSize = 2048 // bigTxnThreshold : transaction involves keys exceed this threshold can be treated as `big transaction`. 
const bigTxnThreshold = 16 var ( tikvLockResolverCountWithBatchResolve = metrics.TiKVLockResolverCounter.WithLabelValues("batch_resolve") tikvLockResolverCountWithExpired = metrics.TiKVLockResolverCounter.WithLabelValues("expired") tikvLockResolverCountWithNotExpired = metrics.TiKVLockResolverCounter.WithLabelValues("not_expired") tikvLockResolverCountWithWaitExpired = metrics.TiKVLockResolverCounter.WithLabelValues("wait_expired") tikvLockResolverCountWithResolve = metrics.TiKVLockResolverCounter.WithLabelValues("resolve") tikvLockResolverCountWithResolveForWrite = metrics.TiKVLockResolverCounter.WithLabelValues("resolve_for_write") tikvLockResolverCountWithWriteConflict = metrics.TiKVLockResolverCounter.WithLabelValues("write_conflict") tikvLockResolverCountWithQueryTxnStatus = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status") tikvLockResolverCountWithQueryTxnStatusCommitted = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status_committed") tikvLockResolverCountWithQueryTxnStatusRolledBack = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status_rolled_back") tikvLockResolverCountWithResolveLocks = metrics.TiKVLockResolverCounter.WithLabelValues("query_resolve_locks") tikvLockResolverCountWithResolveLockLite = metrics.TiKVLockResolverCounter.WithLabelValues("query_resolve_lock_lite") ) // LockResolver resolves locks and also caches resolved txn status. type LockResolver struct { store Storage mu struct { sync.RWMutex // resolved caches resolved txns (FIFO, txn id -> txnStatus). resolved map[uint64]TxnStatus recentResolved *list.List } } func newLockResolver(store Storage) *LockResolver { r := &LockResolver{ store: store, } r.mu.resolved = make(map[uint64]TxnStatus) r.mu.recentResolved = list.New() return r } // NewLockResolver is exported for other pkg to use, suppress unused warning. var _ = NewLockResolver // NewLockResolver creates a LockResolver. // It is exported for other pkg to use. 
For instance, binlog service needs // to determine a transaction's commit state. func NewLockResolver(etcdAddrs []string, security config.Security, opts ...pd.ClientOption) (*LockResolver, error) { pdCli, err := pd.NewClient(etcdAddrs, pd.SecurityOption{ CAPath: security.ClusterSSLCA, CertPath: security.ClusterSSLCert, KeyPath: security.ClusterSSLKey, }, opts...) if err != nil { return nil, errors.Trace(err) } uuid := fmt.Sprintf("tikv-%v", pdCli.GetClusterID(context.TODO())) tlsConfig, err := security.ToTLSConfig() if err != nil { return nil, errors.Trace(err) } spkv, err := NewEtcdSafePointKV(etcdAddrs, tlsConfig) if err != nil { return nil, errors.Trace(err) } s, err := newTikvStore(uuid, &codecPDClient{pdCli}, spkv, newRPCClient(security), false, nil) if err != nil { return nil, errors.Trace(err) } return s.lockResolver, nil } // TxnStatus represents a txn's final status. It should be Lock or Commit or Rollback. type TxnStatus struct { ttl uint64 commitTS uint64 action kvrpcpb.Action } // IsCommitted returns true if the txn's final status is Commit. func (s TxnStatus) IsCommitted() bool { return s.ttl == 0 && s.commitTS > 0 } // CommitTS returns the txn's commitTS. It is valid iff `IsCommitted` is true. func (s TxnStatus) CommitTS() uint64 { return s.commitTS } // By default, locks after 3000ms is considered unusual (the client created the // lock might be dead). Other client may cleanup this kind of lock. // For locks created recently, we will do backoff and retry. var defaultLockTTL uint64 = 3000 // ttl = ttlFactor * sqrt(writeSizeInMiB) var ttlFactor = 6000 // Lock represents a lock from tikv server. 
type Lock struct { Key []byte Primary []byte TxnID uint64 TTL uint64 TxnSize uint64 LockType kvrpcpb.Op LockForUpdateTS uint64 } func (l *Lock) String() string { buf := bytes.NewBuffer(make([]byte, 0, 128)) buf.WriteString("key: ") prettyWriteKey(buf, l.Key) buf.WriteString(", primary: ") prettyWriteKey(buf, l.Primary) return fmt.Sprintf("%s, txnStartTS: %d, lockForUpdateTS:%d, ttl: %d, type: %s", buf.String(), l.TxnID, l.LockForUpdateTS, l.TTL, l.LockType) } // NewLock creates a new *Lock. func NewLock(l *kvrpcpb.LockInfo) *Lock { return &Lock{ Key: l.GetKey(), Primary: l.GetPrimaryLock(), TxnID: l.GetLockVersion(), TTL: l.GetLockTtl(), TxnSize: l.GetTxnSize(), LockType: l.LockType, LockForUpdateTS: l.LockForUpdateTs, } } func (lr *LockResolver) saveResolved(txnID uint64, status TxnStatus) { lr.mu.Lock() defer lr.mu.Unlock() if _, ok := lr.mu.resolved[txnID]; ok { return } lr.mu.resolved[txnID] = status lr.mu.recentResolved.PushBack(txnID) if len(lr.mu.resolved) > ResolvedCacheSize { front := lr.mu.recentResolved.Front() delete(lr.mu.resolved, front.Value.(uint64)) lr.mu.recentResolved.Remove(front) } } func (lr *LockResolver) getResolved(txnID uint64) (TxnStatus, bool) { lr.mu.RLock() defer lr.mu.RUnlock() s, ok := lr.mu.resolved[txnID] return s, ok } // BatchResolveLocks resolve locks in a batch. // Used it in gcworker only! func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc RegionVerID) (bool, error) { if len(locks) == 0 { return true, nil } tikvLockResolverCountWithBatchResolve.Inc() // The GCWorker kill all ongoing transactions, because it must make sure all // locks have been cleaned before GC. 
expiredLocks := locks callerStartTS, err := lr.store.GetOracle().GetTimestamp(bo.ctx) if err != nil { return false, errors.Trace(err) } txnInfos := make(map[uint64]uint64) startTime := time.Now() for _, l := range expiredLocks { if _, ok := txnInfos[l.TxnID]; ok { continue } tikvLockResolverCountWithExpired.Inc() // Use currentTS = math.MaxUint64 means rollback the txn, no matter the lock is expired or not! status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, math.MaxUint64, true) if err != nil { return false, err } if status.ttl > 0 { logutil.BgLogger().Error("BatchResolveLocks fail to clean locks, this result is not expected!") return false, errors.New("TiDB ask TiKV to rollback locks but it doesn't, the protocol maybe wrong") } txnInfos[l.TxnID] = status.commitTS } logutil.BgLogger().Info("BatchResolveLocks: lookup txn status", zap.Duration("cost time", time.Since(startTime)), zap.Int("num of txn", len(txnInfos))) listTxnInfos := make([]*kvrpcpb.TxnInfo, 0, len(txnInfos)) for txnID, status := range txnInfos { listTxnInfos = append(listTxnInfos, &kvrpcpb.TxnInfo{ Txn: txnID, Status: status, }) } req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, &kvrpcpb.ResolveLockRequest{TxnInfos: listTxnInfos}) startTime = time.Now() resp, err := lr.store.SendReq(bo, req, loc, readTimeoutShort) if err != nil { return false, errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return false, errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return false, errors.Trace(err) } return false, nil } if resp.Resp == nil { return false, errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.ResolveLockResponse) if keyErr := cmdResp.GetError(); keyErr != nil { return false, errors.Errorf("unexpected resolve err: %s", keyErr) } logutil.BgLogger().Info("BatchResolveLocks: resolve locks in a batch", zap.Duration("cost time", time.Since(startTime)), zap.Int("num of locks", 
len(expiredLocks))) return true, nil } // ResolveLocks tries to resolve Locks. The resolving process is in 3 steps: // 1) Use the `lockTTL` to pick up all expired locks. Only locks that are too // old are considered orphan locks and will be handled later. If all locks // are expired then all locks will be resolved so the returned `ok` will be // true, otherwise caller should sleep a while before retry. // 2) For each lock, query the primary key to get txn(which left the lock)'s // commit status. // 3) Send `ResolveLock` cmd to the lock's region to resolve all locks belong to // the same transaction. func (lr *LockResolver) ResolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, []uint64 /*pushed*/, error) { return lr.resolveLocks(bo, callerStartTS, locks, false, false) } func (lr *LockResolver) resolveLocksLite(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, []uint64 /*pushed*/, error) { return lr.resolveLocks(bo, callerStartTS, locks, false, true) } func (lr *LockResolver) resolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock, forWrite bool, lite bool) (int64, []uint64 /*pushed*/, error) { var msBeforeTxnExpired txnExpireTime if len(locks) == 0 { return msBeforeTxnExpired.value(), nil, nil } if forWrite { tikvLockResolverCountWithResolveForWrite.Inc() } else { tikvLockResolverCountWithResolve.Inc() } var pushFail bool // TxnID -> []Region, record resolved Regions. // TODO: Maybe put it in LockResolver and share by all txns. cleanTxns := make(map[uint64]map[RegionVerID]struct{}) var pushed []uint64 // pushed is only used in the read operation. if !forWrite { pushed = make([]uint64, 0, len(locks)) } for _, l := range locks { status, err := lr.getTxnStatusFromLock(bo, l, callerStartTS) if err != nil { msBeforeTxnExpired.update(0) err = errors.Trace(err) return msBeforeTxnExpired.value(), nil, err } if status.ttl == 0 { tikvLockResolverCountWithExpired.Inc() // If the lock is committed or rollbacked, resolve lock. 
cleanRegions, exists := cleanTxns[l.TxnID] if !exists { cleanRegions = make(map[RegionVerID]struct{}) cleanTxns[l.TxnID] = cleanRegions } if l.LockType == kvrpcpb.Op_PessimisticLock { err = lr.resolvePessimisticLock(bo, l, cleanRegions) } else { err = lr.resolveLock(bo, l, status, lite, cleanRegions) } if err != nil { msBeforeTxnExpired.update(0) err = errors.Trace(err) return msBeforeTxnExpired.value(), nil, err } } else { tikvLockResolverCountWithNotExpired.Inc() // If the lock is valid, the txn may be a pessimistic transaction. // Update the txn expire time. msBeforeLockExpired := lr.store.GetOracle().UntilExpired(l.TxnID, status.ttl) msBeforeTxnExpired.update(msBeforeLockExpired) if forWrite { // Write conflict detected! // If it's a optimistic conflict and current txn is earlier than the lock owner, // abort current transaction. // This could avoids the deadlock scene of two large transaction. if l.LockType != kvrpcpb.Op_PessimisticLock && l.TxnID > callerStartTS { tikvLockResolverCountWithWriteConflict.Inc() return msBeforeTxnExpired.value(), nil, kv.ErrWriteConflict.GenWithStackByArgs(callerStartTS, l.TxnID, status.commitTS, l.Key) } } else { if status.action != kvrpcpb.Action_MinCommitTSPushed { pushFail = true continue } pushed = append(pushed, l.TxnID) } } } if pushFail { // If any of the lock fails to push minCommitTS, don't return the pushed array. pushed = nil } if msBeforeTxnExpired.value() > 0 && len(pushed) == 0 { // If len(pushed) > 0, the caller will not block on the locks, it push the minCommitTS instead. 
tikvLockResolverCountWithWaitExpired.Inc() } return msBeforeTxnExpired.value(), pushed, nil } func (lr *LockResolver) resolveLocksForWrite(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, error) { msBeforeTxnExpired, _, err := lr.resolveLocks(bo, callerStartTS, locks, true, false) return msBeforeTxnExpired, err } type txnExpireTime struct { initialized bool txnExpire int64 } func (t *txnExpireTime) update(lockExpire int64) { if lockExpire <= 0 { lockExpire = 0 } if !t.initialized { t.txnExpire = lockExpire t.initialized = true return } if lockExpire < t.txnExpire { t.txnExpire = lockExpire } } func (t *txnExpireTime) value() int64 { if !t.initialized { return 0 } return t.txnExpire } // GetTxnStatus queries tikv-server for a txn's status (commit/rollback). // If the primary key is still locked, it will launch a Rollback to abort it. // To avoid unnecessarily aborting too many txns, it is wiser to wait a few // seconds before calling it after Prewrite. func (lr *LockResolver) GetTxnStatus(txnID uint64, callerStartTS uint64, primary []byte) (TxnStatus, error) { var status TxnStatus bo := NewBackoffer(context.Background(), cleanupMaxBackoff) currentTS, err := lr.store.GetOracle().GetLowResolutionTimestamp(bo.ctx) if err != nil { return status, err } return lr.getTxnStatus(bo, txnID, primary, callerStartTS, currentTS, true) } func (lr *LockResolver) getTxnStatusFromLock(bo *Backoffer, l *Lock, callerStartTS uint64) (TxnStatus, error) { var currentTS uint64 var err error var status TxnStatus if l.TTL == 0 { // NOTE: l.TTL = 0 is a special protocol!!! // When the pessimistic txn prewrite meets locks of a txn, it should resolve the lock **unconditionally**. // In this case, TiKV use lock TTL = 0 to notify TiDB, and TiDB should resolve the lock! // Set currentTS to max uint64 to make the lock expired. 
currentTS = math.MaxUint64 } else { currentTS, err = lr.store.GetOracle().GetLowResolutionTimestamp(bo.ctx) if err != nil { return TxnStatus{}, err } } rollbackIfNotExist := false failpoint.Inject("getTxnStatusDelay", func() { time.Sleep(100 * time.Millisecond) }) for { status, err = lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, currentTS, rollbackIfNotExist) if err == nil { return status, nil } // If the error is something other than txnNotFoundErr, throw the error (network // unavailable, tikv down, backoff timeout etc) to the caller. if _, ok := errors.Cause(err).(txnNotFoundErr); !ok { return TxnStatus{}, err } failpoint.Inject("txnNotFoundRetTTL", func() { failpoint.Return(TxnStatus{l.TTL, 0, kvrpcpb.Action_NoAction}, nil) }) // Handle txnNotFound error. // getTxnStatus() returns it when the secondary locks exist while the primary lock doesn't. // This is likely to happen in the concurrently prewrite when secondary regions // success before the primary region. if err := bo.Backoff(boTxnNotFound, err); err != nil { logutil.Logger(bo.ctx).Warn("getTxnStatusFromLock backoff fail", zap.Error(err)) } if lr.store.GetOracle().UntilExpired(l.TxnID, l.TTL) <= 0 { logutil.Logger(bo.ctx).Warn("lock txn not found, lock has expired", zap.Uint64("CallerStartTs", callerStartTS), zap.Stringer("lock str", l)) if l.LockType == kvrpcpb.Op_PessimisticLock { failpoint.Inject("txnExpireRetTTL", func() { failpoint.Return(TxnStatus{l.TTL, 0, kvrpcpb.Action_NoAction}, errors.New("error txn not found and lock expired")) }) return TxnStatus{}, nil } rollbackIfNotExist = true } else { if l.LockType == kvrpcpb.Op_PessimisticLock { return TxnStatus{ttl: l.TTL}, nil } } } } type txnNotFoundErr struct { *kvrpcpb.TxnNotFound } func (e txnNotFoundErr) Error() string { return e.TxnNotFound.String() } // getTxnStatus sends the CheckTxnStatus request to the TiKV server. // When rollbackIfNotExist is false, the caller should be careful with the txnNotFoundErr error. 
func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte, callerStartTS, currentTS uint64, rollbackIfNotExist bool) (TxnStatus, error) { if s, ok := lr.getResolved(txnID); ok { return s, nil } tikvLockResolverCountWithQueryTxnStatus.Inc() // CheckTxnStatus may meet the following cases: // 1. LOCK // 1.1 Lock expired -- orphan lock, fail to update TTL, crash recovery etc. // 1.2 Lock TTL -- active transaction holding the lock. // 2. NO LOCK // 2.1 Txn Committed // 2.2 Txn Rollbacked -- rollback itself, rollback by others, GC tomb etc. // 2.3 No lock -- pessimistic lock rollback, concurrence prewrite. var status TxnStatus req := tikvrpc.NewRequest(tikvrpc.CmdCheckTxnStatus, &kvrpcpb.CheckTxnStatusRequest{ PrimaryKey: primary, LockTs: txnID, CallerStartTs: callerStartTS, CurrentTs: currentTS, RollbackIfNotExist: rollbackIfNotExist, }) for { loc, err := lr.store.GetRegionCache().LocateKey(bo, primary) if err != nil { return status, errors.Trace(err) } resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) if err != nil { return status, errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return status, errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return status, errors.Trace(err) } continue } if resp.Resp == nil { return status, errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.CheckTxnStatusResponse) if keyErr := cmdResp.GetError(); keyErr != nil { txnNotFound := keyErr.GetTxnNotFound() if txnNotFound != nil { return status, txnNotFoundErr{txnNotFound} } err = errors.Errorf("unexpected err: %s, tid: %v", keyErr, txnID) logutil.BgLogger().Error("getTxnStatus error", zap.Error(err)) return status, err } status.action = cmdResp.Action if cmdResp.LockTtl != 0 { status.ttl = cmdResp.LockTtl } else { if cmdResp.CommitVersion == 0 { tikvLockResolverCountWithQueryTxnStatusRolledBack.Inc() } else { 
tikvLockResolverCountWithQueryTxnStatusCommitted.Inc() } status.commitTS = cmdResp.CommitVersion lr.saveResolved(txnID, status) } return status, nil } } func (lr *LockResolver) resolveLock(bo *Backoffer, l *Lock, status TxnStatus, lite bool, cleanRegions map[RegionVerID]struct{}) error { tikvLockResolverCountWithResolveLocks.Inc() resolveLite := lite || l.TxnSize < bigTxnThreshold for { loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key) if err != nil { return errors.Trace(err) } if _, ok := cleanRegions[loc.Region]; ok { return nil } lreq := &kvrpcpb.ResolveLockRequest{ StartVersion: l.TxnID, } if status.IsCommitted() { lreq.CommitVersion = status.CommitTS() } if resolveLite { // Only resolve specified keys when it is a small transaction, // prevent from scanning the whole region in this case. tikvLockResolverCountWithResolveLockLite.Inc() lreq.Keys = [][]byte{l.Key} if !status.IsCommitted() { logutil.BgLogger().Info("resolveLock rollback", zap.String("lock", l.String())) } } req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, lreq) resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) if err != nil { return errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return errors.Trace(err) } continue } if resp.Resp == nil { return errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.ResolveLockResponse) if keyErr := cmdResp.GetError(); keyErr != nil { err = errors.Errorf("unexpected resolve err: %s, lock: %v", keyErr, l) logutil.BgLogger().Error("resolveLock error", zap.Error(err)) return err } if !resolveLite { cleanRegions[loc.Region] = struct{}{} } return nil } } func (lr *LockResolver) resolvePessimisticLock(bo *Backoffer, l *Lock, cleanRegions map[RegionVerID]struct{}) error { tikvLockResolverCountWithResolveLocks.Inc() for { loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key) if 
err != nil { return errors.Trace(err) } if _, ok := cleanRegions[loc.Region]; ok { return nil } forUpdateTS := l.LockForUpdateTS if forUpdateTS == 0 { forUpdateTS = math.MaxUint64 } pessimisticRollbackReq := &kvrpcpb.PessimisticRollbackRequest{ StartVersion: l.TxnID, ForUpdateTs: forUpdateTS, Keys: [][]byte{l.Key}, } req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, pessimisticRollbackReq) resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) if err != nil { return errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return errors.Trace(err) } continue } if resp.Resp == nil { return errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.PessimisticRollbackResponse) if keyErr := cmdResp.GetErrors(); len(keyErr) > 0 { err = errors.Errorf("unexpected resolve pessimistic lock err: %s, lock: %v", keyErr[0], l) logutil.Logger(bo.ctx).Error("resolveLock error", zap.Error(err)) return err } return nil } } Expose all fields of TxnStatus (#17582) Co-authored-by: MyonKeminta <f61fcbf0b1b8e66e29bfd79a313ac22303163882@users.noreply.github.com> Co-authored-by: pingcap-github-bot <02289d1d97168591eb26d2551f7cc1b10910d341@pingcap.com> Co-authored-by: lysu <456460f9c62282f2a0fbdf1594101f151916955e@gmail.com> // Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package tikv import ( "bytes" "container/list" "context" "fmt" "math" "sync" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/util/logutil" "go.uber.org/zap" ) // ResolvedCacheSize is max number of cached txn status. const ResolvedCacheSize = 2048 // bigTxnThreshold : transaction involves keys exceed this threshold can be treated as `big transaction`. const bigTxnThreshold = 16 var ( tikvLockResolverCountWithBatchResolve = metrics.TiKVLockResolverCounter.WithLabelValues("batch_resolve") tikvLockResolverCountWithExpired = metrics.TiKVLockResolverCounter.WithLabelValues("expired") tikvLockResolverCountWithNotExpired = metrics.TiKVLockResolverCounter.WithLabelValues("not_expired") tikvLockResolverCountWithWaitExpired = metrics.TiKVLockResolverCounter.WithLabelValues("wait_expired") tikvLockResolverCountWithResolve = metrics.TiKVLockResolverCounter.WithLabelValues("resolve") tikvLockResolverCountWithResolveForWrite = metrics.TiKVLockResolverCounter.WithLabelValues("resolve_for_write") tikvLockResolverCountWithWriteConflict = metrics.TiKVLockResolverCounter.WithLabelValues("write_conflict") tikvLockResolverCountWithQueryTxnStatus = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status") tikvLockResolverCountWithQueryTxnStatusCommitted = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status_committed") tikvLockResolverCountWithQueryTxnStatusRolledBack = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status_rolled_back") tikvLockResolverCountWithResolveLocks = metrics.TiKVLockResolverCounter.WithLabelValues("query_resolve_locks") tikvLockResolverCountWithResolveLockLite = metrics.TiKVLockResolverCounter.WithLabelValues("query_resolve_lock_lite") ) // LockResolver resolves locks 
and also caches resolved txn status. type LockResolver struct { store Storage mu struct { sync.RWMutex // resolved caches resolved txns (FIFO, txn id -> txnStatus). resolved map[uint64]TxnStatus recentResolved *list.List } } func newLockResolver(store Storage) *LockResolver { r := &LockResolver{ store: store, } r.mu.resolved = make(map[uint64]TxnStatus) r.mu.recentResolved = list.New() return r } // NewLockResolver is exported for other pkg to use, suppress unused warning. var _ = NewLockResolver // NewLockResolver creates a LockResolver. // It is exported for other pkg to use. For instance, binlog service needs // to determine a transaction's commit state. func NewLockResolver(etcdAddrs []string, security config.Security, opts ...pd.ClientOption) (*LockResolver, error) { pdCli, err := pd.NewClient(etcdAddrs, pd.SecurityOption{ CAPath: security.ClusterSSLCA, CertPath: security.ClusterSSLCert, KeyPath: security.ClusterSSLKey, }, opts...) if err != nil { return nil, errors.Trace(err) } uuid := fmt.Sprintf("tikv-%v", pdCli.GetClusterID(context.TODO())) tlsConfig, err := security.ToTLSConfig() if err != nil { return nil, errors.Trace(err) } spkv, err := NewEtcdSafePointKV(etcdAddrs, tlsConfig) if err != nil { return nil, errors.Trace(err) } s, err := newTikvStore(uuid, &codecPDClient{pdCli}, spkv, newRPCClient(security), false, nil) if err != nil { return nil, errors.Trace(err) } return s.lockResolver, nil } // TxnStatus represents a txn's final status. It should be Lock or Commit or Rollback. type TxnStatus struct { ttl uint64 commitTS uint64 action kvrpcpb.Action } // IsCommitted returns true if the txn's final status is Commit. func (s TxnStatus) IsCommitted() bool { return s.ttl == 0 && s.commitTS > 0 } // CommitTS returns the txn's commitTS. It is valid iff `IsCommitted` is true. func (s TxnStatus) CommitTS() uint64 { return s.commitTS } // TTL returns the TTL of the transaction if the transaction is still alive. 
func (s TxnStatus) TTL() uint64 { return s.ttl } // Action returns what the CheckTxnStatus request have done to the transaction. func (s TxnStatus) Action() kvrpcpb.Action { return s.action } // By default, locks after 3000ms is considered unusual (the client created the // lock might be dead). Other client may cleanup this kind of lock. // For locks created recently, we will do backoff and retry. var defaultLockTTL uint64 = 3000 // ttl = ttlFactor * sqrt(writeSizeInMiB) var ttlFactor = 6000 // Lock represents a lock from tikv server. type Lock struct { Key []byte Primary []byte TxnID uint64 TTL uint64 TxnSize uint64 LockType kvrpcpb.Op LockForUpdateTS uint64 } func (l *Lock) String() string { buf := bytes.NewBuffer(make([]byte, 0, 128)) buf.WriteString("key: ") prettyWriteKey(buf, l.Key) buf.WriteString(", primary: ") prettyWriteKey(buf, l.Primary) return fmt.Sprintf("%s, txnStartTS: %d, lockForUpdateTS:%d, ttl: %d, type: %s", buf.String(), l.TxnID, l.LockForUpdateTS, l.TTL, l.LockType) } // NewLock creates a new *Lock. func NewLock(l *kvrpcpb.LockInfo) *Lock { return &Lock{ Key: l.GetKey(), Primary: l.GetPrimaryLock(), TxnID: l.GetLockVersion(), TTL: l.GetLockTtl(), TxnSize: l.GetTxnSize(), LockType: l.LockType, LockForUpdateTS: l.LockForUpdateTs, } } func (lr *LockResolver) saveResolved(txnID uint64, status TxnStatus) { lr.mu.Lock() defer lr.mu.Unlock() if _, ok := lr.mu.resolved[txnID]; ok { return } lr.mu.resolved[txnID] = status lr.mu.recentResolved.PushBack(txnID) if len(lr.mu.resolved) > ResolvedCacheSize { front := lr.mu.recentResolved.Front() delete(lr.mu.resolved, front.Value.(uint64)) lr.mu.recentResolved.Remove(front) } } func (lr *LockResolver) getResolved(txnID uint64) (TxnStatus, bool) { lr.mu.RLock() defer lr.mu.RUnlock() s, ok := lr.mu.resolved[txnID] return s, ok } // BatchResolveLocks resolve locks in a batch. // Used it in gcworker only! 
func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc RegionVerID) (bool, error) {
	if len(locks) == 0 {
		return true, nil
	}

	tikvLockResolverCountWithBatchResolve.Inc()

	// The GCWorker kill all ongoing transactions, because it must make sure all
	// locks have been cleaned before GC.
	expiredLocks := locks

	callerStartTS, err := lr.store.GetOracle().GetTimestamp(bo.ctx)
	if err != nil {
		return false, errors.Trace(err)
	}

	// Collect each distinct txn's final commit ts (0 means rolled back).
	txnInfos := make(map[uint64]uint64)
	startTime := time.Now()
	for _, l := range expiredLocks {
		// Query each txn's status only once, however many locks it holds here.
		if _, ok := txnInfos[l.TxnID]; ok {
			continue
		}
		tikvLockResolverCountWithExpired.Inc()

		// Use currentTS = math.MaxUint64 means rollback the txn, no matter the lock is expired or not!
		status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, math.MaxUint64, true)
		if err != nil {
			return false, err
		}

		// A still-positive TTL here violates the forced-rollback protocol above.
		if status.ttl > 0 {
			logutil.BgLogger().Error("BatchResolveLocks fail to clean locks, this result is not expected!")
			return false, errors.New("TiDB ask TiKV to rollback locks but it doesn't, the protocol maybe wrong")
		}

		txnInfos[l.TxnID] = status.commitTS
	}
	logutil.BgLogger().Info("BatchResolveLocks: lookup txn status",
		zap.Duration("cost time", time.Since(startTime)),
		zap.Int("num of txn", len(txnInfos)))

	listTxnInfos := make([]*kvrpcpb.TxnInfo, 0, len(txnInfos))
	for txnID, status := range txnInfos {
		listTxnInfos = append(listTxnInfos, &kvrpcpb.TxnInfo{
			Txn:    txnID,
			Status: status,
		})
	}

	// One ResolveLock request carries every txn's status for this region.
	req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, &kvrpcpb.ResolveLockRequest{TxnInfos: listTxnInfos})
	startTime = time.Now()
	resp, err := lr.store.SendReq(bo, req, loc, readTimeoutShort)
	if err != nil {
		return false, errors.Trace(err)
	}

	regionErr, err := resp.GetRegionError()
	if err != nil {
		return false, errors.Trace(err)
	}

	if regionErr != nil {
		// Region epoch changed; the caller must re-locate and retry.
		err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
		if err != nil {
			return false, errors.Trace(err)
		}
		return false, nil
	}

	if resp.Resp == nil {
		return false, errors.Trace(ErrBodyMissing)
	}
	cmdResp := resp.Resp.(*kvrpcpb.ResolveLockResponse)
	if keyErr := cmdResp.GetError(); keyErr != nil {
		return false, errors.Errorf("unexpected resolve err: %s", keyErr)
	}

	logutil.BgLogger().Info("BatchResolveLocks: resolve locks in a batch",
		zap.Duration("cost time", time.Since(startTime)),
		zap.Int("num of locks", len(expiredLocks)))
	return true, nil
}

// ResolveLocks tries to resolve Locks. The resolving process is in 3 steps:
// 1) Use the `lockTTL` to pick up all expired locks. Only locks that are too
//    old are considered orphan locks and will be handled later. If all locks
//    are expired then all locks will be resolved so the returned `ok` will be
//    true, otherwise caller should sleep a while before retry.
// 2) For each lock, query the primary key to get txn(which left the lock)'s
//    commit status.
// 3) Send `ResolveLock` cmd to the lock's region to resolve all locks belong to
//    the same transaction.
func (lr *LockResolver) ResolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, []uint64 /*pushed*/, error) {
	return lr.resolveLocks(bo, callerStartTS, locks, false, false)
}

// resolveLocksLite is like ResolveLocks but resolves only the specified keys
// instead of whole regions.
func (lr *LockResolver) resolveLocksLite(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, []uint64 /*pushed*/, error) {
	return lr.resolveLocks(bo, callerStartTS, locks, false, true)
}

// resolveLocks is the shared implementation behind ResolveLocks,
// resolveLocksLite and resolveLocksForWrite. It returns the ms until the
// earliest still-live txn expires, plus the txn ids whose minCommitTS was
// successfully pushed (read path only).
func (lr *LockResolver) resolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock, forWrite bool, lite bool) (int64, []uint64 /*pushed*/, error) {
	var msBeforeTxnExpired txnExpireTime
	if len(locks) == 0 {
		return msBeforeTxnExpired.value(), nil, nil
	}

	if forWrite {
		tikvLockResolverCountWithResolveForWrite.Inc()
	} else {
		tikvLockResolverCountWithResolve.Inc()
	}

	var pushFail bool
	// TxnID -> []Region, record resolved Regions.
	// TODO: Maybe put it in LockResolver and share by all txns.
	cleanTxns := make(map[uint64]map[RegionVerID]struct{})
	var pushed []uint64
	// pushed is only used in the read operation.
	if !forWrite {
		pushed = make([]uint64, 0, len(locks))
	}

	for _, l := range locks {
		status, err := lr.getTxnStatusFromLock(bo, l, callerStartTS)
		if err != nil {
			msBeforeTxnExpired.update(0)
			err = errors.Trace(err)
			return msBeforeTxnExpired.value(), nil, err
		}

		if status.ttl == 0 {
			tikvLockResolverCountWithExpired.Inc()
			// If the lock is committed or rollbacked, resolve lock.
			cleanRegions, exists := cleanTxns[l.TxnID]
			if !exists {
				cleanRegions = make(map[RegionVerID]struct{})
				cleanTxns[l.TxnID] = cleanRegions
			}

			if l.LockType == kvrpcpb.Op_PessimisticLock {
				err = lr.resolvePessimisticLock(bo, l, cleanRegions)
			} else {
				err = lr.resolveLock(bo, l, status, lite, cleanRegions)
			}
			if err != nil {
				msBeforeTxnExpired.update(0)
				err = errors.Trace(err)
				return msBeforeTxnExpired.value(), nil, err
			}
		} else {
			tikvLockResolverCountWithNotExpired.Inc()
			// If the lock is valid, the txn may be a pessimistic transaction.
			// Update the txn expire time.
			msBeforeLockExpired := lr.store.GetOracle().UntilExpired(l.TxnID, status.ttl)
			msBeforeTxnExpired.update(msBeforeLockExpired)
			if forWrite {
				// Write conflict detected!
				// If it's a optimistic conflict and current txn is earlier than the lock owner,
				// abort current transaction.
				// This could avoids the deadlock scene of two large transaction.
				if l.LockType != kvrpcpb.Op_PessimisticLock && l.TxnID > callerStartTS {
					tikvLockResolverCountWithWriteConflict.Inc()
					return msBeforeTxnExpired.value(), nil, kv.ErrWriteConflict.GenWithStackByArgs(callerStartTS, l.TxnID, status.commitTS, l.Key)
				}
			} else {
				if status.action != kvrpcpb.Action_MinCommitTSPushed {
					pushFail = true
					continue
				}
				pushed = append(pushed, l.TxnID)
			}
		}
	}
	if pushFail {
		// If any of the lock fails to push minCommitTS, don't return the pushed array.
		pushed = nil
	}

	if msBeforeTxnExpired.value() > 0 && len(pushed) == 0 {
		// If len(pushed) > 0, the caller will not block on the locks, it push the minCommitTS instead.
		tikvLockResolverCountWithWaitExpired.Inc()
	}
	return msBeforeTxnExpired.value(), pushed, nil
}

// resolveLocksForWrite resolves locks on the write path; it never returns a
// pushed list.
func (lr *LockResolver) resolveLocksForWrite(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, error) {
	msBeforeTxnExpired, _, err := lr.resolveLocks(bo, callerStartTS, locks, true, false)
	return msBeforeTxnExpired, err
}

// txnExpireTime tracks the minimum (clamped at 0) of the lock expire times
// seen so far.
type txnExpireTime struct {
	initialized bool
	txnExpire   int64
}

// update folds one lock's expire time into the running minimum.
func (t *txnExpireTime) update(lockExpire int64) {
	if lockExpire <= 0 {
		lockExpire = 0
	}
	if !t.initialized {
		t.txnExpire = lockExpire
		t.initialized = true
		return
	}
	if lockExpire < t.txnExpire {
		t.txnExpire = lockExpire
	}
}

// value returns the tracked minimum, or 0 when nothing was recorded.
func (t *txnExpireTime) value() int64 {
	if !t.initialized {
		return 0
	}
	return t.txnExpire
}

// GetTxnStatus queries tikv-server for a txn's status (commit/rollback).
// If the primary key is still locked, it will launch a Rollback to abort it.
// To avoid unnecessarily aborting too many txns, it is wiser to wait a few
// seconds before calling it after Prewrite.
func (lr *LockResolver) GetTxnStatus(txnID uint64, callerStartTS uint64, primary []byte) (TxnStatus, error) {
	var status TxnStatus
	bo := NewBackoffer(context.Background(), cleanupMaxBackoff)
	currentTS, err := lr.store.GetOracle().GetLowResolutionTimestamp(bo.ctx)
	if err != nil {
		return status, err
	}
	return lr.getTxnStatus(bo, txnID, primary, callerStartTS, currentTS, true)
}

// getTxnStatusFromLock resolves the status of the txn that owns lock l,
// retrying on txnNotFound until the lock's TTL has expired.
func (lr *LockResolver) getTxnStatusFromLock(bo *Backoffer, l *Lock, callerStartTS uint64) (TxnStatus, error) {
	var currentTS uint64
	var err error
	var status TxnStatus
	if l.TTL == 0 {
		// NOTE: l.TTL = 0 is a special protocol!!!
		// When the pessimistic txn prewrite meets locks of a txn, it should resolve the lock **unconditionally**.
		// In this case, TiKV use lock TTL = 0 to notify TiDB, and TiDB should resolve the lock!
		// Set currentTS to max uint64 to make the lock expired.
		currentTS = math.MaxUint64
	} else {
		currentTS, err = lr.store.GetOracle().GetLowResolutionTimestamp(bo.ctx)
		if err != nil {
			return TxnStatus{}, err
		}
	}

	rollbackIfNotExist := false
	failpoint.Inject("getTxnStatusDelay", func() {
		time.Sleep(100 * time.Millisecond)
	})
	for {
		status, err = lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, currentTS, rollbackIfNotExist)
		if err == nil {
			return status, nil
		}
		// If the error is something other than txnNotFoundErr, throw the error (network
		// unavailable, tikv down, backoff timeout etc) to the caller.
		if _, ok := errors.Cause(err).(txnNotFoundErr); !ok {
			return TxnStatus{}, err
		}

		failpoint.Inject("txnNotFoundRetTTL", func() {
			failpoint.Return(TxnStatus{l.TTL, 0, kvrpcpb.Action_NoAction}, nil)
		})

		// Handle txnNotFound error.
		// getTxnStatus() returns it when the secondary locks exist while the primary lock doesn't.
		// This is likely to happen in the concurrently prewrite when secondary regions
		// success before the primary region.
		if err := bo.Backoff(boTxnNotFound, err); err != nil {
			logutil.Logger(bo.ctx).Warn("getTxnStatusFromLock backoff fail", zap.Error(err))
		}

		if lr.store.GetOracle().UntilExpired(l.TxnID, l.TTL) <= 0 {
			logutil.Logger(bo.ctx).Warn("lock txn not found, lock has expired",
				zap.Uint64("CallerStartTs", callerStartTS),
				zap.Stringer("lock str", l))
			if l.LockType == kvrpcpb.Op_PessimisticLock {
				failpoint.Inject("txnExpireRetTTL", func() {
					failpoint.Return(TxnStatus{l.TTL, 0, kvrpcpb.Action_NoAction},
						errors.New("error txn not found and lock expired"))
				})
				return TxnStatus{}, nil
			}
			rollbackIfNotExist = true
		} else {
			if l.LockType == kvrpcpb.Op_PessimisticLock {
				return TxnStatus{ttl: l.TTL}, nil
			}
		}
	}
}

// txnNotFoundErr wraps the TxnNotFound key error so callers can detect it via
// a type assertion on errors.Cause.
type txnNotFoundErr struct {
	*kvrpcpb.TxnNotFound
}

func (e txnNotFoundErr) Error() string {
	return e.TxnNotFound.String()
}

// getTxnStatus sends the CheckTxnStatus request to the TiKV server.
// When rollbackIfNotExist is false, the caller should be careful with the txnNotFoundErr error.
func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte, callerStartTS, currentTS uint64, rollbackIfNotExist bool) (TxnStatus, error) { if s, ok := lr.getResolved(txnID); ok { return s, nil } tikvLockResolverCountWithQueryTxnStatus.Inc() // CheckTxnStatus may meet the following cases: // 1. LOCK // 1.1 Lock expired -- orphan lock, fail to update TTL, crash recovery etc. // 1.2 Lock TTL -- active transaction holding the lock. // 2. NO LOCK // 2.1 Txn Committed // 2.2 Txn Rollbacked -- rollback itself, rollback by others, GC tomb etc. // 2.3 No lock -- pessimistic lock rollback, concurrence prewrite. var status TxnStatus req := tikvrpc.NewRequest(tikvrpc.CmdCheckTxnStatus, &kvrpcpb.CheckTxnStatusRequest{ PrimaryKey: primary, LockTs: txnID, CallerStartTs: callerStartTS, CurrentTs: currentTS, RollbackIfNotExist: rollbackIfNotExist, }) for { loc, err := lr.store.GetRegionCache().LocateKey(bo, primary) if err != nil { return status, errors.Trace(err) } resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) if err != nil { return status, errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return status, errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return status, errors.Trace(err) } continue } if resp.Resp == nil { return status, errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.CheckTxnStatusResponse) if keyErr := cmdResp.GetError(); keyErr != nil { txnNotFound := keyErr.GetTxnNotFound() if txnNotFound != nil { return status, txnNotFoundErr{txnNotFound} } err = errors.Errorf("unexpected err: %s, tid: %v", keyErr, txnID) logutil.BgLogger().Error("getTxnStatus error", zap.Error(err)) return status, err } status.action = cmdResp.Action if cmdResp.LockTtl != 0 { status.ttl = cmdResp.LockTtl } else { if cmdResp.CommitVersion == 0 { tikvLockResolverCountWithQueryTxnStatusRolledBack.Inc() } else { 
tikvLockResolverCountWithQueryTxnStatusCommitted.Inc() } status.commitTS = cmdResp.CommitVersion lr.saveResolved(txnID, status) } return status, nil } } func (lr *LockResolver) resolveLock(bo *Backoffer, l *Lock, status TxnStatus, lite bool, cleanRegions map[RegionVerID]struct{}) error { tikvLockResolverCountWithResolveLocks.Inc() resolveLite := lite || l.TxnSize < bigTxnThreshold for { loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key) if err != nil { return errors.Trace(err) } if _, ok := cleanRegions[loc.Region]; ok { return nil } lreq := &kvrpcpb.ResolveLockRequest{ StartVersion: l.TxnID, } if status.IsCommitted() { lreq.CommitVersion = status.CommitTS() } if resolveLite { // Only resolve specified keys when it is a small transaction, // prevent from scanning the whole region in this case. tikvLockResolverCountWithResolveLockLite.Inc() lreq.Keys = [][]byte{l.Key} if !status.IsCommitted() { logutil.BgLogger().Info("resolveLock rollback", zap.String("lock", l.String())) } } req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, lreq) resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) if err != nil { return errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return errors.Trace(err) } continue } if resp.Resp == nil { return errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.ResolveLockResponse) if keyErr := cmdResp.GetError(); keyErr != nil { err = errors.Errorf("unexpected resolve err: %s, lock: %v", keyErr, l) logutil.BgLogger().Error("resolveLock error", zap.Error(err)) return err } if !resolveLite { cleanRegions[loc.Region] = struct{}{} } return nil } } func (lr *LockResolver) resolvePessimisticLock(bo *Backoffer, l *Lock, cleanRegions map[RegionVerID]struct{}) error { tikvLockResolverCountWithResolveLocks.Inc() for { loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key) if 
err != nil { return errors.Trace(err) } if _, ok := cleanRegions[loc.Region]; ok { return nil } forUpdateTS := l.LockForUpdateTS if forUpdateTS == 0 { forUpdateTS = math.MaxUint64 } pessimisticRollbackReq := &kvrpcpb.PessimisticRollbackRequest{ StartVersion: l.TxnID, ForUpdateTs: forUpdateTS, Keys: [][]byte{l.Key}, } req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, pessimisticRollbackReq) resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) if err != nil { return errors.Trace(err) } regionErr, err := resp.GetRegionError() if err != nil { return errors.Trace(err) } if regionErr != nil { err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return errors.Trace(err) } continue } if resp.Resp == nil { return errors.Trace(ErrBodyMissing) } cmdResp := resp.Resp.(*kvrpcpb.PessimisticRollbackResponse) if keyErr := cmdResp.GetErrors(); len(keyErr) > 0 { err = errors.Errorf("unexpected resolve pessimistic lock err: %s, lock: %v", keyErr[0], l) logutil.Logger(bo.ctx).Error("resolveLock error", zap.Error(err)) return err } return nil } }
// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ratelimit_test import ( "errors" "io" "testing" "golang.org/x/net/context" "github.com/googlecloudplatform/gcsfuse/ratelimit" . "github.com/jacobsa/oglematchers" . "github.com/jacobsa/ogletest" ) func TestThrottledReader(t *testing.T) { RunTests(t) } //////////////////////////////////////////////////////////////////////// // Helpers //////////////////////////////////////////////////////////////////////// // An io.Reader that defers to a function. type funcReader struct { f func([]byte) (int, error) } func (fr *funcReader) Read(p []byte) (n int, err error) { n, err = fr.f(p) return } // A throttler that defers to a function. type funcThrottle struct { f func(context.Context, uint64) bool } func (ft *funcThrottle) Capacity() (c uint64) { return 1024 } func (ft *funcThrottle) Wait( ctx context.Context, tokens uint64) (ok bool) { ok = ft.f(ctx, tokens) return } //////////////////////////////////////////////////////////////////////// // Boilerplate //////////////////////////////////////////////////////////////////////// type ThrottledReaderTest struct { ctx context.Context wrapped funcReader throttle funcThrottle reader io.Reader } var _ SetUpInterface = &ThrottledReaderTest{} func init() { RegisterTestSuite(&ThrottledReaderTest{}) } func (t *ThrottledReaderTest) SetUp(ti *TestInfo) { t.ctx = ti.Ctx // Set up the default throttle function. 
t.throttle.f = func(ctx context.Context, tokens uint64) (ok bool) { ok = true return } // Set up the reader. t.reader = ratelimit.ThrottledReader(t.ctx, &t.wrapped, &t.throttle) } //////////////////////////////////////////////////////////////////////// // Tests //////////////////////////////////////////////////////////////////////// func (t *ThrottledReaderTest) CallsThrottle() { const readSize = 17 AssertLe(readSize, t.throttle.Capacity()) // Throttle var throttleCalled bool t.throttle.f = func(ctx context.Context, tokens uint64) (ok bool) { AssertFalse(throttleCalled) throttleCalled = true AssertEq(t.ctx, ctx) AssertEq(readSize, tokens) return } // Call t.reader.Read(make([]byte, readSize)) ExpectTrue(throttleCalled) } func (t *ThrottledReaderTest) ThrottleSaysCancelled() { // Throttle t.throttle.f = func(ctx context.Context, tokens uint64) (ok bool) { return } // Call n, err := t.reader.Read(make([]byte, 1)) ExpectEq(0, n) ExpectThat(err, Error(HasSubstr("throttle"))) ExpectThat(err, Error(HasSubstr("cancel"))) } func (t *ThrottledReaderTest) CallsWrapped() { buf := make([]byte, 16) AssertLe(len(buf), t.throttle.Capacity()) // Wrapped var readCalled bool t.wrapped.f = func(p []byte) (n int, err error) { AssertFalse(readCalled) readCalled = true AssertEq(buf, p) err = errors.New("") return } // Call t.reader.Read(buf) ExpectTrue(readCalled) } func (t *ThrottledReaderTest) WrappedReturnsError() { // Wrapped expectedErr := errors.New("taco") t.wrapped.f = func(p []byte) (n int, err error) { n = 11 err = expectedErr return } // Call n, err := t.reader.Read(make([]byte, 16)) ExpectEq(11, n) ExpectEq(expectedErr, err) } func (t *ThrottledReaderTest) WrappedReturnsEOF() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) WrappedReturnsFullRead() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) WrappedReturnsShortRead_CallsAgain() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondFails() { AssertTrue(false, 
"TODO") } func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondSuceeds() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) ReadSizeIsAboveThrottleCapacity() { AssertTrue(false, "TODO") } ThrottledReaderTest.WrappedReturnsEOF // Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ratelimit_test import ( "errors" "io" "testing" "golang.org/x/net/context" "github.com/googlecloudplatform/gcsfuse/ratelimit" . "github.com/jacobsa/oglematchers" . "github.com/jacobsa/ogletest" ) func TestThrottledReader(t *testing.T) { RunTests(t) } //////////////////////////////////////////////////////////////////////// // Helpers //////////////////////////////////////////////////////////////////////// // An io.Reader that defers to a function. type funcReader struct { f func([]byte) (int, error) } func (fr *funcReader) Read(p []byte) (n int, err error) { n, err = fr.f(p) return } // A throttler that defers to a function. 
type funcThrottle struct { f func(context.Context, uint64) bool } func (ft *funcThrottle) Capacity() (c uint64) { return 1024 } func (ft *funcThrottle) Wait( ctx context.Context, tokens uint64) (ok bool) { ok = ft.f(ctx, tokens) return } //////////////////////////////////////////////////////////////////////// // Boilerplate //////////////////////////////////////////////////////////////////////// type ThrottledReaderTest struct { ctx context.Context wrapped funcReader throttle funcThrottle reader io.Reader } var _ SetUpInterface = &ThrottledReaderTest{} func init() { RegisterTestSuite(&ThrottledReaderTest{}) } func (t *ThrottledReaderTest) SetUp(ti *TestInfo) { t.ctx = ti.Ctx // Set up the default throttle function. t.throttle.f = func(ctx context.Context, tokens uint64) (ok bool) { ok = true return } // Set up the reader. t.reader = ratelimit.ThrottledReader(t.ctx, &t.wrapped, &t.throttle) } //////////////////////////////////////////////////////////////////////// // Tests //////////////////////////////////////////////////////////////////////// func (t *ThrottledReaderTest) CallsThrottle() { const readSize = 17 AssertLe(readSize, t.throttle.Capacity()) // Throttle var throttleCalled bool t.throttle.f = func(ctx context.Context, tokens uint64) (ok bool) { AssertFalse(throttleCalled) throttleCalled = true AssertEq(t.ctx, ctx) AssertEq(readSize, tokens) return } // Call t.reader.Read(make([]byte, readSize)) ExpectTrue(throttleCalled) } func (t *ThrottledReaderTest) ThrottleSaysCancelled() { // Throttle t.throttle.f = func(ctx context.Context, tokens uint64) (ok bool) { return } // Call n, err := t.reader.Read(make([]byte, 1)) ExpectEq(0, n) ExpectThat(err, Error(HasSubstr("throttle"))) ExpectThat(err, Error(HasSubstr("cancel"))) } func (t *ThrottledReaderTest) CallsWrapped() { buf := make([]byte, 16) AssertLe(len(buf), t.throttle.Capacity()) // Wrapped var readCalled bool t.wrapped.f = func(p []byte) (n int, err error) { AssertFalse(readCalled) readCalled = true 
AssertEq(buf, p) err = errors.New("") return } // Call t.reader.Read(buf) ExpectTrue(readCalled) } func (t *ThrottledReaderTest) WrappedReturnsError() { // Wrapped expectedErr := errors.New("taco") t.wrapped.f = func(p []byte) (n int, err error) { n = 11 err = expectedErr return } // Call n, err := t.reader.Read(make([]byte, 16)) ExpectEq(11, n) ExpectEq(expectedErr, err) } func (t *ThrottledReaderTest) WrappedReturnsEOF() { // Wrapped t.wrapped.f = func(p []byte) (n int, err error) { n = 11 err = io.EOF return } // Call n, err := t.reader.Read(make([]byte, 16)) ExpectEq(11, n) ExpectEq(io.EOF, err) } func (t *ThrottledReaderTest) WrappedReturnsFullRead() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) WrappedReturnsShortRead_CallsAgain() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondFails() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondSuceeds() { AssertTrue(false, "TODO") } func (t *ThrottledReaderTest) ReadSizeIsAboveThrottleCapacity() { AssertTrue(false, "TODO") }
// Copyright 2017-2019, Square, Inc. // Package request provides an interface for managing requests. package request import ( "context" "database/sql" "encoding/json" "fmt" "net/url" "sort" "strings" "sync" "time" "github.com/go-sql-driver/mysql" "github.com/rs/xid" log "github.com/sirupsen/logrus" serr "github.com/square/spincycle/errors" jr "github.com/square/spincycle/job-runner" "github.com/square/spincycle/proto" "github.com/square/spincycle/request-manager/grapher" "github.com/square/spincycle/retry" ) const ( DB_TRIES = 3 DB_RETRY_WAIT = time.Duration(500 * time.Millisecond) JR_TRIES = 3 JR_RETRY_WAIT = time.Duration(5 * time.Second) ) // A Manager creates and manages the life cycle of requests. type Manager interface { // Create creates a request and saves it to the db. The request is not // started; its state is pending until Start is called. Create(proto.CreateRequest) (proto.Request, error) // Get retrieves the request corresponding to the provided id, // without its job chain or parameters set. Get(requestId string) (proto.Request, error) // Get retrieves the request corresponding to the provided id, // with its job chain and parameters. GetWithJC(requestId string) (proto.Request, error) // Start starts a request (sends it to the JR). Start(requestId string) error // Stop stops a request (sends a stop signal to the JR). Stop(requestId string) error // Finish marks a request as being finished. It gets the request's final // state from the proto.FinishRequest argument. Finish(requestId string, finishParams proto.FinishRequest) error // Specs returns a list of all the request specs the the RM knows about. Specs() []proto.RequestSpec // JobChain returns the job chain for the given request id. JobChain(requestId string) (proto.JobChain, error) // Find returns a list of requests that match the given filter criteria, // in descending order by create time (i.e. most recent first) and ascending // by request id where create time is not unique. 
Returned requests do // not have job chain or args set. Find(filter proto.RequestFilter) ([]proto.Request, error) } // manager implements the Manager interface. type manager struct { grf grapher.GrapherFactory dbc *sql.DB jrc jr.Client defaultJRURL string shutdownChan chan struct{} *sync.Mutex } type ManagerConfig struct { GrapherFactory grapher.GrapherFactory DBConnector *sql.DB JRClient jr.Client DefaultJRURL string ShutdownChan chan struct{} } func NewManager(config ManagerConfig) Manager { return &manager{ grf: config.GrapherFactory, dbc: config.DBConnector, jrc: config.JRClient, defaultJRURL: config.DefaultJRURL, shutdownChan: config.ShutdownChan, Mutex: &sync.Mutex{}, } } func (m *manager) Create(newReq proto.CreateRequest) (proto.Request, error) { var req proto.Request if newReq.Type == "" { return req, serr.ErrInvalidCreateRequest{Message: "Type is empty, must be a request name"} } reqIdBytes := xid.New() reqId := reqIdBytes.String() req = proto.Request{ Id: reqId, Type: newReq.Type, CreatedAt: time.Now().UTC(), State: proto.STATE_PENDING, User: newReq.User, // Caller.Name if not set by SetUsername } // ---------------------------------------------------------------------- // Verify and finalize request args. The final request args are given // (from caller) + optional + static. gr := m.grf.Make(req) reqArgs, err := gr.RequestArgs(req.Type, newReq.Args) if err != nil { return req, err } req.Args = reqArgs // Copy requests args -> initial job args. We save the former as a record // (request_archives.args) of every request arg that the request was started // with. CreateGraph modifies and greatly expands the latter (job args). // Final job args are saved with each job because the same job arg can have // different values for different jobs (especially true for each: expansions). 
jobArgs := map[string]interface{}{} for k, v := range newReq.Args { jobArgs[k] = v } // ---------------------------------------------------------------------- // Create graph from request specs and jobs args. Then translate the // generic graph into a job chain and save it with the request. newGraph, err := gr.CreateGraph(req.Type, jobArgs) if err != nil { return req, err } jc := &proto.JobChain{ AdjacencyList: newGraph.Edges, RequestId: reqId, State: proto.STATE_PENDING, Jobs: map[string]proto.Job{}, } for jobId, node := range newGraph.Vertices { bytes, err := node.Datum.Serialize() if err != nil { return req, err } job := proto.Job{ Type: node.Datum.Id().Type, Id: node.Datum.Id().Id, Name: node.Datum.Id().Name, Bytes: bytes, Args: node.Args, Retry: node.Retry, RetryWait: node.RetryWait, SequenceId: node.SequenceId, SequenceRetry: node.SequenceRetry, State: proto.STATE_PENDING, } jc.Jobs[jobId] = job } req.JobChain = jc req.TotalJobs = uint(len(jc.Jobs)) // ---------------------------------------------------------------------- // Serial data for request_archives jcBytes, err := json.Marshal(req.JobChain) if err != nil { return req, fmt.Errorf("cannot marshal job chain: %s", err) } newReqBytes, err := json.Marshal(newReq) if err != nil { return req, fmt.Errorf("cannot marshal create request: %s", err) } reqArgsBytes, err := json.Marshal(reqArgs) if err != nil { return req, fmt.Errorf("cannot marshal request args: %s", err) } // ---------------------------------------------------------------------- // Save everything in a transaction. request_archive is immutable data, // i.e. these never change now that request is fully created. requests is // highly mutable, especially requests.state and requests.finished_jobs. 
ctx := context.TODO() err = retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { txn, err := m.dbc.BeginTx(ctx, nil) if err != nil { return err } defer txn.Rollback() q := "INSERT INTO request_archives (request_id, create_request, args, job_chain) VALUES (?, ?, ?, ?)" _, err = txn.ExecContext(ctx, q, reqIdBytes, string(newReqBytes), string(reqArgsBytes), jcBytes, ) if err != nil { return serr.NewDbError(err, "INSERT request_archives") } q = "INSERT INTO requests (request_id, type, state, user, created_at, total_jobs) VALUES (?, ?, ?, ?, ?, ?)" _, err = txn.ExecContext(ctx, q, reqIdBytes, req.Type, req.State, req.User, req.CreatedAt, req.TotalJobs, ) if err != nil { return serr.NewDbError(err, "INSERT requests") } return txn.Commit() }, nil) return req, err } // Retrieve the request without its corresponding Job Chain. func (m *manager) Get(requestId string) (proto.Request, error) { var req proto.Request ctx := context.TODO() // Nullable columns. var user sql.NullString var jrURL sql.NullString startedAt := mysql.NullTime{} finishedAt := mysql.NullTime{} var reqArgsBytes []byte // Technically, a LEFT JOIN shouldn't be necessary, but we have tests that // create a request but no corresponding request_archive which makes a plain // JOIN not match any row. q := "SELECT request_id, type, state, user, created_at, started_at, finished_at, total_jobs, finished_jobs, jr_url, args" + " FROM requests r LEFT JOIN request_archives a USING (request_id)" + " WHERE request_id = ?" 
notFound := false err := retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { err := m.dbc.QueryRowContext(ctx, q, requestId).Scan( &req.Id, &req.Type, &req.State, &user, &req.CreatedAt, &startedAt, &finishedAt, &req.TotalJobs, &req.FinishedJobs, &jrURL, &reqArgsBytes, ) if err != nil { switch err { case sql.ErrNoRows: notFound = true return nil // don't try again default: return err } } return nil }, nil) if err != nil { return req, serr.NewDbError(err, "SELECT requests") } if notFound { return req, serr.RequestNotFound{requestId} } if user.Valid { req.User = user.String } if jrURL.Valid { req.JobRunnerURL = jrURL.String } if startedAt.Valid { req.StartedAt = &startedAt.Time } if finishedAt.Valid { req.FinishedAt = &finishedAt.Time } if len(reqArgsBytes) > 0 { var reqArgs []proto.RequestArg if err := json.Unmarshal(reqArgsBytes, &reqArgs); err != nil { return req, err } req.Args = reqArgs } return req, nil } func (m *manager) Start(requestId string) error { req, err := m.GetWithJC(requestId) if err != nil { return err } // Only start the request if it's currently Pending. if req.State != proto.STATE_PENDING { return serr.NewErrInvalidState(proto.StateName[proto.STATE_PENDING], proto.StateName[req.State]) } // Send the request's job chain to the job runner, which will start running it. var chainURL *url.URL for i := 0; i < JR_TRIES; i++ { chainURL, err = m.jrc.NewJobChain(m.defaultJRURL, *req.JobChain) if err == nil { break } time.Sleep(JR_RETRY_WAIT) } if err != nil { return err } now := time.Now().UTC() req.StartedAt = &now req.State = proto.STATE_RUNNING req.JobRunnerURL = strings.TrimSuffix(chainURL.String(), chainURL.RequestURI()) // This will only update the request if the current state is PENDING. The // state should be PENDING since we checked this earlier, but it's possible // something else has changed the state since then. 
err = m.updateRequest(req, proto.STATE_PENDING) if err != nil { return err } return nil } func (m *manager) Stop(requestId string) error { req, err := m.Get(requestId) if err != nil { return err } if req.State == proto.STATE_COMPLETE { return nil } // Return an error unless the request is in the running state, which prevents // us from stopping a request which should not be able to be stopped. if req.State != proto.STATE_RUNNING { return serr.NewErrInvalidState(proto.StateName[proto.STATE_RUNNING], proto.StateName[req.State]) } // Tell the JR to stop running the job chain for the request. err = m.jrc.StopRequest(req.JobRunnerURL, requestId) if err != nil { return fmt.Errorf("error stopping request in Job Runner: %s", err) } return nil } func (m *manager) Finish(requestId string, finishParams proto.FinishRequest) error { req, err := m.Get(requestId) if err != nil { return err } log.Infof("finish request: %+v", finishParams) prevState := req.State req.State = finishParams.State req.FinishedAt = &finishParams.FinishedAt req.FinishedJobs = finishParams.FinishedJobs req.JobRunnerURL = "" // This will only update the request if the current state is RUNNING. err = m.updateRequest(req, proto.STATE_RUNNING) if err != nil { if prevState != proto.STATE_RUNNING { // This should never happen - we never finish a request that isn't running. 
return serr.NewErrInvalidState(proto.StateName[proto.STATE_RUNNING], proto.StateName[prevState]) } return err } return nil } var requestList []proto.RequestSpec func (m *manager) Specs() []proto.RequestSpec { m.Lock() defer m.Unlock() if requestList != nil { return requestList } gr := m.grf.Make(proto.Request{}) req := gr.Sequences() sortedReqNames := make([]string, 0, len(req)) for name := range req { if req[name].Request { sortedReqNames = append(sortedReqNames, name) } } sort.Strings(sortedReqNames) requestList = make([]proto.RequestSpec, 0, len(sortedReqNames)) for _, name := range sortedReqNames { s := proto.RequestSpec{ Name: name, Args: []proto.RequestArg{}, } for _, arg := range req[name].Args.Required { a := proto.RequestArg{ Name: arg.Name, Desc: arg.Desc, Type: proto.ARG_TYPE_REQUIRED, } s.Args = append(s.Args, a) } for _, arg := range req[name].Args.Optional { a := proto.RequestArg{ Name: arg.Name, Desc: arg.Desc, Type: proto.ARG_TYPE_OPTIONAL, Default: arg.Default, } s.Args = append(s.Args, a) } requestList = append(requestList, s) } return requestList } func (m *manager) JobChain(requestId string) (proto.JobChain, error) { var jc proto.JobChain var jcBytes []byte // raw job chains are stored as blobs in the db. ctx := context.TODO() // Get the job chain from the request_archives table. q := "SELECT job_chain FROM request_archives WHERE request_id = ?" if err := m.dbc.QueryRowContext(ctx, q, requestId).Scan(&jcBytes); err != nil { switch err { case sql.ErrNoRows: return jc, serr.RequestNotFound{requestId} default: return jc, serr.NewDbError(err, "SELECT request_archives") } } // Unmarshal the job chain into a proto.JobChain. 
if err := json.Unmarshal(jcBytes, &jc); err != nil { return jc, fmt.Errorf("cannot unmarshal job chain: %s", err) } return jc, nil } // Get a request with proto.Request.JobChain and proto.Request.Params set func (m *manager) GetWithJC(requestId string) (proto.Request, error) { req, err := m.Get(requestId) if err != nil { return req, err } ctx := context.TODO() var jcBytes []byte q := "SELECT job_chain FROM request_archives WHERE request_id = ?" notFound := false err = retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { err := m.dbc.QueryRowContext(ctx, q, requestId).Scan(&jcBytes) if err != nil { switch err { case sql.ErrNoRows: notFound = true return nil // don't try again default: return err } } return nil }, nil) if err != nil { return req, serr.NewDbError(err, "SELECT request_archives") } if notFound { return req, serr.RequestNotFound{requestId} } var jc proto.JobChain if err := json.Unmarshal(jcBytes, &jc); err != nil { return req, fmt.Errorf("cannot unmarshal job chain: %s", err) } req.JobChain = &jc return req, nil } func (m *manager) Find(filter proto.RequestFilter) ([]proto.Request, error) { // Build the query from the filter. query := "SELECT request_id, type, state, user, created_at, started_at, finished_at, total_jobs, finished_jobs, jr_url FROM requests " var fields []string var values []interface{} if filter.Type != "" { fields = append(fields, "type = ?") values = append(values, filter.Type) } if filter.User != "" { fields = append(fields, "user = ?") values = append(values, filter.User) } if len(filter.States) != 0 { stateSQL := fmt.Sprintf("state IN (%s)", strings.TrimRight(strings.Repeat("?, ", len(filter.States)), ", ")) fields = append(fields, stateSQL) for _, state := range filter.States { values = append(values, state) } } if !filter.Since.IsZero() { fields = append(fields, "(finished_at > ? 
OR finished_at IS NULL)") values = append(values, filter.Since.Format(time.RFC3339Nano)) } if !filter.Until.IsZero() { fields = append(fields, "(created_at < ?)") values = append(values, filter.Until.Format(time.RFC3339Nano)) } if len(fields) > 0 { query += "WHERE " + strings.Join(fields, " AND ") } query += " ORDER BY created_at DESC, request_id " if filter.Limit != 0 { query += fmt.Sprintf(" LIMIT %d", filter.Limit) if filter.Offset != 0 { query += fmt.Sprintf(" OFFSET %d", filter.Offset) } } // Query the db and parse results. ctx := context.Background() var rows *sql.Rows err := retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { var err error rows, err = m.dbc.QueryContext(ctx, query, values...) if err != nil { return err } return nil }, nil) if err != nil { return []proto.Request{}, serr.NewDbError(err, "SELECT request_id") } var requests []proto.Request defer rows.Close() for rows.Next() { var req proto.Request // Nullable columns: var user sql.NullString var jrURL sql.NullString startedAt := mysql.NullTime{} finishedAt := mysql.NullTime{} err := rows.Scan( &req.Id, &req.Type, &req.State, &user, &req.CreatedAt, &startedAt, &finishedAt, &req.TotalJobs, &req.FinishedJobs, &jrURL, ) if err != nil { return []proto.Request{}, fmt.Errorf("Error scanning row returned from MySQL: %s", err) } if user.Valid { req.User = user.String } if jrURL.Valid { req.JobRunnerURL = jrURL.String } if startedAt.Valid { req.StartedAt = &startedAt.Time } if finishedAt.Valid { req.FinishedAt = &finishedAt.Time } requests = append(requests, req) } if rows.Err() != nil { return []proto.Request{}, fmt.Errorf("Error iterating over rows returned from MySQL: %s", err) } return requests, nil } // ------------------------------------------------------------------------- // // Updates the state, started/finished timestamps, and JR url of the provided // request. The request is updated only if its current state (in the db) matches // the state provided. 
func (m *manager) updateRequest(req proto.Request, curState byte) error { ctx := context.TODO() // If JobRunnerURL is empty, we want to set the db field to NULL (not an empty string). var jrURL interface{} if req.JobRunnerURL != "" { jrURL = req.JobRunnerURL } // Fields that should never be updated by this package are not listed in this query. q := "UPDATE requests SET state = ?, started_at = ?, finished_at = ?, finished_jobs = ?, jr_url = ? WHERE request_id = ? AND state = ?" var res sql.Result err := retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { var err error res, err = m.dbc.ExecContext(ctx, q, req.State, req.StartedAt, req.FinishedAt, req.FinishedJobs, jrURL, req.Id, curState, ) return err }, nil) if err != nil { return serr.NewDbError(err, "UPDATE requests") } cnt, err := res.RowsAffected() if err != nil { return err } switch cnt { case 0: return ErrNotUpdated case 1: break default: // This should be impossible since we specify the primary key // in the WHERE clause of the update. return ErrMultipleUpdated } return nil } Retry five times, not three Issue 121 // Copyright 2017-2019, Square, Inc. // Package request provides an interface for managing requests. package request import ( "context" "database/sql" "encoding/json" "fmt" "net/url" "sort" "strings" "sync" "time" "github.com/go-sql-driver/mysql" "github.com/rs/xid" log "github.com/sirupsen/logrus" serr "github.com/square/spincycle/errors" jr "github.com/square/spincycle/job-runner" "github.com/square/spincycle/proto" "github.com/square/spincycle/request-manager/grapher" "github.com/square/spincycle/retry" ) const ( DB_TRIES = 3 DB_RETRY_WAIT = time.Duration(500 * time.Millisecond) JR_TRIES = 5 JR_RETRY_WAIT = time.Duration(5 * time.Second) ) // A Manager creates and manages the life cycle of requests. type Manager interface { // Create creates a request and saves it to the db. The request is not // started; its state is pending until Start is called. 
Create(proto.CreateRequest) (proto.Request, error) // Get retrieves the request corresponding to the provided id, // without its job chain or parameters set. Get(requestId string) (proto.Request, error) // Get retrieves the request corresponding to the provided id, // with its job chain and parameters. GetWithJC(requestId string) (proto.Request, error) // Start starts a request (sends it to the JR). Start(requestId string) error // Stop stops a request (sends a stop signal to the JR). Stop(requestId string) error // Finish marks a request as being finished. It gets the request's final // state from the proto.FinishRequest argument. Finish(requestId string, finishParams proto.FinishRequest) error // Specs returns a list of all the request specs the the RM knows about. Specs() []proto.RequestSpec // JobChain returns the job chain for the given request id. JobChain(requestId string) (proto.JobChain, error) // Find returns a list of requests that match the given filter criteria, // in descending order by create time (i.e. most recent first) and ascending // by request id where create time is not unique. Returned requests do // not have job chain or args set. Find(filter proto.RequestFilter) ([]proto.Request, error) } // manager implements the Manager interface. 
type manager struct { grf grapher.GrapherFactory dbc *sql.DB jrc jr.Client defaultJRURL string shutdownChan chan struct{} *sync.Mutex } type ManagerConfig struct { GrapherFactory grapher.GrapherFactory DBConnector *sql.DB JRClient jr.Client DefaultJRURL string ShutdownChan chan struct{} } func NewManager(config ManagerConfig) Manager { return &manager{ grf: config.GrapherFactory, dbc: config.DBConnector, jrc: config.JRClient, defaultJRURL: config.DefaultJRURL, shutdownChan: config.ShutdownChan, Mutex: &sync.Mutex{}, } } func (m *manager) Create(newReq proto.CreateRequest) (proto.Request, error) { var req proto.Request if newReq.Type == "" { return req, serr.ErrInvalidCreateRequest{Message: "Type is empty, must be a request name"} } reqIdBytes := xid.New() reqId := reqIdBytes.String() req = proto.Request{ Id: reqId, Type: newReq.Type, CreatedAt: time.Now().UTC(), State: proto.STATE_PENDING, User: newReq.User, // Caller.Name if not set by SetUsername } // ---------------------------------------------------------------------- // Verify and finalize request args. The final request args are given // (from caller) + optional + static. gr := m.grf.Make(req) reqArgs, err := gr.RequestArgs(req.Type, newReq.Args) if err != nil { return req, err } req.Args = reqArgs // Copy requests args -> initial job args. We save the former as a record // (request_archives.args) of every request arg that the request was started // with. CreateGraph modifies and greatly expands the latter (job args). // Final job args are saved with each job because the same job arg can have // different values for different jobs (especially true for each: expansions). jobArgs := map[string]interface{}{} for k, v := range newReq.Args { jobArgs[k] = v } // ---------------------------------------------------------------------- // Create graph from request specs and jobs args. Then translate the // generic graph into a job chain and save it with the request. 
newGraph, err := gr.CreateGraph(req.Type, jobArgs) if err != nil { return req, err } jc := &proto.JobChain{ AdjacencyList: newGraph.Edges, RequestId: reqId, State: proto.STATE_PENDING, Jobs: map[string]proto.Job{}, } for jobId, node := range newGraph.Vertices { bytes, err := node.Datum.Serialize() if err != nil { return req, err } job := proto.Job{ Type: node.Datum.Id().Type, Id: node.Datum.Id().Id, Name: node.Datum.Id().Name, Bytes: bytes, Args: node.Args, Retry: node.Retry, RetryWait: node.RetryWait, SequenceId: node.SequenceId, SequenceRetry: node.SequenceRetry, State: proto.STATE_PENDING, } jc.Jobs[jobId] = job } req.JobChain = jc req.TotalJobs = uint(len(jc.Jobs)) // ---------------------------------------------------------------------- // Serial data for request_archives jcBytes, err := json.Marshal(req.JobChain) if err != nil { return req, fmt.Errorf("cannot marshal job chain: %s", err) } newReqBytes, err := json.Marshal(newReq) if err != nil { return req, fmt.Errorf("cannot marshal create request: %s", err) } reqArgsBytes, err := json.Marshal(reqArgs) if err != nil { return req, fmt.Errorf("cannot marshal request args: %s", err) } // ---------------------------------------------------------------------- // Save everything in a transaction. request_archive is immutable data, // i.e. these never change now that request is fully created. requests is // highly mutable, especially requests.state and requests.finished_jobs. 
ctx := context.TODO() err = retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { txn, err := m.dbc.BeginTx(ctx, nil) if err != nil { return err } defer txn.Rollback() q := "INSERT INTO request_archives (request_id, create_request, args, job_chain) VALUES (?, ?, ?, ?)" _, err = txn.ExecContext(ctx, q, reqIdBytes, string(newReqBytes), string(reqArgsBytes), jcBytes, ) if err != nil { return serr.NewDbError(err, "INSERT request_archives") } q = "INSERT INTO requests (request_id, type, state, user, created_at, total_jobs) VALUES (?, ?, ?, ?, ?, ?)" _, err = txn.ExecContext(ctx, q, reqIdBytes, req.Type, req.State, req.User, req.CreatedAt, req.TotalJobs, ) if err != nil { return serr.NewDbError(err, "INSERT requests") } return txn.Commit() }, nil) return req, err } // Retrieve the request without its corresponding Job Chain. func (m *manager) Get(requestId string) (proto.Request, error) { var req proto.Request ctx := context.TODO() // Nullable columns. var user sql.NullString var jrURL sql.NullString startedAt := mysql.NullTime{} finishedAt := mysql.NullTime{} var reqArgsBytes []byte // Technically, a LEFT JOIN shouldn't be necessary, but we have tests that // create a request but no corresponding request_archive which makes a plain // JOIN not match any row. q := "SELECT request_id, type, state, user, created_at, started_at, finished_at, total_jobs, finished_jobs, jr_url, args" + " FROM requests r LEFT JOIN request_archives a USING (request_id)" + " WHERE request_id = ?" 
notFound := false err := retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { err := m.dbc.QueryRowContext(ctx, q, requestId).Scan( &req.Id, &req.Type, &req.State, &user, &req.CreatedAt, &startedAt, &finishedAt, &req.TotalJobs, &req.FinishedJobs, &jrURL, &reqArgsBytes, ) if err != nil { switch err { case sql.ErrNoRows: notFound = true return nil // don't try again default: return err } } return nil }, nil) if err != nil { return req, serr.NewDbError(err, "SELECT requests") } if notFound { return req, serr.RequestNotFound{requestId} } if user.Valid { req.User = user.String } if jrURL.Valid { req.JobRunnerURL = jrURL.String } if startedAt.Valid { req.StartedAt = &startedAt.Time } if finishedAt.Valid { req.FinishedAt = &finishedAt.Time } if len(reqArgsBytes) > 0 { var reqArgs []proto.RequestArg if err := json.Unmarshal(reqArgsBytes, &reqArgs); err != nil { return req, err } req.Args = reqArgs } return req, nil } func (m *manager) Start(requestId string) error { req, err := m.GetWithJC(requestId) if err != nil { return err } // Only start the request if it's currently Pending. if req.State != proto.STATE_PENDING { return serr.NewErrInvalidState(proto.StateName[proto.STATE_PENDING], proto.StateName[req.State]) } // Send the request's job chain to the job runner, which will start running it. var chainURL *url.URL for i := 0; i < JR_TRIES; i++ { chainURL, err = m.jrc.NewJobChain(m.defaultJRURL, *req.JobChain) if err == nil { break } time.Sleep(JR_RETRY_WAIT) } if err != nil { return err } now := time.Now().UTC() req.StartedAt = &now req.State = proto.STATE_RUNNING req.JobRunnerURL = strings.TrimSuffix(chainURL.String(), chainURL.RequestURI()) // This will only update the request if the current state is PENDING. The // state should be PENDING since we checked this earlier, but it's possible // something else has changed the state since then. 
err = m.updateRequest(req, proto.STATE_PENDING) if err != nil { return err } return nil } func (m *manager) Stop(requestId string) error { req, err := m.Get(requestId) if err != nil { return err } if req.State == proto.STATE_COMPLETE { return nil } // Return an error unless the request is in the running state, which prevents // us from stopping a request which should not be able to be stopped. if req.State != proto.STATE_RUNNING { return serr.NewErrInvalidState(proto.StateName[proto.STATE_RUNNING], proto.StateName[req.State]) } // Tell the JR to stop running the job chain for the request. err = m.jrc.StopRequest(req.JobRunnerURL, requestId) if err != nil { return fmt.Errorf("error stopping request in Job Runner: %s", err) } return nil } func (m *manager) Finish(requestId string, finishParams proto.FinishRequest) error { req, err := m.Get(requestId) if err != nil { return err } log.Infof("finish request: %+v", finishParams) prevState := req.State req.State = finishParams.State req.FinishedAt = &finishParams.FinishedAt req.FinishedJobs = finishParams.FinishedJobs req.JobRunnerURL = "" // This will only update the request if the current state is RUNNING. err = m.updateRequest(req, proto.STATE_RUNNING) if err != nil { if prevState != proto.STATE_RUNNING { // This should never happen - we never finish a request that isn't running. 
return serr.NewErrInvalidState(proto.StateName[proto.STATE_RUNNING], proto.StateName[prevState]) } return err } return nil } var requestList []proto.RequestSpec func (m *manager) Specs() []proto.RequestSpec { m.Lock() defer m.Unlock() if requestList != nil { return requestList } gr := m.grf.Make(proto.Request{}) req := gr.Sequences() sortedReqNames := make([]string, 0, len(req)) for name := range req { if req[name].Request { sortedReqNames = append(sortedReqNames, name) } } sort.Strings(sortedReqNames) requestList = make([]proto.RequestSpec, 0, len(sortedReqNames)) for _, name := range sortedReqNames { s := proto.RequestSpec{ Name: name, Args: []proto.RequestArg{}, } for _, arg := range req[name].Args.Required { a := proto.RequestArg{ Name: arg.Name, Desc: arg.Desc, Type: proto.ARG_TYPE_REQUIRED, } s.Args = append(s.Args, a) } for _, arg := range req[name].Args.Optional { a := proto.RequestArg{ Name: arg.Name, Desc: arg.Desc, Type: proto.ARG_TYPE_OPTIONAL, Default: arg.Default, } s.Args = append(s.Args, a) } requestList = append(requestList, s) } return requestList } func (m *manager) JobChain(requestId string) (proto.JobChain, error) { var jc proto.JobChain var jcBytes []byte // raw job chains are stored as blobs in the db. ctx := context.TODO() // Get the job chain from the request_archives table. q := "SELECT job_chain FROM request_archives WHERE request_id = ?" if err := m.dbc.QueryRowContext(ctx, q, requestId).Scan(&jcBytes); err != nil { switch err { case sql.ErrNoRows: return jc, serr.RequestNotFound{requestId} default: return jc, serr.NewDbError(err, "SELECT request_archives") } } // Unmarshal the job chain into a proto.JobChain. 
if err := json.Unmarshal(jcBytes, &jc); err != nil { return jc, fmt.Errorf("cannot unmarshal job chain: %s", err) } return jc, nil } // Get a request with proto.Request.JobChain and proto.Request.Params set func (m *manager) GetWithJC(requestId string) (proto.Request, error) { req, err := m.Get(requestId) if err != nil { return req, err } ctx := context.TODO() var jcBytes []byte q := "SELECT job_chain FROM request_archives WHERE request_id = ?" notFound := false err = retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { err := m.dbc.QueryRowContext(ctx, q, requestId).Scan(&jcBytes) if err != nil { switch err { case sql.ErrNoRows: notFound = true return nil // don't try again default: return err } } return nil }, nil) if err != nil { return req, serr.NewDbError(err, "SELECT request_archives") } if notFound { return req, serr.RequestNotFound{requestId} } var jc proto.JobChain if err := json.Unmarshal(jcBytes, &jc); err != nil { return req, fmt.Errorf("cannot unmarshal job chain: %s", err) } req.JobChain = &jc return req, nil } func (m *manager) Find(filter proto.RequestFilter) ([]proto.Request, error) { // Build the query from the filter. query := "SELECT request_id, type, state, user, created_at, started_at, finished_at, total_jobs, finished_jobs, jr_url FROM requests " var fields []string var values []interface{} if filter.Type != "" { fields = append(fields, "type = ?") values = append(values, filter.Type) } if filter.User != "" { fields = append(fields, "user = ?") values = append(values, filter.User) } if len(filter.States) != 0 { stateSQL := fmt.Sprintf("state IN (%s)", strings.TrimRight(strings.Repeat("?, ", len(filter.States)), ", ")) fields = append(fields, stateSQL) for _, state := range filter.States { values = append(values, state) } } if !filter.Since.IsZero() { fields = append(fields, "(finished_at > ? 
OR finished_at IS NULL)") values = append(values, filter.Since.Format(time.RFC3339Nano)) } if !filter.Until.IsZero() { fields = append(fields, "(created_at < ?)") values = append(values, filter.Until.Format(time.RFC3339Nano)) } if len(fields) > 0 { query += "WHERE " + strings.Join(fields, " AND ") } query += " ORDER BY created_at DESC, request_id " if filter.Limit != 0 { query += fmt.Sprintf(" LIMIT %d", filter.Limit) if filter.Offset != 0 { query += fmt.Sprintf(" OFFSET %d", filter.Offset) } } // Query the db and parse results. ctx := context.Background() var rows *sql.Rows err := retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { var err error rows, err = m.dbc.QueryContext(ctx, query, values...) if err != nil { return err } return nil }, nil) if err != nil { return []proto.Request{}, serr.NewDbError(err, "SELECT request_id") } var requests []proto.Request defer rows.Close() for rows.Next() { var req proto.Request // Nullable columns: var user sql.NullString var jrURL sql.NullString startedAt := mysql.NullTime{} finishedAt := mysql.NullTime{} err := rows.Scan( &req.Id, &req.Type, &req.State, &user, &req.CreatedAt, &startedAt, &finishedAt, &req.TotalJobs, &req.FinishedJobs, &jrURL, ) if err != nil { return []proto.Request{}, fmt.Errorf("Error scanning row returned from MySQL: %s", err) } if user.Valid { req.User = user.String } if jrURL.Valid { req.JobRunnerURL = jrURL.String } if startedAt.Valid { req.StartedAt = &startedAt.Time } if finishedAt.Valid { req.FinishedAt = &finishedAt.Time } requests = append(requests, req) } if rows.Err() != nil { return []proto.Request{}, fmt.Errorf("Error iterating over rows returned from MySQL: %s", err) } return requests, nil } // ------------------------------------------------------------------------- // // Updates the state, started/finished timestamps, and JR url of the provided // request. The request is updated only if its current state (in the db) matches // the state provided. 
func (m *manager) updateRequest(req proto.Request, curState byte) error { ctx := context.TODO() // If JobRunnerURL is empty, we want to set the db field to NULL (not an empty string). var jrURL interface{} if req.JobRunnerURL != "" { jrURL = req.JobRunnerURL } // Fields that should never be updated by this package are not listed in this query. q := "UPDATE requests SET state = ?, started_at = ?, finished_at = ?, finished_jobs = ?, jr_url = ? WHERE request_id = ? AND state = ?" var res sql.Result err := retry.Do(DB_TRIES, DB_RETRY_WAIT, func() error { var err error res, err = m.dbc.ExecContext(ctx, q, req.State, req.StartedAt, req.FinishedAt, req.FinishedJobs, jrURL, req.Id, curState, ) return err }, nil) if err != nil { return serr.NewDbError(err, "UPDATE requests") } cnt, err := res.RowsAffected() if err != nil { return err } switch cnt { case 0: return ErrNotUpdated case 1: break default: // This should be impossible since we specify the primary key // in the WHERE clause of the update. return ErrMultipleUpdated } return nil }
package graphdriver import "sync" type minfo struct { check bool count int } // RefCounter is a generic counter for use by graphdriver Get/Put calls type RefCounter struct { counts map[string]*minfo mu sync.Mutex checker Checker } // NewRefCounter returns a new RefCounter func NewRefCounter(c Checker) *RefCounter { return &RefCounter{ checker: c, counts: make(map[string]*minfo), } } // Increment increaes the ref count for the given id and returns the current count func (c *RefCounter) Increment(path string) int { c.mu.Lock() m := c.counts[path] if m == nil { m = &minfo{} c.counts[path] = m } // if we are checking this path for the first time check to make sure // if it was already mounted on the system and make sure we have a correct ref // count if it is mounted as it is in use. if !m.check { m.check = true if c.checker.IsMounted(path) { m.count++ } } m.count++ c.mu.Unlock() return m.count } // Decrement decreases the ref count for the given id and returns the current count func (c *RefCounter) Decrement(path string) int { c.mu.Lock() m := c.counts[path] if m == nil { m = &minfo{} c.counts[path] = m } // if we are checking this path for the first time check to make sure // if it was already mounted on the system and make sure we have a correct ref // count if it is mounted as it is in use. 
if !m.check { m.check = true if c.checker.IsMounted(path) { m.count++ } } m.count-- c.mu.Unlock() return m.count } Fix RefCounter count return Signed-off-by: Alfred Landrum <024c7638553b6b744bcac8fddb269e416b5baf95@docker.com> package graphdriver import "sync" type minfo struct { check bool count int } // RefCounter is a generic counter for use by graphdriver Get/Put calls type RefCounter struct { counts map[string]*minfo mu sync.Mutex checker Checker } // NewRefCounter returns a new RefCounter func NewRefCounter(c Checker) *RefCounter { return &RefCounter{ checker: c, counts: make(map[string]*minfo), } } // Increment increaes the ref count for the given id and returns the current count func (c *RefCounter) Increment(path string) int { c.mu.Lock() m := c.counts[path] if m == nil { m = &minfo{} c.counts[path] = m } // if we are checking this path for the first time check to make sure // if it was already mounted on the system and make sure we have a correct ref // count if it is mounted as it is in use. if !m.check { m.check = true if c.checker.IsMounted(path) { m.count++ } } m.count++ count := m.count c.mu.Unlock() return count } // Decrement decreases the ref count for the given id and returns the current count func (c *RefCounter) Decrement(path string) int { c.mu.Lock() m := c.counts[path] if m == nil { m = &minfo{} c.counts[path] = m } // if we are checking this path for the first time check to make sure // if it was already mounted on the system and make sure we have a correct ref // count if it is mounted as it is in use. if !m.check { m.check = true if c.checker.IsMounted(path) { m.count++ } } m.count-- count := m.count c.mu.Unlock() return count }
/* Copyright 2014 CoreOS, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package registry import ( "errors" "reflect" "sort" "testing" "time" "github.com/coreos/fleet/etcd" "github.com/coreos/fleet/machine" "github.com/coreos/fleet/unit" ) type action struct { key string val string rec bool } type testEtcdClient struct { gets []action sets []action deletes []action res []*etcd.Result // errors returned from subsequent calls to etcd ri int err []error // results returned from subsequent calls to etcd ei int } func (t *testEtcdClient) Do(req etcd.Action) (r *etcd.Result, e error) { if s, ok := req.(*etcd.Set); ok { t.sets = append(t.sets, action{key: s.Key, val: s.Value}) } else if d, ok := req.(*etcd.Delete); ok { t.deletes = append(t.deletes, action{key: d.Key, rec: d.Recursive}) } else if g, ok := req.(*etcd.Get); ok { t.gets = append(t.gets, action{key: g.Key, rec: g.Recursive}) } if t.ri < len(t.res) { r = t.res[t.ri] t.ri++ } if t.ei < len(t.err) { e = t.err[t.ei] t.ei++ } return r, e } func (t *testEtcdClient) Wait(req etcd.Action, ch <-chan struct{}) (*etcd.Result, error) { return t.Do(req) } func TestUnitStatePaths(t *testing.T) { r := &EtcdRegistry{nil, "/fleet/"} j := "foo.service" want := "/fleet/state/foo.service" got := r.legacyUnitStatePath(j) if got != want { t.Errorf("bad unit state path: got %v, want %v", got, want) } m := "abcdefghij" want = "/fleet/states/foo.service/abcdefghij" got = r.unitStatePath(m, j) if got != want { t.Errorf("bad unit state path: got %v, want %v", got, 
want) } }

// TestSaveUnitState verifies exactly which etcd set operations
// SaveUnitState performs (both the legacy and the per-machine key),
// and that nil states are rejected without touching the registry.
func TestSaveUnitState(t *testing.T) {
	e := &testEtcdClient{}
	r := &EtcdRegistry{e, "/fleet/"}
	j := "foo.service"
	mID := "mymachine"
	us := unit.NewUnitState("abc", "def", "ghi", mID)

	// Saving nil unit state should fail
	r.SaveUnitState(j, nil, time.Second)
	if e.sets != nil || e.deletes != nil {
		t.Logf("sets: %#v", e.sets)
		t.Logf("deletes: %#v", e.deletes)
		t.Fatalf("SaveUnitState of nil state should fail but acted unexpectedly!")
	}

	// Saving unit state with no hash should succeed for now, but should fail
	// in the future. See https://github.com/coreos/fleet/issues/720.
	//r.SaveUnitState(j, us, time.Second)
	//if len(e.sets) != 1 || e.deletes == nil {
	//	t.Logf("sets: %#v", e.sets)
	//	t.Logf("deletes: %#v", e.deletes)
	//	t.Fatalf("SaveUnitState on UnitState with no hash acted unexpectedly!")
	//}

	us.UnitHash = "quickbrownfox"
	r.SaveUnitState(j, us, time.Second)
	// Expected serialized form written to both state keys.
	json := `{"loadState":"abc","activeState":"def","subState":"ghi","machineState":{"ID":"mymachine","PublicIP":"","Metadata":null,"Version":""},"unitHash":"quickbrownfox"}`
	p1 := "/fleet/state/foo.service"
	p2 := "/fleet/states/foo.service/mymachine"
	want := []action{
		action{key: p1, val: json},
		action{key: p2, val: json},
	}
	got := e.sets
	if !reflect.DeepEqual(got, want) {
		t.Errorf("bad result from SaveUnitState: \ngot\n%#v\nwant\n%#v", got, want)
	}
	if e.deletes != nil {
		t.Errorf("unexpected deletes during SaveUnitState: %#v", e.deletes)
	}
	if e.gets != nil {
		t.Errorf("unexpected gets during SaveUnitState: %#v", e.gets)
	}
}

// TestRemoveUnitState verifies that RemoveUnitState deletes both the
// legacy key (non-recursive) and the per-machine directory (recursive),
// and that etcd error conditions are classified correctly.
func TestRemoveUnitState(t *testing.T) {
	e := &testEtcdClient{}
	r := &EtcdRegistry{e, "/fleet/"}
	j := "foo.service"
	err := r.RemoveUnitState(j)
	if err != nil {
		t.Errorf("unexpected error from RemoveUnitState: %v", err)
	}
	want := []action{
		action{key: "/fleet/state/foo.service", rec: false},
		action{key: "/fleet/states/foo.service", rec: true},
	}
	got := e.deletes
	if !reflect.DeepEqual(got, want) {
		t.Errorf("bad result from RemoveUnitState: \ngot\n%#v\nwant\n%#v", got, want)
	}
	if e.sets != nil {
		t.Errorf("unexpected sets during RemoveUnitState: %#v", e.sets)
	}
	if e.gets != nil {
		t.Errorf("unexpected gets during RemoveUnitState: %#v", e.gets)
	}

	// Ensure RemoveUnitState handles different error scenarios appropriately
	for i, tt := range []struct {
		errs []error
		fail bool
	}{
		{[]error{etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{nil, etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{nil, nil}, false}, // No errors, no responses should succeed
		{[]error{errors.New("ur registry don't work")}, true},
		{[]error{nil, errors.New("ur registry don't work")}, true},
	} {
		e = &testEtcdClient{err: tt.errs}
		r = &EtcdRegistry{e, "/fleet"}
		err = r.RemoveUnitState("foo.service")
		if (err != nil) != tt.fail {
			t.Errorf("case %d: unexpected error state calling UnitStates(): got %v, want %v", i, err, tt.fail)
		}
	}
}

// TestUnitStateToModel exercises the UnitState -> unitStateModel conversion,
// including nil input and states missing hash and/or machine ID.
func TestUnitStateToModel(t *testing.T) {
	for i, tt := range []struct {
		in   *unit.UnitState
		want *unitStateModel
	}{
		{
			in:   nil,
			want: nil,
		},
		{
			// Unit state with no hash and no machineID is OK
			// See https://github.com/coreos/fleet/issues/720
			in:   &unit.UnitState{"foo", "bar", "baz", "", "", "name"},
			want: &unitStateModel{"foo", "bar", "baz", nil, ""},
		},
		{
			// Unit state with hash but no machineID is OK
			in:   &unit.UnitState{"foo", "bar", "baz", "", "heh", "name"},
			want: &unitStateModel{"foo", "bar", "baz", nil, "heh"},
		},
		{
			in:   &unit.UnitState{"foo", "bar", "baz", "woof", "miaow", "name"},
			want: &unitStateModel{"foo", "bar", "baz", &machine.MachineState{ID: "woof"}, "miaow"},
		},
	} {
		got := unitStateToModel(tt.in)
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("case %d: got %#v, want %#v", i, got, tt.want)
		}
	}
}

// TestModelToUnitState exercises the inverse unitStateModel -> UnitState
// conversion; the unit name is supplied separately by the caller.
func TestModelToUnitState(t *testing.T) {
	for i, tt := range []struct {
		in   *unitStateModel
		want *unit.UnitState
	}{
		{
			in:   nil,
			want: nil,
		},
		{
			in: &unitStateModel{"foo", "bar", "baz", nil, ""},
			want: &unit.UnitState{
				LoadState:   "foo",
				ActiveState: "bar",
				SubState:    "baz",
				MachineID:   "",
				UnitHash:    "",
				UnitName:    "name",
			},
		},
		{
			in: &unitStateModel{"z", "x", "y", &machine.MachineState{ID: "abcd"}, ""},
			want: &unit.UnitState{
				LoadState:   "z",
				ActiveState: "x",
				SubState:    "y",
				MachineID:   "abcd",
				UnitHash:    "",
				UnitName:    "name",
			},
		},
	} {
		got := modelToUnitState(tt.in, "name")
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("case %d: got %#v, want %#v", i, got, tt.want)
		}
	}
}

// makeResult wraps a raw value in a minimal etcd.Result for test fixtures.
func makeResult(val string) *etcd.Result {
	return &etcd.Result{
		Node: &etcd.Node{
			Value: val,
		},
	}
}

// TestGetUnitState drives getUnitState against canned etcd responses.
// NOTE(review): this version discards the error from getUnitState
// (`us, _ :=`), so error/no-error cases are indistinguishable; the
// follow-up commit below ("squash: beef up testing in TestGetUnitState")
// addresses exactly this.
func TestGetUnitState(t *testing.T) {
	for i, tt := range []struct {
		res *etcd.Result // result returned from etcd
		err error        // error returned from etcd
		us  *unit.UnitState
	}{
		{
			// Unit state with no UnitHash should be OK
			res: makeResult(`{"loadState":"abc","activeState":"def","subState":"ghi","machineState":{"ID":"mymachine","PublicIP":"","Metadata":null,"Version":"","TotalResources":{"Cores":0,"Memory":0,"Disk":0},"FreeResources":{"Cores":0,"Memory":0,"Disk":0}}}`),
			err: nil,
			us:  &unit.UnitState{"abc", "def", "ghi", "mymachine", "", "foo.service"},
		},
		{
			// Unit state with UnitHash should be OK
			res: makeResult(`{"loadState":"abc","activeState":"def","subState":"ghi","machineState":{"ID":"mymachine","PublicIP":"","Metadata":null,"Version":"","TotalResources":{"Cores":0,"Memory":0,"Disk":0},"FreeResources":{"Cores":0,"Memory":0,"Disk":0}},"unitHash":"quickbrownfox"}`),
			err: nil,
			us:  &unit.UnitState{"abc", "def", "ghi", "mymachine", "quickbrownfox", "foo.service"},
		},
		{
			// Unit state with no MachineState should be OK
			res: makeResult(`{"loadState":"abc","activeState":"def","subState":"ghi"}`),
			err: nil,
			us:  &unit.UnitState{"abc", "def", "ghi", "", "", "foo.service"},
		},
		{
			// Bad unit state object should simply result in nil returned
			res: makeResult(`garbage, not good proper json`),
			err: nil,
			us:  nil,
		},
		{
			// Unknown errors should result in nil returned
			res: nil,
			err: errors.New("some random error from etcd"),
			us:  nil,
		},
		{
			// KeyNotFound should result in nil returned
			res: nil,
			err: etcd.Error{ErrorCode: etcd.ErrorKeyNotFound},
			us:  nil,
		},
	} {
		e := &testEtcdClient{
			res: []*etcd.Result{tt.res},
			err: []error{tt.err},
		}
		r := &EtcdRegistry{e, "/fleet/"}
		j := "foo.service"
		us, _ := r.getUnitState(j, "XXX")
		want := []action{
			action{key: "/fleet/states/foo.service/XXX", rec: false},
		}
		got := e.gets
		if !reflect.DeepEqual(got, want) {
			t.Errorf("case %d: bad result from GetUnitState:\ngot\n%#v\nwant\n%#v", i, got, want)
		}
		if !reflect.DeepEqual(us, tt.us) {
			t.Errorf("case %d: bad UnitState:\ngot\n%#v\nwant\n%#v", i, us, tt.us)
		}
	}
}

// usToJson serializes a UnitState to its registry JSON form, failing the
// test on marshalling errors.
func usToJson(t *testing.T, us *unit.UnitState) string {
	json, err := marshal(unitStateToModel(us))
	if err != nil {
		t.Fatalf("error marshalling unit: %v", err)
	}
	return json
}

// TestUnitStates verifies that UnitStates crawls the "states" namespace,
// returns valid states in order, skips unparseable entries, and classifies
// etcd errors correctly.
func TestUnitStates(t *testing.T) {
	fus1 := unit.UnitState{"abc", "def", "ghi", "mID1", "zzz", "foo"}
	fus2 := unit.UnitState{"cat", "dog", "cow", "mID2", "xxx", "foo"}
	// Multiple new unit states reported for the same unit
	foo := etcd.Node{
		Key: "/fleet/states/foo",
		Nodes: []etcd.Node{
			etcd.Node{
				Key:   "/fleet/states/foo/mID1",
				Value: usToJson(t, &fus1),
			},
			etcd.Node{
				Key:   "/fleet/states/foo/mID2",
				Value: usToJson(t, &fus2),
			},
		},
	}
	// Bogus new unit state which we won't expect to see in results
	bar := etcd.Node{
		Key: "/fleet/states/bar",
		Nodes: []etcd.Node{
			etcd.Node{
				Key:   "/fleet/states/bar/asdf",
				Value: `total garbage`,
			},
		},
	}
	// Result from crawling the new "states" namespace
	res2 := &etcd.Result{
		Node: &etcd.Node{
			Key:   "/fleet/states",
			Nodes: []etcd.Node{foo, bar},
		},
	}
	e := &testEtcdClient{
		res: []*etcd.Result{res2},
	}
	r := &EtcdRegistry{e, "/fleet/"}

	got, err := r.UnitStates()
	if err != nil {
		t.Errorf("unexpected error calling UnitStates(): %v", err)
	}

	want := []*unit.UnitState{
		&fus1,
		&fus2,
	}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("UnitStates() returned unexpected result")
		t.Log("got:")
		for _, i := range got {
			t.Logf("%#v", i)
		}
		t.Log("want:")
		for _, i := range want {
			t.Logf("%#v", i)
		}
	}

	// Ensure UnitState handles different error scenarios appropriately
	for i, tt := range []struct {
		errs []error
		fail bool
	}{
		{[]error{etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{nil}, false}, // No errors, no responses should succeed
		{[]error{errors.New("ur registry don't work")}, true},
		{[]error{errors.New("ur registry don't work")}, true},
	} {
		e = &testEtcdClient{err: tt.errs}
		r = &EtcdRegistry{e, "/fleet"}
		got, err = r.UnitStates()
		if (err != nil) != tt.fail {
			t.Errorf("case %d: unexpected error state calling UnitStates(): got %v, want %v", i, err, tt.fail)
		}
		if len(got) != 0 {
			t.Errorf("case %d: UnitStates() returned unexpected non-empty result on error: %v", i, got)
		}
	}
}

// TestMUSKeys verifies sort.Interface behavior of MUSKeys: ordering is by
// unit name first, then machine ID.
func TestMUSKeys(t *testing.T) {
	equal := func(a MUSKeys, b []MUSKey) bool {
		if len(a) != len(b) {
			return false
		}
		for i, m := range a {
			if m != b[i] {
				return false
			}
		}
		return true
	}
	k1 := MUSKey{name: "abc", machID: "aaa"}
	k2 := MUSKey{name: "abc", machID: "zzz"}
	k3 := MUSKey{name: "def", machID: "bbb"}
	k4 := MUSKey{name: "ppp", machID: "zzz"}
	k5 := MUSKey{name: "xxx", machID: "aaa"}
	want := []MUSKey{k1, k2, k3, k4, k5}
	ms := MUSKeys{k3, k4, k5, k2, k1}
	if equal(ms, want) {
		t.Fatalf("this should never happen!")
	}
	sort.Sort(ms)
	if !equal(ms, want) {
		t.Errorf("bad result after sort: got\n%#v, want\n%#v", ms, want)
	}
}

squash: beef up testing in TestGetUnitState

/*
   Copyright 2014 CoreOS, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package registry

import (
	"errors"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/coreos/fleet/etcd"
	"github.com/coreos/fleet/machine"
	"github.com/coreos/fleet/unit"
)

// action records a single etcd operation observed by testEtcdClient.
type action struct {
	key string
	val string
	rec bool
}

// testEtcdClient is a scripted fake etcd client: it records every
// Set/Delete/Get it receives and replays the canned results/errors in order.
type testEtcdClient struct {
	gets    []action
	sets    []action
	deletes []action

	// results returned from subsequent calls to etcd
	res []*etcd.Result
	ri  int

	// errors returned from subsequent calls to etcd
	err []error
	ei  int
}

// Do records the request by type and returns the next canned result/error
// (zero values once the scripts are exhausted).
func (t *testEtcdClient) Do(req etcd.Action) (r *etcd.Result, e error) {
	if s, ok := req.(*etcd.Set); ok {
		t.sets = append(t.sets, action{key: s.Key, val: s.Value})
	} else if d, ok := req.(*etcd.Delete); ok {
		t.deletes = append(t.deletes, action{key: d.Key, rec: d.Recursive})
	} else if g, ok := req.(*etcd.Get); ok {
		t.gets = append(t.gets, action{key: g.Key, rec: g.Recursive})
	}
	if t.ri < len(t.res) {
		r = t.res[t.ri]
		t.ri++
	}
	if t.ei < len(t.err) {
		e = t.err[t.ei]
		t.ei++
	}
	return r, e
}

// Wait simply delegates to Do; the stop channel is ignored in tests.
func (t *testEtcdClient) Wait(req etcd.Action, ch <-chan struct{}) (*etcd.Result, error) {
	return t.Do(req)
}

// TestUnitStatePaths checks the legacy and per-machine key layouts.
func TestUnitStatePaths(t *testing.T) {
	r := &EtcdRegistry{nil, "/fleet/"}
	j := "foo.service"
	want := "/fleet/state/foo.service"
	got := r.legacyUnitStatePath(j)
	if got != want {
		t.Errorf("bad unit state path: got %v, want %v", got, want)
	}
	m := "abcdefghij"
	want = "/fleet/states/foo.service/abcdefghij"
	got = r.unitStatePath(m, j)
	if got != want {
		t.Errorf("bad unit state path: got %v, want %v", got, want)
	}
}

// TestSaveUnitState verifies exactly which etcd set operations
// SaveUnitState performs, and that nil states are rejected.
func TestSaveUnitState(t *testing.T) {
	e := &testEtcdClient{}
	r := &EtcdRegistry{e, "/fleet/"}
	j := "foo.service"
	mID := "mymachine"
	us := unit.NewUnitState("abc", "def", "ghi", mID)

	// Saving nil unit state should fail
	r.SaveUnitState(j, nil, time.Second)
	if e.sets != nil || e.deletes != nil {
		t.Logf("sets: %#v", e.sets)
		t.Logf("deletes: %#v", e.deletes)
		t.Fatalf("SaveUnitState of nil state should fail but acted unexpectedly!")
	}

	// Saving unit state with no hash should succeed for now, but should fail
	// in the future. See https://github.com/coreos/fleet/issues/720.
	//r.SaveUnitState(j, us, time.Second)
	//if len(e.sets) != 1 || e.deletes == nil {
	//	t.Logf("sets: %#v", e.sets)
	//	t.Logf("deletes: %#v", e.deletes)
	//	t.Fatalf("SaveUnitState on UnitState with no hash acted unexpectedly!")
	//}

	us.UnitHash = "quickbrownfox"
	r.SaveUnitState(j, us, time.Second)
	json := `{"loadState":"abc","activeState":"def","subState":"ghi","machineState":{"ID":"mymachine","PublicIP":"","Metadata":null,"Version":""},"unitHash":"quickbrownfox"}`
	p1 := "/fleet/state/foo.service"
	p2 := "/fleet/states/foo.service/mymachine"
	want := []action{
		action{key: p1, val: json},
		action{key: p2, val: json},
	}
	got := e.sets
	if !reflect.DeepEqual(got, want) {
		t.Errorf("bad result from SaveUnitState: \ngot\n%#v\nwant\n%#v", got, want)
	}
	if e.deletes != nil {
		t.Errorf("unexpected deletes during SaveUnitState: %#v", e.deletes)
	}
	if e.gets != nil {
		t.Errorf("unexpected gets during SaveUnitState: %#v", e.gets)
	}
}

// TestRemoveUnitState verifies both delete operations and the error
// classification of RemoveUnitState.
func TestRemoveUnitState(t *testing.T) {
	e := &testEtcdClient{}
	r := &EtcdRegistry{e, "/fleet/"}
	j := "foo.service"
	err := r.RemoveUnitState(j)
	if err != nil {
		t.Errorf("unexpected error from RemoveUnitState: %v", err)
	}
	want := []action{
		action{key: "/fleet/state/foo.service", rec: false},
		action{key: "/fleet/states/foo.service", rec: true},
	}
	got := e.deletes
	if !reflect.DeepEqual(got, want) {
		t.Errorf("bad result from RemoveUnitState: \ngot\n%#v\nwant\n%#v", got, want)
	}
	if e.sets != nil {
		t.Errorf("unexpected sets during RemoveUnitState: %#v", e.sets)
	}
	if e.gets != nil {
		t.Errorf("unexpected gets during RemoveUnitState: %#v", e.gets)
	}

	// Ensure RemoveUnitState handles different error scenarios appropriately
	for i, tt := range []struct {
		errs []error
		fail bool
	}{
		{[]error{etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{nil, etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{nil, nil}, false}, // No errors, no responses should succeed
		{[]error{errors.New("ur registry don't work")}, true},
		{[]error{nil, errors.New("ur registry don't work")}, true},
	} {
		e = &testEtcdClient{err: tt.errs}
		r = &EtcdRegistry{e, "/fleet"}
		err = r.RemoveUnitState("foo.service")
		if (err != nil) != tt.fail {
			t.Errorf("case %d: unexpected error state calling UnitStates(): got %v, want %v", i, err, tt.fail)
		}
	}
}

// TestUnitStateToModel exercises the UnitState -> unitStateModel conversion.
func TestUnitStateToModel(t *testing.T) {
	for i, tt := range []struct {
		in   *unit.UnitState
		want *unitStateModel
	}{
		{
			in:   nil,
			want: nil,
		},
		{
			// Unit state with no hash and no machineID is OK
			// See https://github.com/coreos/fleet/issues/720
			in:   &unit.UnitState{"foo", "bar", "baz", "", "", "name"},
			want: &unitStateModel{"foo", "bar", "baz", nil, ""},
		},
		{
			// Unit state with hash but no machineID is OK
			in:   &unit.UnitState{"foo", "bar", "baz", "", "heh", "name"},
			want: &unitStateModel{"foo", "bar", "baz", nil, "heh"},
		},
		{
			in:   &unit.UnitState{"foo", "bar", "baz", "woof", "miaow", "name"},
			want: &unitStateModel{"foo", "bar", "baz", &machine.MachineState{ID: "woof"}, "miaow"},
		},
	} {
		got := unitStateToModel(tt.in)
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("case %d: got %#v, want %#v", i, got, tt.want)
		}
	}
}

// TestModelToUnitState exercises the unitStateModel -> UnitState conversion.
func TestModelToUnitState(t *testing.T) {
	for i, tt := range []struct {
		in   *unitStateModel
		want *unit.UnitState
	}{
		{
			in:   nil,
			want: nil,
		},
		{
			in: &unitStateModel{"foo", "bar", "baz", nil, ""},
			want: &unit.UnitState{
				LoadState:   "foo",
				ActiveState: "bar",
				SubState:    "baz",
				MachineID:   "",
				UnitHash:    "",
				UnitName:    "name",
			},
		},
		{
			in: &unitStateModel{"z", "x", "y", &machine.MachineState{ID: "abcd"}, ""},
			want: &unit.UnitState{
				LoadState:   "z",
				ActiveState: "x",
				SubState:    "y",
				MachineID:   "abcd",
				UnitHash:    "",
				UnitName:    "name",
			},
		},
	} {
		got := modelToUnitState(tt.in, "name")
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("case %d: got %#v, want %#v", i, got, tt.want)
		}
	}
}

// makeResult wraps a raw value in a minimal etcd.Result for test fixtures.
func makeResult(val string) *etcd.Result {
	return &etcd.Result{
		Node: &etcd.Node{
			Value: val,
		},
	}
}

// TestGetUnitState drives getUnitState against canned etcd responses and
// checks both the returned state and the expected error condition.
func TestGetUnitState(t *testing.T) {
	tests := []struct {
		res     *etcd.Result // result returned from etcd
		err     error        // error returned from etcd
		wantUS  *unit.UnitState
		wantErr bool
	}{
		{
			// Unit state with no UnitHash should be OK
			res:    makeResult(`{"loadState":"abc","activeState":"def","subState":"ghi","machineState":{"ID":"mymachine","PublicIP":"","Metadata":null,"Version":"","TotalResources":{"Cores":0,"Memory":0,"Disk":0},"FreeResources":{"Cores":0,"Memory":0,"Disk":0}}}`),
			err:    nil,
			wantUS: &unit.UnitState{"abc", "def", "ghi", "mymachine", "", "foo.service"},
		},
		{
			// Unit state with UnitHash should be OK
			res:    makeResult(`{"loadState":"abc","activeState":"def","subState":"ghi","machineState":{"ID":"mymachine","PublicIP":"","Metadata":null,"Version":"","TotalResources":{"Cores":0,"Memory":0,"Disk":0},"FreeResources":{"Cores":0,"Memory":0,"Disk":0}},"unitHash":"quickbrownfox"}`),
			err:    nil,
			wantUS: &unit.UnitState{"abc", "def", "ghi", "mymachine", "quickbrownfox", "foo.service"},
		},
		{
			// Unit state with no MachineState should be OK
			res:    makeResult(`{"loadState":"abc","activeState":"def","subState":"ghi"}`),
			err:    nil,
			wantUS: &unit.UnitState{"abc", "def", "ghi", "", "", "foo.service"},
		},
		{
			// Bad unit state object should simply result in nil returned
			res:     makeResult(`garbage, not good proper json`),
			err:     nil,
			wantUS:  nil,
			wantErr: true,
		},
		{
			// Unknown errors should result in nil returned
			res:     nil,
			err:     errors.New("some random error from etcd"),
			wantUS:  nil,
			wantErr: true,
		},
		{
			// KeyNotFound should result in nil returned
			res:     nil,
			err:     etcd.Error{ErrorCode: etcd.ErrorKeyNotFound},
			wantUS:  nil,
			wantErr: false,
		},
	}
	for i, tt := range tests {
		e := &testEtcdClient{
			res: []*etcd.Result{tt.res},
			err: []error{tt.err},
		}
		r := &EtcdRegistry{e, "/fleet/"}
		j := "foo.service"
		us, err := r.getUnitState(j, "XXX")
		if tt.wantErr != (err != nil) {
			t.Errorf("case %d: unexpected error %t, got %v", i, tt.wantErr, err)
			continue
		}
		want := []action{
			action{key: "/fleet/states/foo.service/XXX", rec: false},
		}
		got := e.gets
		if !reflect.DeepEqual(got, want) {
			t.Errorf("case %d: bad result from GetUnitState:\ngot\n%#v\nwant\n%#v", i, got, want)
		}
		if !reflect.DeepEqual(us, tt.wantUS) {
			t.Errorf("case %d: bad UnitState:\ngot\n%#v\nwant\n%#v", i, us, tt.wantUS)
		}
	}
}

// usToJson serializes a UnitState to its registry JSON form, failing the
// test on marshalling errors.
func usToJson(t *testing.T, us *unit.UnitState) string {
	json, err := marshal(unitStateToModel(us))
	if err != nil {
		t.Fatalf("error marshalling unit: %v", err)
	}
	return json
}

// TestUnitStates verifies crawling of the "states" namespace, skipping of
// unparseable entries, and etcd error classification.
func TestUnitStates(t *testing.T) {
	fus1 := unit.UnitState{"abc", "def", "ghi", "mID1", "zzz", "foo"}
	fus2 := unit.UnitState{"cat", "dog", "cow", "mID2", "xxx", "foo"}
	// Multiple new unit states reported for the same unit
	foo := etcd.Node{
		Key: "/fleet/states/foo",
		Nodes: []etcd.Node{
			etcd.Node{
				Key:   "/fleet/states/foo/mID1",
				Value: usToJson(t, &fus1),
			},
			etcd.Node{
				Key:   "/fleet/states/foo/mID2",
				Value: usToJson(t, &fus2),
			},
		},
	}
	// Bogus new unit state which we won't expect to see in results
	bar := etcd.Node{
		Key: "/fleet/states/bar",
		Nodes: []etcd.Node{
			etcd.Node{
				Key:   "/fleet/states/bar/asdf",
				Value: `total garbage`,
			},
		},
	}
	// Result from crawling the new "states" namespace
	res2 := &etcd.Result{
		Node: &etcd.Node{
			Key:   "/fleet/states",
			Nodes: []etcd.Node{foo, bar},
		},
	}
	e := &testEtcdClient{
		res: []*etcd.Result{res2},
	}
	r := &EtcdRegistry{e, "/fleet/"}

	got, err := r.UnitStates()
	if err != nil {
		t.Errorf("unexpected error calling UnitStates(): %v", err)
	}

	want := []*unit.UnitState{
		&fus1,
		&fus2,
	}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("UnitStates() returned unexpected result")
		t.Log("got:")
		for _, i := range got {
			t.Logf("%#v", i)
		}
		t.Log("want:")
		for _, i := range want {
			t.Logf("%#v", i)
		}
	}

	// Ensure UnitState handles different error scenarios appropriately
	for i, tt := range []struct {
		errs []error
		fail bool
	}{
		{[]error{etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{etcd.Error{ErrorCode: etcd.ErrorKeyNotFound}}, false},
		{[]error{nil}, false}, // No errors, no responses should succeed
		{[]error{errors.New("ur registry don't work")}, true},
		{[]error{errors.New("ur registry don't work")}, true},
	} {
		e = &testEtcdClient{err: tt.errs}
		r = &EtcdRegistry{e, "/fleet"}
		got, err = r.UnitStates()
		if (err != nil) != tt.fail {
			t.Errorf("case %d: unexpected error state calling UnitStates(): got %v, want %v", i, err, tt.fail)
		}
		if len(got) != 0 {
			t.Errorf("case %d: UnitStates() returned unexpected non-empty result on error: %v", i, got)
		}
	}
}

// TestMUSKeys verifies sort.Interface behavior of MUSKeys: ordering is by
// unit name first, then machine ID.
func TestMUSKeys(t *testing.T) {
	equal := func(a MUSKeys, b []MUSKey) bool {
		if len(a) != len(b) {
			return false
		}
		for i, m := range a {
			if m != b[i] {
				return false
			}
		}
		return true
	}
	k1 := MUSKey{name: "abc", machID: "aaa"}
	k2 := MUSKey{name: "abc", machID: "zzz"}
	k3 := MUSKey{name: "def", machID: "bbb"}
	k4 := MUSKey{name: "ppp", machID: "zzz"}
	k5 := MUSKey{name: "xxx", machID: "aaa"}
	want := []MUSKey{k1, k2, k3, k4, k5}
	ms := MUSKeys{k3, k4, k5, k2, k1}
	if equal(ms, want) {
		t.Fatalf("this should never happen!")
	}
	sort.Sort(ms)
	if !equal(ms, want) {
		t.Errorf("bad result after sort: got\n%#v, want\n%#v", ms, want)
	}
}
// Copyright 2016 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gitindex import ( "fmt" "log" "net/url" "os" "path/filepath" "reflect" "sort" "strings" "time" "github.com/google/zoekt" "github.com/google/zoekt/build" git "github.com/libgit2/git2go" ) // RepoModTime returns the time of last fetch of a git repository. func RepoModTime(dir string) (time.Time, error) { var last time.Time refDir := filepath.Join(dir, "refs") if _, err := os.Lstat(refDir); err == nil { if err := filepath.Walk(refDir, func(name string, fi os.FileInfo, err error) error { if !fi.IsDir() && last.Before(fi.ModTime()) { last = fi.ModTime() } return nil }); err != nil { return last, err } } // git gc compresses refs into the following file: for _, fn := range []string{"info/refs", "packed-refs"} { if fi, err := os.Lstat(filepath.Join(dir, fn)); err == nil && !fi.IsDir() && last.Before(fi.ModTime()) { last = fi.ModTime() } } return last, nil } // FindGitRepos finds directories holding git repositories. 
func FindGitRepos(arg string) ([]string, error) { arg, err := filepath.Abs(arg) if err != nil { return nil, err } var dirs []string if err := filepath.Walk(arg, func(name string, fi os.FileInfo, err error) error { if fi, err := os.Lstat(filepath.Join(name, ".git")); err == nil && fi.IsDir() { dirs = append(dirs, filepath.Join(name, ".git")) return filepath.SkipDir } if !strings.HasSuffix(name, ".git") || !fi.IsDir() { return nil } fi, err = os.Lstat(filepath.Join(name, "objects")) if err != nil || !fi.IsDir() { return nil } dirs = append(dirs, name) return filepath.SkipDir }); err != nil { return nil, err } return dirs, nil } func templatesForOrigin(u *url.URL) (*zoekt.Repository, error) { return nil, fmt.Errorf("unknown URL %s", u) } // setTemplates fills in URL templates for known git hosting // sites. func setTemplates(repo *zoekt.Repository, u *url.URL, typ string) error { repo.URL = u.String() switch typ { case "gitiles": /// eg. https://gerrit.googlesource.com/gitiles/+/master/tools/run_dev.sh#20 repo.CommitURLTemplate = u.String() + "/+/{{.Version}}" repo.FileURLTemplate = u.String() + "/+/{{.Version}}/{{.Path}}" repo.LineFragmentTemplate = "{{.LineNumber}}" case "github": // eg. 
https://github.com/hanwen/go-fuse/blob/notify/genversion.sh#L10 repo.CommitURLTemplate = u.String() + "/commit/{{.Version}}" repo.FileURLTemplate = u.String() + "/blob/{{.Version}}/{{.Path}}" repo.LineFragmentTemplate = "L{{.LineNumber}}" case "cgit": // http://git.savannah.gnu.org/cgit/lilypond.git/tree/elisp/lilypond-mode.el?h=dev/philh&id=b2ca0fefe3018477aaca23b6f672c7199ba5238e#n100 repo.CommitURLTemplate = u.String() + "/commit/?id={{.Version}}" repo.FileURLTemplate = u.String() + "/tree/{{.Path}}/?id={{.Version}}" repo.LineFragmentTemplate = "n{{.LineNumber}}" case "gitweb": // https://gerrit.libreoffice.org/gitweb?p=online.git;a=blob;f=Makefile.am;h=cfcfd7c36fbae10e269653dc57a9b68c92d4c10b;hb=848145503bf7b98ce4a4aa0a858a0d71dd0dbb26#l10 repo.FileURLTemplate = u.String() + ";a=blob;f={{.Path}};hb={{.Version}}" repo.CommitURLTemplate = u.String() + ";a=commit;h={{.Version}}" repo.LineFragmentTemplate = "l{{.LineNumber}}" default: return fmt.Errorf("URL scheme type %q unknown", typ) } return nil } // getCommit returns a tree object for the given reference. 
func getCommit(repo *git.Repository, ref string) (*git.Commit, error) { obj, err := repo.RevparseSingle(ref) if err != nil { return nil, err } defer obj.Free() commitObj, err := obj.Peel(git.ObjectCommit) if err != nil { return nil, err } return commitObj.AsCommit() } func clearEmptyConfig(err error) error { if git.IsErrorClass(err, git.ErrClassConfig) && git.IsErrorCode(err, git.ErrNotFound) { return nil } return err } func isMissingBranchError(err error) bool { return git.IsErrorClass(err, git.ErrClassReference) && git.IsErrorCode(err, git.ErrNotFound) } func setTemplatesFromConfig(desc *zoekt.Repository, repoDir string) error { base, err := git.NewConfig() if err != nil { return err } defer base.Free() cfg, err := git.OpenOndisk(base, filepath.Join(repoDir, "config")) if err != nil { return err } defer cfg.Free() webURLStr, err := cfg.LookupString("zoekt.web-url") err = clearEmptyConfig(err) if err != nil { return err } webURLType, err := cfg.LookupString("zoekt.web-url-type") err = clearEmptyConfig(err) if err != nil { return err } if webURLType != "" && webURLStr != "" { webURL, err := url.Parse(webURLStr) if err != nil { return err } if err := setTemplates(desc, webURL, webURLType); err != nil { return err } } name, err := cfg.LookupString("zoekt.name") err = clearEmptyConfig(err) if err != nil { return err } if name != "" { desc.Name = name } else { remoteURL, err := cfg.LookupString("remote.origin.url") err = clearEmptyConfig(err) if err != nil || remoteURL == "" { return err } u, err := url.Parse(remoteURL) if err != nil { return err } log.Printf("%q %q", remoteURL, u) if err := SetTemplatesFromOrigin(desc, u); err != nil { return err } } return nil } // SetTemplates fills in templates based on the origin URL. 
func SetTemplatesFromOrigin(desc *zoekt.Repository, u *url.URL) error { desc.Name = filepath.Join(u.Host, strings.TrimSuffix(u.Path, ".git")) if strings.HasSuffix(u.Host, ".googlesource.com") { return setTemplates(desc, u, "gitiles") } else if u.Host == "github.com" { u.Path = strings.TrimSuffix(u.Path, ".git") return setTemplates(desc, u, "github") } else { return fmt.Errorf("unknown git hosting site %q", u) } found, err := templatesForOrigin(u) if err != nil { return err } desc.URL = found.URL desc.CommitURLTemplate = found.CommitURLTemplate desc.FileURLTemplate = found.FileURLTemplate desc.LineFragmentTemplate = found.LineFragmentTemplate return nil } type Options struct { Submodules bool Incremental bool AllowMissingBranch bool RepoCacheDir string BuildOptions build.Options BranchPrefix string Branches []string } func expandBranches(repo *git.Repository, bs []string, prefix string) ([]string, error) { var result []string for _, b := range bs { if b == "HEAD" { _, ref, err := repo.RevparseExt(b) if err != nil { return nil, err } result = append(result, strings.TrimPrefix(ref.Name(), prefix)) continue } if strings.Contains(b, "*") { iter, err := repo.NewBranchIterator(git.BranchAll) if err != nil { log.Println("boem") return nil, err } for { br, _, err := iter.Next() if git.IsErrorCode(err, git.ErrIterOver) { break } if err != nil { log.Printf("bam %#v", err) return nil, err } name, err := br.Name() if err != nil { return nil, err } if matched, err := filepath.Match(b, name); err != nil { return nil, err } else if !matched { continue } result = append(result, strings.TrimPrefix(name, prefix)) } continue } result = append(result, b) } return result, nil } // IndexGitRepo indexes the git repository as specified by the options. 
// IndexGitRepo indexes the git repository as specified by the options:
// it expands the configured branches, collects every blob reachable from
// each branch tip (including subrepositories via the repo cache), and
// feeds them to a zoekt index builder.
func IndexGitRepo(opts Options) error {
	repo, err := git.OpenRepository(opts.BuildOptions.RepoDir)
	if err != nil {
		return err
	}
	// Template errors are logged but not fatal: indexing proceeds
	// without URL templates.
	if err := setTemplatesFromConfig(&opts.BuildOptions.RepositoryDescription, opts.BuildOptions.RepoDir); err != nil {
		log.Printf("setTemplatesFromConfig(%s): %s", opts.BuildOptions.RepoDir, err)
	}

	repoCache := NewRepoCache(opts.RepoCacheDir)
	defer repoCache.Close()

	// branch => (path, sha1) => repo.
	repos := map[FileKey]BlobLocation{}

	// FileKey => branches
	branchMap := map[FileKey][]string{}

	// Branch => Repo => SHA1
	branchVersions := map[string]map[string]git.Oid{}

	branches, err := expandBranches(repo, opts.Branches, opts.BranchPrefix)
	if err != nil {
		return err
	}
	for _, b := range branches {
		fullName := filepath.Join(opts.BranchPrefix, b)
		commit, err := getCommit(repo, fullName)
		if opts.AllowMissingBranch && isMissingBranchError(err) {
			continue
		}
		if err != nil {
			return err
		}
		// NOTE(review): defer inside a loop — commit/tree objects are
		// only freed when IndexGitRepo returns, so memory grows with
		// the number of branches. Harmless for typical branch counts.
		defer commit.Free()
		opts.BuildOptions.RepositoryDescription.Branches = append(opts.BuildOptions.RepositoryDescription.Branches, zoekt.RepositoryBranch{
			Name:    b,
			Version: commit.Id().String(),
		})

		tree, err := commit.Tree()
		if err != nil {
			return err
		}
		defer tree.Free()

		files, subVersions, err := TreeToFiles(repo, tree, opts.BuildOptions.RepositoryDescription.URL, repoCache)
		if err != nil {
			return err
		}
		for k, v := range files {
			repos[k] = v
			branchMap[k] = append(branchMap[k], b)
		}

		branchVersions[b] = subVersions
	}

	// Incremental mode: skip the build if the on-disk index already
	// covers exactly these branch versions.
	if opts.Incremental {
		versions := opts.BuildOptions.IndexVersions()
		if reflect.DeepEqual(versions, opts.BuildOptions.RepositoryDescription.Branches) {
			return nil
		}
	}

	reposByPath := map[string]BlobLocation{}
	for key, location := range repos {
		reposByPath[key.SubRepoPath] = location
	}

	// Describe each subrepository; the empty path is the repo itself.
	opts.BuildOptions.SubRepositories = map[string]*zoekt.Repository{}
	for path, location := range reposByPath {
		tpl := opts.BuildOptions.RepositoryDescription
		if path != "" {
			tpl = zoekt.Repository{URL: location.URL.String()}
			if err := SetTemplatesFromOrigin(&tpl, location.URL); err != nil {
				log.Printf("setTemplatesFromOrigin(%s, %s): %s", path, location.URL, err)
			}
		}
		opts.BuildOptions.SubRepositories[path] = &tpl
	}
	// Record, per branch, the subrepository version that was indexed.
	for _, br := range opts.BuildOptions.RepositoryDescription.Branches {
		for path, repo := range opts.BuildOptions.SubRepositories {
			id := branchVersions[br.Name][path]
			repo.Branches = append(repo.Branches, zoekt.RepositoryBranch{
				Name:    br.Name,
				Version: id.String(),
			})
		}
	}

	builder, err := build.NewBuilder(opts.BuildOptions)
	if err != nil {
		return err
	}

	var names []string
	fileKeys := map[string][]FileKey{}
	for key := range repos {
		n := key.FullPath()
		fileKeys[n] = append(fileKeys[n], key)
		names = append(names, n)
	}

	// not strictly necessary, but nice for reproducibility.
	sort.Strings(names)

	for _, name := range names {
		keys := fileKeys[name]
		for _, key := range keys {
			brs := branchMap[key]
			blob, err := repos[key].Repo.LookupBlob(&key.ID)
			if err != nil {
				return err
			}

			// Oversized blobs are silently skipped.
			if blob.Size() > int64(opts.BuildOptions.SizeMax) {
				continue
			}

			builder.Add(zoekt.Document{
				SubRepositoryPath: key.SubRepoPath,
				Name:              key.FullPath(),
				Content:           blob.Contents(),
				Branches:          brs,
			})
		}
	}
	return builder.Finish()
}

Drop stray debug print

Change-Id: I37c36750116914270963a17b192ff4f85b118137

// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitindex

import (
	"fmt"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"time"

	"github.com/google/zoekt"
	"github.com/google/zoekt/build"

	git "github.com/libgit2/git2go"
)

// RepoModTime returns the time of last fetch of a git repository,
// approximated by the newest mtime under refs/ and of the packed ref files.
func RepoModTime(dir string) (time.Time, error) {
	var last time.Time
	refDir := filepath.Join(dir, "refs")
	if _, err := os.Lstat(refDir); err == nil {
		// NOTE(review): the walk callback ignores its err parameter;
		// fi is nil when err != nil, so fi.IsDir() can panic on an
		// unreadable entry — consider returning err first.
		if err := filepath.Walk(refDir, func(name string, fi os.FileInfo, err error) error {
			if !fi.IsDir() && last.Before(fi.ModTime()) {
				last = fi.ModTime()
			}
			return nil
		}); err != nil {
			return last, err
		}
	}

	// git gc compresses refs into the following file:
	for _, fn := range []string{"info/refs", "packed-refs"} {
		if fi, err := os.Lstat(filepath.Join(dir, fn)); err == nil && !fi.IsDir() && last.Before(fi.ModTime()) {
			last = fi.ModTime()
		}
	}
	return last, nil
}

// FindGitRepos finds directories holding git repositories: either a
// ".git" subdirectory or a bare "*.git" directory with an objects/ dir.
func FindGitRepos(arg string) ([]string, error) {
	arg, err := filepath.Abs(arg)
	if err != nil {
		return nil, err
	}
	var dirs []string
	// NOTE(review): same nil-fi hazard as RepoModTime — the walk
	// callback ignores its err parameter before dereferencing fi.
	if err := filepath.Walk(arg, func(name string, fi os.FileInfo, err error) error {
		if fi, err := os.Lstat(filepath.Join(name, ".git")); err == nil && fi.IsDir() {
			dirs = append(dirs, filepath.Join(name, ".git"))
			return filepath.SkipDir
		}
		if !strings.HasSuffix(name, ".git") || !fi.IsDir() {
			return nil
		}
		fi, err = os.Lstat(filepath.Join(name, "objects"))
		if err != nil || !fi.IsDir() {
			return nil
		}
		dirs = append(dirs, name)
		return filepath.SkipDir
	}); err != nil {
		return nil, err
	}
	return dirs, nil
}

// templatesForOrigin is a placeholder for origin-specific template lookup;
// it currently recognizes no URLs.
func templatesForOrigin(u *url.URL) (*zoekt.Repository, error) {
	return nil, fmt.Errorf("unknown URL %s", u)
}

// setTemplates fills in URL templates for known git hosting
// sites.
func setTemplates(repo *zoekt.Repository, u *url.URL, typ string) error {
	repo.URL = u.String()
	switch typ {
	case "gitiles":
		/// eg. https://gerrit.googlesource.com/gitiles/+/master/tools/run_dev.sh#20
		repo.CommitURLTemplate = u.String() + "/+/{{.Version}}"
		repo.FileURLTemplate = u.String() + "/+/{{.Version}}/{{.Path}}"
		repo.LineFragmentTemplate = "{{.LineNumber}}"
	case "github":
		// eg. https://github.com/hanwen/go-fuse/blob/notify/genversion.sh#L10
		repo.CommitURLTemplate = u.String() + "/commit/{{.Version}}"
		repo.FileURLTemplate = u.String() + "/blob/{{.Version}}/{{.Path}}"
		repo.LineFragmentTemplate = "L{{.LineNumber}}"
	case "cgit":
		// http://git.savannah.gnu.org/cgit/lilypond.git/tree/elisp/lilypond-mode.el?h=dev/philh&id=b2ca0fefe3018477aaca23b6f672c7199ba5238e#n100
		repo.CommitURLTemplate = u.String() + "/commit/?id={{.Version}}"
		repo.FileURLTemplate = u.String() + "/tree/{{.Path}}/?id={{.Version}}"
		repo.LineFragmentTemplate = "n{{.LineNumber}}"
	case "gitweb":
		// https://gerrit.libreoffice.org/gitweb?p=online.git;a=blob;f=Makefile.am;h=cfcfd7c36fbae10e269653dc57a9b68c92d4c10b;hb=848145503bf7b98ce4a4aa0a858a0d71dd0dbb26#l10
		repo.FileURLTemplate = u.String() + ";a=blob;f={{.Path}};hb={{.Version}}"
		repo.CommitURLTemplate = u.String() + ";a=commit;h={{.Version}}"
		repo.LineFragmentTemplate = "l{{.LineNumber}}"
	default:
		return fmt.Errorf("URL scheme type %q unknown", typ)
	}
	return nil
}

// getCommit returns a tree object for the given reference.
// NOTE(review): the code actually returns the *commit* object that ref
// resolves to, not a tree.
func getCommit(repo *git.Repository, ref string) (*git.Commit, error) {
	obj, err := repo.RevparseSingle(ref)
	if err != nil {
		return nil, err
	}
	defer obj.Free()
	commitObj, err := obj.Peel(git.ObjectCommit)
	if err != nil {
		return nil, err
	}
	return commitObj.AsCommit()
}

// clearEmptyConfig maps "config key not found" errors to nil so optional
// keys can be looked up without special-casing.
func clearEmptyConfig(err error) error {
	if git.IsErrorClass(err, git.ErrClassConfig) && git.IsErrorCode(err, git.ErrNotFound) {
		return nil
	}
	return err
}

// isMissingBranchError reports whether err means the requested branch
// reference does not exist.
func isMissingBranchError(err error) bool {
	return git.IsErrorClass(err, git.ErrClassReference) && git.IsErrorCode(err, git.ErrNotFound)
}

// setTemplatesFromConfig fills in repository name and URL templates from
// the on-disk git config: explicit zoekt.* keys take precedence,
// otherwise they are derived from remote.origin.url.
func setTemplatesFromConfig(desc *zoekt.Repository, repoDir string) error {
	base, err := git.NewConfig()
	if err != nil {
		return err
	}
	defer base.Free()
	cfg, err := git.OpenOndisk(base, filepath.Join(repoDir, "config"))
	if err != nil {
		return err
	}
	defer cfg.Free()
	webURLStr, err := cfg.LookupString("zoekt.web-url")
	err = clearEmptyConfig(err)
	if err != nil {
		return err
	}
	webURLType, err := cfg.LookupString("zoekt.web-url-type")
	err = clearEmptyConfig(err)
	if err != nil {
		return err
	}
	if webURLType != "" && webURLStr != "" {
		webURL, err := url.Parse(webURLStr)
		if err != nil {
			return err
		}
		if err := setTemplates(desc, webURL, webURLType); err != nil {
			return err
		}
	}
	name, err := cfg.LookupString("zoekt.name")
	err = clearEmptyConfig(err)
	if err != nil {
		return err
	}
	if name != "" {
		desc.Name = name
	} else {
		remoteURL, err := cfg.LookupString("remote.origin.url")
		err = clearEmptyConfig(err)
		if err != nil || remoteURL == "" {
			return err
		}
		u, err := url.Parse(remoteURL)
		if err != nil {
			return err
		}
		if err := SetTemplatesFromOrigin(desc, u); err != nil {
			return err
		}
	}
	return nil
}

// SetTemplates fills in templates based on the origin URL.
func SetTemplatesFromOrigin(desc *zoekt.Repository, u *url.URL) error {
	desc.Name = filepath.Join(u.Host, strings.TrimSuffix(u.Path, ".git"))

	if strings.HasSuffix(u.Host, ".googlesource.com") {
		return setTemplates(desc, u, "gitiles")
	} else if u.Host == "github.com" {
		u.Path = strings.TrimSuffix(u.Path, ".git")
		return setTemplates(desc, u, "github")
	} else {
		return fmt.Errorf("unknown git hosting site %q", u)
	}

	// NOTE(review): everything below is unreachable — every branch of
	// the conditional above returns (go vet: unreachable code).
	found, err := templatesForOrigin(u)
	if err != nil {
		return err
	}

	desc.URL = found.URL
	desc.CommitURLTemplate = found.CommitURLTemplate
	desc.FileURLTemplate = found.FileURLTemplate
	desc.LineFragmentTemplate = found.LineFragmentTemplate
	return nil
}

// Options configures IndexGitRepo.
type Options struct {
	// Submodules, if set, recurses into submodules while indexing.
	Submodules bool
	// Incremental skips indexing when the index is already current.
	Incremental bool
	// AllowMissingBranch tolerates configured branches that do not exist.
	AllowMissingBranch bool
	// RepoCacheDir is where subrepositories are cached.
	RepoCacheDir string
	// BuildOptions are passed through to the index builder.
	BuildOptions build.Options

	// BranchPrefix is prepended to Branches entries to form reference names.
	BranchPrefix string
	// Branches lists branches to index; "HEAD" and globs are expanded.
	Branches []string
}

// expandBranches resolves the branch list: "HEAD" is resolved to the
// current branch, names containing "*" are matched as globs against all
// branches, and prefix is stripped from resolved reference names.
func expandBranches(repo *git.Repository, bs []string, prefix string) ([]string, error) {
	var result []string
	for _, b := range bs {
		if b == "HEAD" {
			_, ref, err := repo.RevparseExt(b)
			if err != nil {
				return nil, err
			}
			result = append(result, strings.TrimPrefix(ref.Name(), prefix))
			continue
		}
		if strings.Contains(b, "*") {
			iter, err := repo.NewBranchIterator(git.BranchAll)
			if err != nil {
				// NOTE(review): "boem" looks like a leftover debug print.
				log.Println("boem")
				return nil, err
			}
			for {
				br, _, err := iter.Next()
				if git.IsErrorCode(err, git.ErrIterOver) {
					break
				}
				if err != nil {
					// NOTE(review): "bam" looks like a leftover debug print.
					log.Printf("bam %#v", err)
					return nil, err
				}
				name, err := br.Name()
				if err != nil {
					return nil, err
				}
				if matched, err := filepath.Match(b, name); err != nil {
					return nil, err
				} else if !matched {
					continue
				}
				result = append(result, strings.TrimPrefix(name, prefix))
			}
			continue
		}
		result = append(result, b)
	}
	return result, nil
}

// IndexGitRepo indexes the git repository as specified by the options.
func IndexGitRepo(opts Options) error {
	repo, err := git.OpenRepository(opts.BuildOptions.RepoDir)
	if err != nil {
		return err
	}
	// Template failures are non-fatal: the repo is indexed without URL
	// templates.
	if err := setTemplatesFromConfig(&opts.BuildOptions.RepositoryDescription, opts.BuildOptions.RepoDir); err != nil {
		log.Printf("setTemplatesFromConfig(%s): %s", opts.BuildOptions.RepoDir, err)
	}

	repoCache := NewRepoCache(opts.RepoCacheDir)
	defer repoCache.Close()

	// branch => (path, sha1) => repo.
	repos := map[FileKey]BlobLocation{}

	// FileKey => branches
	branchMap := map[FileKey][]string{}

	// Branch => Repo => SHA1
	branchVersions := map[string]map[string]git.Oid{}

	branches, err := expandBranches(repo, opts.Branches, opts.BranchPrefix)
	if err != nil {
		return err
	}
	for _, b := range branches {
		fullName := filepath.Join(opts.BranchPrefix, b)
		commit, err := getCommit(repo, fullName)
		// Missing branches are skipped (not fatal) when configured so.
		if opts.AllowMissingBranch && isMissingBranchError(err) {
			continue
		}

		if err != nil {
			return err
		}
		// NOTE(review): defer inside a loop — commit/tree objects are
		// only freed when IndexGitRepo returns, so memory for all
		// branches is held simultaneously. Confirm whether freeing
		// per-iteration is safe given TreeToFiles may retain references.
		defer commit.Free()
		opts.BuildOptions.RepositoryDescription.Branches = append(opts.BuildOptions.RepositoryDescription.Branches, zoekt.RepositoryBranch{
			Name:    b,
			Version: commit.Id().String(),
		})

		tree, err := commit.Tree()
		if err != nil {
			return err
		}
		defer tree.Free()

		files, subVersions, err := TreeToFiles(repo, tree, opts.BuildOptions.RepositoryDescription.URL, repoCache)
		if err != nil {
			return err
		}
		for k, v := range files {
			repos[k] = v
			branchMap[k] = append(branchMap[k], b)
		}

		branchVersions[b] = subVersions
	}

	// Incremental mode: if the on-disk index already records exactly
	// these branch versions, there is nothing to do.
	if opts.Incremental {
		versions := opts.BuildOptions.IndexVersions()
		if reflect.DeepEqual(versions, opts.BuildOptions.RepositoryDescription.Branches) {
			return nil
		}
	}

	// Collapse (path, sha1) keys down to one location per sub-repo path.
	reposByPath := map[string]BlobLocation{}
	for key, location := range repos {
		reposByPath[key.SubRepoPath] = location
	}

	opts.BuildOptions.SubRepositories = map[string]*zoekt.Repository{}
	for path, location := range reposByPath {
		tpl := opts.BuildOptions.RepositoryDescription
		// The empty path is the top-level repository itself; only real
		// sub-repositories get templates derived from their origin URL.
		if path != "" {
			tpl = zoekt.Repository{URL: location.URL.String()}
			if err := SetTemplatesFromOrigin(&tpl, location.URL); err != nil {
				log.Printf("setTemplatesFromOrigin(%s, %s): %s", path, location.URL, err)
			}
		}
		opts.BuildOptions.SubRepositories[path] = &tpl
	}
	// Record, per sub-repository, the version it had on each branch.
	// NOTE(review): "repo" here shadows the *git.Repository above.
	for _, br := range opts.BuildOptions.RepositoryDescription.Branches {
		for path, repo := range opts.BuildOptions.SubRepositories {
			id := branchVersions[br.Name][path]
			repo.Branches = append(repo.Branches, zoekt.RepositoryBranch{
				Name:    br.Name,
				Version: id.String(),
			})
		}
	}

	builder, err := build.NewBuilder(opts.BuildOptions)
	if err != nil {
		return err
	}

	var names []string
	fileKeys := map[string][]FileKey{}
	for key := range repos {
		n := key.FullPath()
		fileKeys[n] = append(fileKeys[n], key)
		names = append(names, n)
	}

	// not strictly necessary, but nice for reproducibility.
	sort.Strings(names)

	for _, name := range names {
		keys := fileKeys[name]
		for _, key := range keys {
			brs := branchMap[key]
			blob, err := repos[key].Repo.LookupBlob(&key.ID)
			if err != nil {
				return err
			}

			// Oversized blobs are silently skipped.
			if blob.Size() > int64(opts.BuildOptions.SizeMax) {
				continue
			}

			// NOTE(review): if builder.Add reports errors in this
			// version of the builder API, they are discarded here —
			// confirm its signature.
			builder.Add(zoekt.Document{
				SubRepositoryPath: key.SubRepoPath,
				Name:              key.FullPath(),
				Content:           blob.Contents(),
				Branches:          brs,
			})
		}
	}
	return builder.Finish()
}
// Copyright 2020 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "os" "os/exec" "path" "path/filepath" "regexp" "strings" "github.com/russross/blackfriday/v2" "sigs.k8s.io/yaml" ) // golang flags don't accept arrays by default. This adds it. type flagStrings []string func (flagString *flagStrings) String() string { return strings.Join(*flagString, ",") } func (flagString *flagStrings) Set(value string) error { *flagString = append(*flagString, value) return nil } func main() { var oldBranch, newBranch, templatesDir, outDir, oldRelease, newRelease, pullRequest string var validateOnly bool var notesDirs flagStrings flag.StringVar(&oldBranch, "oldBranch", "a", "branch to compare against") flag.StringVar(&newBranch, "newBranch", "b", "branch containing new files") flag.StringVar(&pullRequest, "pullRequest", "", "the pull request to check. Either this or oldBranch & newBranch are required.") flag.Var(&notesDirs, "notes", "the directory containing release notes. 
Repeat for multiple notes directories") flag.StringVar(&templatesDir, "templates", "./templates", "the directory containing release note templates") flag.StringVar(&outDir, "outDir", ".", "the directory containing release notes") flag.BoolVar(&validateOnly, "validateOnly", false, "set to true to perform validation only") flag.StringVar(&oldRelease, "oldRelease", "x.y.(z-1)", "old release") flag.StringVar(&newRelease, "newRelease", "x.y.z", "new release") flag.Parse() // Prow, at the time of writing this, does not use Git clone, meaning that there is no remote for the pull request. Generate a URL instead if we're using Prow. RepoOwner := os.Getenv("REPO_OWNER") RepoName := os.Getenv("REPO_NAME") if RepoOwner != "" && RepoName != "" { pullRequest = fmt.Sprintf("https://github.com/%s/%s/pull/%s", RepoOwner, RepoName, pullRequest) } if len(notesDirs) == 0 { notesDirs = []string{"."} } var releaseNotes []Note for _, notesDir := range notesDirs { var releaseNoteFiles []string fmt.Printf("Looking for release notes in %s.\n", notesDir) releaseNotesDir := "releasenotes/notes" if _, err := os.Stat(notesDir); os.IsNotExist(err) { fmt.Printf("Could not find repository -- directory %s does not exist.", notesDir) os.Exit(1) } if _, err := os.Stat(filepath.Join(notesDir, releaseNotesDir)); os.IsNotExist(err) { fmt.Printf("could not find release notes directory -- %s does not exist", filepath.Join(notesDir, releaseNotesDir)) os.Exit(2) } var err error releaseNoteFiles, err = getNewFilesInBranch(oldBranch, newBranch, pullRequest, notesDir, releaseNotesDir) if err != nil { fmt.Fprintf(os.Stderr, "failed to list files: %s\n", err.Error()) os.Exit(1) } fmt.Printf("Found %d files.\n\n", len(releaseNoteFiles)) fmt.Printf("Parsing release notes\n") releaseNotesEntries, err := parseReleaseNotesFiles(notesDir, releaseNoteFiles) if err != nil { fmt.Fprintf(os.Stderr, "Unable to read release notes: %s\n", err.Error()) os.Exit(1) } releaseNotes = append(releaseNotes, releaseNotesEntries...) 
} if len(releaseNotes) < 1 { fmt.Fprintf(os.Stderr, "failed to find any release notes.\n") // maps to EX_NOINPUT, but more importantly lets us differentiate between no files found and other errors os.Exit(66) } if validateOnly { return } fmt.Printf("\nLooking for markdown templates in %s.\n", templatesDir) templateFiles, err := getFilesWithExtension(templatesDir, "md") if err != nil { fmt.Fprintf(os.Stderr, "failed to list files: %s\n", err.Error()) os.Exit(1) } fmt.Printf("Found %d files.\n\n", len(templateFiles)) for _, filename := range templateFiles { output, err := populateTemplate(templatesDir, filename, releaseNotes, oldRelease, newRelease) if err != nil { fmt.Fprintf(os.Stderr, "Failed to parse template: %s\n", err.Error()) os.Exit(1) } if err := createDirIfNotExists(outDir); err != nil { fmt.Fprintf(os.Stderr, "Failed to create our dir: %s\n", err.Error()) } if err := writeAsMarkdown(path.Join(outDir, filename), output); err != nil { fmt.Fprintf(os.Stderr, "Failed to write markdown: %s\n", err.Error()) } else { fmt.Printf("Wrote markdown to %s\n", filename) } if err := writeAsHTML(path.Join(outDir, filename), output); err != nil { fmt.Fprintf(os.Stderr, "Failed to write HTML: %s\n", err.Error()) } else { fmt.Printf("Wrote markdown to %s.html\n", filename) } } } func createDirIfNotExists(path string) error { err := os.MkdirAll(path, 0o755) if os.IsExist(err) { return nil } return err } // writeAsHTML generates HTML from markdown before writing it to a file func writeAsHTML(filename string, markdown string) error { output := string(blackfriday.Run([]byte(markdown))) if err := ioutil.WriteFile(filename+".html", []byte(output), 0o644); err != nil { return err } return nil } // writeAsMarkdown writes markdown to a file func writeAsMarkdown(filename string, markdown string) error { if err := ioutil.WriteFile(filename, []byte(markdown), 0o644); err != nil { return err } return nil } func parseTemplateFormat(releaseNotes []Note, format string) ([]string, error) { 
template, err := ParseTemplate(format) if err != nil { return nil, fmt.Errorf("failed to parse template: %s", err.Error()) } return getNotesForTemplateFormat(releaseNotes, template), nil } func getNotesForTemplateFormat(notes []Note, template Template) []string { parsedNotes := make([]string, 0) for _, note := range notes { if template.Type == "releaseNotes" { parsedNotes = append(parsedNotes, note.getReleaseNotes(template.Kind, template.Area, template.Action)...) } else if template.Type == "upgradeNotes" { parsedNotes = append(parsedNotes, note.getUpgradeNotes()...) } else if template.Type == "securityNotes" { parsedNotes = append(parsedNotes, note.getSecurityNotes()...) } } return parsedNotes } // getFilesWithExtension returns the files from filePath with extension extension func getFilesWithExtension(filePath string, extension string) ([]string, error) { directory, err := os.Open(filePath) if err != nil { return nil, fmt.Errorf("failed to open directory: %s", err.Error()) } defer directory.Close() var files []string files, err = directory.Readdirnames(0) if err != nil { return nil, fmt.Errorf("unable to list files for directory %s: %s", filePath, err.Error()) } filesWithExtension := make([]string, 0) for _, fileName := range files { if strings.HasSuffix(fileName, extension) { filesWithExtension = append(filesWithExtension, fileName) } } return filesWithExtension, nil } func parseReleaseNotesFiles(filePath string, files []string) ([]Note, error) { notes := make([]Note, 0) for _, file := range files { file = path.Join(filePath, file) contents, err := ioutil.ReadFile(file) if err != nil { return nil, fmt.Errorf("unable to open file %s: %s", file, err.Error()) } var note Note if err = yaml.Unmarshal(contents, &note); err != nil { return nil, fmt.Errorf("unable to parse release note %s:%s", file, err.Error()) } note.File = file notes = append(notes, note) fmt.Printf("found %d upgrade notes, %d release notes, and %d security notes in %s\n", len(note.UpgradeNotes), 
len(note.ReleaseNotes), len(note.SecurityNotes), note.File) } return notes, nil } func populateTemplate(filepath string, filename string, releaseNotes []Note, oldRelease string, newRelease string) (string, error) { filename = path.Join(filepath, filename) fmt.Printf("Processing %s\n", filename) contents, err := ioutil.ReadFile(filename) if err != nil { return "", fmt.Errorf("unable to open file %s: %s", filename, err.Error()) } comment := regexp.MustCompile("<!--(.*)-->") output := string(contents) output = strings.Replace(output, "<!--oldRelease-->", oldRelease, -1) output = strings.Replace(output, "<!--newRelease-->", newRelease, -1) results := comment.FindAllString(output, -1) for _, result := range results { contents, err := parseTemplateFormat(releaseNotes, result) if err != nil { return "", fmt.Errorf("unable to parse templates: %s", err.Error()) } joinedContents := strings.Join(contents, "\n") output = strings.Replace(output, result, joinedContents, -1) } return output, nil } type prView struct { Files []struct { Path string `json:"path"` } `json:"files"` } func getFilesFromGHPRView(path string, pullRequest string, notesSubpath string) ([]string, error) { cmdStr := fmt.Sprintf("cd %s; gh pr view %s --json files", path, pullRequest) fmt.Printf("Executing: %s\n", cmdStr) cmd := exec.Command("bash", "-c", cmdStr) cmd.Env = os.Environ() out, err := cmd.CombinedOutput() fmt.Printf("%s\n", out) if err != nil { return nil, fmt.Errorf("received error running GH: %s", err.Error()) } var prResults prView if err := json.Unmarshal(out, &prResults); err != nil { return nil, fmt.Errorf("failed to parse GH results: %s", err.Error()) } var results []string for _, val := range prResults.Files { if strings.Contains(val.Path, notesSubpath) { results = append(results, val.Path) } } return results, nil } func getNewFilesInBranch(oldBranch string, newBranch string, pullRequest string, path string, notesSubpath string) ([]string, error) { // if there's a pull request, we can just 
get the changed files from GitHub. If not, we have to do it manually. if pullRequest != "" { return getFilesFromGHPRView(path, pullRequest, notesSubpath) } cmd := fmt.Sprintf("cd %s; git diff-tree -r --diff-filter=AMR --name-only --relative=%s '%s' '%s'", path, notesSubpath, oldBranch, newBranch) fmt.Printf("Executing: %s\n", cmd) out, err := exec.Command("bash", "-c", cmd).CombinedOutput() if err != nil { return nil, err } outFiles := strings.Split(string(out), "\n") // the getFilesFromGHPRView(path, pullRequest, notesSubpath) method returns file names which are relative to the repo path. // the git diff-tree is relative to the notesSupbpath, so we need to add the subpath back to the filenames. outFileswithPath := []string{} for _, f := range outFiles[:len(outFiles)-1] { // skip the last file which is empty outFileswithPath = append(outFileswithPath, filepath.Join(notesSubpath, f)) } return outFileswithPath, nil } add trailing newline before exit (#1875) // Copyright 2020 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "os" "os/exec" "path" "path/filepath" "regexp" "strings" "github.com/russross/blackfriday/v2" "sigs.k8s.io/yaml" ) // golang flags don't accept arrays by default. This adds it. 
type flagStrings []string func (flagString *flagStrings) String() string { return strings.Join(*flagString, ",") } func (flagString *flagStrings) Set(value string) error { *flagString = append(*flagString, value) return nil } func main() { var oldBranch, newBranch, templatesDir, outDir, oldRelease, newRelease, pullRequest string var validateOnly bool var notesDirs flagStrings flag.StringVar(&oldBranch, "oldBranch", "a", "branch to compare against") flag.StringVar(&newBranch, "newBranch", "b", "branch containing new files") flag.StringVar(&pullRequest, "pullRequest", "", "the pull request to check. Either this or oldBranch & newBranch are required.") flag.Var(&notesDirs, "notes", "the directory containing release notes. Repeat for multiple notes directories") flag.StringVar(&templatesDir, "templates", "./templates", "the directory containing release note templates") flag.StringVar(&outDir, "outDir", ".", "the directory containing release notes") flag.BoolVar(&validateOnly, "validateOnly", false, "set to true to perform validation only") flag.StringVar(&oldRelease, "oldRelease", "x.y.(z-1)", "old release") flag.StringVar(&newRelease, "newRelease", "x.y.z", "new release") flag.Parse() // Prow, at the time of writing this, does not use Git clone, meaning that there is no remote for the pull request. Generate a URL instead if we're using Prow. 
RepoOwner := os.Getenv("REPO_OWNER") RepoName := os.Getenv("REPO_NAME") if RepoOwner != "" && RepoName != "" { pullRequest = fmt.Sprintf("https://github.com/%s/%s/pull/%s", RepoOwner, RepoName, pullRequest) } if len(notesDirs) == 0 { notesDirs = []string{"."} } var releaseNotes []Note for _, notesDir := range notesDirs { var releaseNoteFiles []string fmt.Printf("Looking for release notes in %s.\n", notesDir) releaseNotesDir := "releasenotes/notes" if _, err := os.Stat(notesDir); os.IsNotExist(err) { fmt.Printf("Could not find repository -- directory %s does not exist.\n", notesDir) os.Exit(1) } if _, err := os.Stat(filepath.Join(notesDir, releaseNotesDir)); os.IsNotExist(err) { fmt.Printf("Could not find release notes directory -- %s does not exist.\n", filepath.Join(notesDir, releaseNotesDir)) os.Exit(2) } var err error releaseNoteFiles, err = getNewFilesInBranch(oldBranch, newBranch, pullRequest, notesDir, releaseNotesDir) if err != nil { fmt.Fprintf(os.Stderr, "failed to list files: %s\n", err.Error()) os.Exit(1) } fmt.Printf("Found %d files.\n\n", len(releaseNoteFiles)) fmt.Printf("Parsing release notes\n") releaseNotesEntries, err := parseReleaseNotesFiles(notesDir, releaseNoteFiles) if err != nil { fmt.Fprintf(os.Stderr, "Unable to read release notes: %s\n", err.Error()) os.Exit(1) } releaseNotes = append(releaseNotes, releaseNotesEntries...) 
} if len(releaseNotes) < 1 { fmt.Fprintf(os.Stderr, "failed to find any release notes.\n") // maps to EX_NOINPUT, but more importantly lets us differentiate between no files found and other errors os.Exit(66) } if validateOnly { return } fmt.Printf("\nLooking for markdown templates in %s.\n", templatesDir) templateFiles, err := getFilesWithExtension(templatesDir, "md") if err != nil { fmt.Fprintf(os.Stderr, "failed to list files: %s\n", err.Error()) os.Exit(1) } fmt.Printf("Found %d files.\n\n", len(templateFiles)) for _, filename := range templateFiles { output, err := populateTemplate(templatesDir, filename, releaseNotes, oldRelease, newRelease) if err != nil { fmt.Fprintf(os.Stderr, "Failed to parse template: %s\n", err.Error()) os.Exit(1) } if err := createDirIfNotExists(outDir); err != nil { fmt.Fprintf(os.Stderr, "Failed to create our dir: %s\n", err.Error()) } if err := writeAsMarkdown(path.Join(outDir, filename), output); err != nil { fmt.Fprintf(os.Stderr, "Failed to write markdown: %s\n", err.Error()) } else { fmt.Printf("Wrote markdown to %s\n", filename) } if err := writeAsHTML(path.Join(outDir, filename), output); err != nil { fmt.Fprintf(os.Stderr, "Failed to write HTML: %s\n", err.Error()) } else { fmt.Printf("Wrote markdown to %s.html\n", filename) } } } func createDirIfNotExists(path string) error { err := os.MkdirAll(path, 0o755) if os.IsExist(err) { return nil } return err } // writeAsHTML generates HTML from markdown before writing it to a file func writeAsHTML(filename string, markdown string) error { output := string(blackfriday.Run([]byte(markdown))) if err := ioutil.WriteFile(filename+".html", []byte(output), 0o644); err != nil { return err } return nil } // writeAsMarkdown writes markdown to a file func writeAsMarkdown(filename string, markdown string) error { if err := ioutil.WriteFile(filename, []byte(markdown), 0o644); err != nil { return err } return nil } func parseTemplateFormat(releaseNotes []Note, format string) ([]string, error) { 
template, err := ParseTemplate(format) if err != nil { return nil, fmt.Errorf("failed to parse template: %s", err.Error()) } return getNotesForTemplateFormat(releaseNotes, template), nil } func getNotesForTemplateFormat(notes []Note, template Template) []string { parsedNotes := make([]string, 0) for _, note := range notes { if template.Type == "releaseNotes" { parsedNotes = append(parsedNotes, note.getReleaseNotes(template.Kind, template.Area, template.Action)...) } else if template.Type == "upgradeNotes" { parsedNotes = append(parsedNotes, note.getUpgradeNotes()...) } else if template.Type == "securityNotes" { parsedNotes = append(parsedNotes, note.getSecurityNotes()...) } } return parsedNotes } // getFilesWithExtension returns the files from filePath with extension extension func getFilesWithExtension(filePath string, extension string) ([]string, error) { directory, err := os.Open(filePath) if err != nil { return nil, fmt.Errorf("failed to open directory: %s", err.Error()) } defer directory.Close() var files []string files, err = directory.Readdirnames(0) if err != nil { return nil, fmt.Errorf("unable to list files for directory %s: %s", filePath, err.Error()) } filesWithExtension := make([]string, 0) for _, fileName := range files { if strings.HasSuffix(fileName, extension) { filesWithExtension = append(filesWithExtension, fileName) } } return filesWithExtension, nil } func parseReleaseNotesFiles(filePath string, files []string) ([]Note, error) { notes := make([]Note, 0) for _, file := range files { file = path.Join(filePath, file) contents, err := ioutil.ReadFile(file) if err != nil { return nil, fmt.Errorf("unable to open file %s: %s", file, err.Error()) } var note Note if err = yaml.Unmarshal(contents, &note); err != nil { return nil, fmt.Errorf("unable to parse release note %s:%s", file, err.Error()) } note.File = file notes = append(notes, note) fmt.Printf("found %d upgrade notes, %d release notes, and %d security notes in %s\n", len(note.UpgradeNotes), 
len(note.ReleaseNotes), len(note.SecurityNotes), note.File) } return notes, nil } func populateTemplate(filepath string, filename string, releaseNotes []Note, oldRelease string, newRelease string) (string, error) { filename = path.Join(filepath, filename) fmt.Printf("Processing %s\n", filename) contents, err := ioutil.ReadFile(filename) if err != nil { return "", fmt.Errorf("unable to open file %s: %s", filename, err.Error()) } comment := regexp.MustCompile("<!--(.*)-->") output := string(contents) output = strings.Replace(output, "<!--oldRelease-->", oldRelease, -1) output = strings.Replace(output, "<!--newRelease-->", newRelease, -1) results := comment.FindAllString(output, -1) for _, result := range results { contents, err := parseTemplateFormat(releaseNotes, result) if err != nil { return "", fmt.Errorf("unable to parse templates: %s", err.Error()) } joinedContents := strings.Join(contents, "\n") output = strings.Replace(output, result, joinedContents, -1) } return output, nil } type prView struct { Files []struct { Path string `json:"path"` } `json:"files"` } func getFilesFromGHPRView(path string, pullRequest string, notesSubpath string) ([]string, error) { cmdStr := fmt.Sprintf("cd %s; gh pr view %s --json files", path, pullRequest) fmt.Printf("Executing: %s\n", cmdStr) cmd := exec.Command("bash", "-c", cmdStr) cmd.Env = os.Environ() out, err := cmd.CombinedOutput() fmt.Printf("%s\n", out) if err != nil { return nil, fmt.Errorf("received error running GH: %s", err.Error()) } var prResults prView if err := json.Unmarshal(out, &prResults); err != nil { return nil, fmt.Errorf("failed to parse GH results: %s", err.Error()) } var results []string for _, val := range prResults.Files { if strings.Contains(val.Path, notesSubpath) { results = append(results, val.Path) } } return results, nil } func getNewFilesInBranch(oldBranch string, newBranch string, pullRequest string, path string, notesSubpath string) ([]string, error) { // if there's a pull request, we can just 
get the changed files from GitHub. If not, we have to do it manually. if pullRequest != "" { return getFilesFromGHPRView(path, pullRequest, notesSubpath) } cmd := fmt.Sprintf("cd %s; git diff-tree -r --diff-filter=AMR --name-only --relative=%s '%s' '%s'", path, notesSubpath, oldBranch, newBranch) fmt.Printf("Executing: %s\n", cmd) out, err := exec.Command("bash", "-c", cmd).CombinedOutput() if err != nil { return nil, err } outFiles := strings.Split(string(out), "\n") // the getFilesFromGHPRView(path, pullRequest, notesSubpath) method returns file names which are relative to the repo path. // the git diff-tree is relative to the notesSupbpath, so we need to add the subpath back to the filenames. outFileswithPath := []string{} for _, f := range outFiles[:len(outFiles)-1] { // skip the last file which is empty outFileswithPath = append(outFileswithPath, filepath.Join(notesSubpath, f)) } return outFileswithPath, nil }
package command import ( "net" "os" "path/filepath" "runtime" "strconv" "strings" "time" "github.com/src-d/gitbase" "github.com/src-d/gitbase/internal/function" "github.com/src-d/gitbase/internal/rule" "github.com/opentracing/opentracing-go" "github.com/sirupsen/logrus" "github.com/uber/jaeger-client-go/config" "gopkg.in/src-d/go-git.v4/plumbing/cache" sqle "gopkg.in/src-d/go-mysql-server.v0" "gopkg.in/src-d/go-mysql-server.v0/server" "gopkg.in/src-d/go-mysql-server.v0/sql" "gopkg.in/src-d/go-mysql-server.v0/sql/analyzer" "gopkg.in/src-d/go-mysql-server.v0/sql/index/pilosa" "gopkg.in/src-d/go-vitess.v1/mysql" ) const ( ServerDescription = "Starts a gitbase server instance" ServerHelp = ServerDescription + "\n\n" + "By default when gitbase encounters an error in a repository it\n" + "stops the query. With GITBASE_SKIP_GIT_ERRORS variable it won't\n" + "complain and just skip those rows or repositories." TracerServiceName = "gitbase" ) // Server represents the `server` command of gitbase cli tool. type Server struct { engine *sqle.Engine pool *gitbase.RepositoryPool name string Version string // Version of the application. Directories []string `short:"d" long:"directories" description:"Path where the git repositories are located (standard and siva), multiple directories can be defined. 
Accepts globs."` Depth int `long:"depth" default:"1000" description:"load repositories looking at less than <depth> nested subdirectories."` Host string `long:"host" default:"localhost" description:"Host where the server is going to listen"` Port int `short:"p" long:"port" default:"3306" description:"Port where the server is going to listen"` User string `short:"u" long:"user" default:"root" description:"User name used for connection"` Password string `short:"P" long:"password" default:"" description:"Password used for connection"` ConnTimeout int `short:"t" long:"timeout" env:"GITBASE_CONNECTION_TIMEOUT" description:"Timeout in seconds used for connections"` IndexDir string `short:"i" long:"index" default:"/var/lib/gitbase/index" description:"Directory where the gitbase indexes information will be persisted." env:"GITBASE_INDEX_DIR"` CacheSize cache.FileSize `long:"cache" default:"512" description:"Object cache size in megabytes" env:"GITBASE_CACHESIZE_MB"` Parallelism uint `long:"parallelism" description:"Maximum number of parallel threads per table. By default, it's the number of CPU cores. 0 means default, 1 means disabled."` DisableSquash bool `long:"no-squash" description:"Disables the table squashing."` TraceEnabled bool `long:"trace" env:"GITBASE_TRACE" description:"Enables jaeger tracing"` ReadOnly bool `short:"r" long:"readonly" description:"Only allow read queries. This disables creating and deleting indexes as well." env:"GITBASE_READONLY"` SkipGitErrors bool // SkipGitErrors disables failing when Git errors are found. 
DisableGit bool `long:"no-git" description:"disable the load of git standard repositories."` DisableSiva bool `long:"no-siva" description:"disable the load of siva files."` Verbose bool `short:"v" description:"Activates the verbose mode"` } type jaegerLogrus struct { *logrus.Entry } func (l *jaegerLogrus) Error(s string) { l.Entry.Error(s) } func NewDatabaseEngine( readonly bool, version string, parallelism int, squash bool, ) *sqle.Engine { catalog := sql.NewCatalog() ab := analyzer.NewBuilder(catalog) if readonly { ab = ab.ReadOnly() } if parallelism == 0 { parallelism = runtime.NumCPU() } if parallelism > 1 { ab = ab.WithParallelism(parallelism) } if squash { ab = ab.AddPostAnalyzeRule(rule.SquashJoinsRule, rule.SquashJoins) } a := ab.Build() engine := sqle.New(catalog, a, &sqle.Config{ VersionPostfix: version, }) return engine } // Execute starts a new gitbase server based on provided configuration, it // honors the go-flags.Commander interface. func (c *Server) Execute(args []string) error { if c.Verbose { logrus.SetLevel(logrus.DebugLevel) } if err := c.buildDatabase(); err != nil { logrus.WithField("error", err).Fatal("unable to initialize database engine") return err } auth := mysql.NewAuthServerStatic() auth.Entries[c.User] = []*mysql.AuthServerStaticEntry{ {Password: c.Password}, } var tracer opentracing.Tracer if c.TraceEnabled { cfg, err := config.FromEnv() if err != nil { logrus.WithField("error", err). 
Fatal("unable to read jaeger environment") return err } if cfg.ServiceName == "" { cfg.ServiceName = TracerServiceName } logger := &jaegerLogrus{logrus.WithField("subsystem", "jaeger")} t, closer, err := cfg.NewTracer( config.Logger(logger), ) if err != nil { logrus.WithField("error", err).Fatal("unable to initialize tracer") return err } tracer = t defer closer.Close() logrus.Info("tracing enabled") } hostString := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) timeout := time.Duration(c.ConnTimeout) * time.Second s, err := server.NewServer( server.Config{ Protocol: "tcp", Address: hostString, Auth: auth, Tracer: tracer, ConnReadTimeout: timeout, ConnWriteTimeout: timeout, }, c.engine, gitbase.NewSessionBuilder(c.pool, gitbase.WithSkipGitErrors(c.SkipGitErrors), ), ) if err != nil { return err } logrus.Infof("server started and listening on %s:%d", c.Host, c.Port) return s.Start() } func (c *Server) buildDatabase() error { if c.engine == nil { c.engine = NewDatabaseEngine( c.ReadOnly, c.Version, int(c.Parallelism), !c.DisableSquash, ) } c.pool = gitbase.NewRepositoryPool(c.CacheSize * cache.MiByte) if err := c.addDirectories(); err != nil { return err } c.engine.AddDatabase(gitbase.NewDatabase(c.name)) logrus.WithField("db", c.name).Debug("registered database to catalog") c.engine.Catalog.RegisterFunctions(function.Functions) logrus.Debug("registered all available functions in catalog") if err := c.registerDrivers(); err != nil { return err } if !c.DisableSquash { logrus.Info("squash tables rule is enabled") } else { logrus.Warn("squash tables rule is disabled") } return c.engine.Init() } func (c *Server) registerDrivers() error { if err := os.MkdirAll(c.IndexDir, 0755); err != nil { return err } logrus.Debug("created index storage") c.engine.Catalog.RegisterIndexDriver( pilosa.NewDriver(filepath.Join(c.IndexDir, pilosa.DriverID)), ) logrus.Debug("registered pilosa index driver") return nil } func (c *Server) addDirectories() error { if len(c.Directories) == 0 { 
logrus.Error("At least one folder should be provided.") } if c.DisableGit && c.DisableSiva { logrus.Warn("The load of git repositories and siva files are disabled," + " no repository will be added.") return nil } if c.Depth < 1 { logrus.Warn("--depth flag set to a number less than 1," + " no repository will be added.") return nil } for _, directory := range c.Directories { if err := c.addDirectory(directory); err != nil { return err } } return nil } func (c *Server) addDirectory(directory string) error { matches, err := gitbase.PatternMatches(directory) if err != nil { return err } for _, match := range matches { if err := c.addMatch(match); err != nil { logrus.WithFields(logrus.Fields{ "path": match, "error": err, }).Error("path couldn't be inspected") } } return nil } func (c *Server) addMatch(match string) error { root, err := filepath.Abs(match) if err != nil { return err } rooti, err := os.Lstat(root) if err != nil { return err } if rooti.Mode()&os.ModeSymlink != 0 { root, err = os.Readlink(root) if err != nil { return err } } initDepth := strings.Count(root, string(os.PathSeparator)) return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { if err := c.addIfGitRepo(path); err != nil { return err } depth := strings.Count(path, string(os.PathSeparator)) - initDepth if depth >= c.Depth { return filepath.SkipDir } return nil } if !c.DisableSiva && info.Mode().IsRegular() && gitbase.IsSivaFile(info.Name()) { if err := c.pool.AddSivaFileWithID(info.Name(), path); err != nil { logrus.WithFields(logrus.Fields{ "path": path, "error": err, }).Error("repository could not be addded") return nil } logrus.WithField("path", path).Debug("repository added") } return nil }) } func (c *Server) addIfGitRepo(path string) error { ok, err := gitbase.IsGitRepo(path) if err != nil { logrus.WithFields(logrus.Fields{ "path": path, "error": err, }).Error("path couldn't be inspected") return filepath.SkipDir } if ok { 
if !c.DisableGit { base := filepath.Base(path) if err := c.pool.AddGitWithID(base, path); err != nil { logrus.WithFields(logrus.Fields{ "id": base, "path": path, "error": err, }).Error("repository could not be added") } logrus.WithField("path", path).Debug("repository added") } // either the repository is added or not, the path must be skipped return filepath.SkipDir } return nil } command, server: save one line Signed-off-by: bake <74521faff0db9a893a1cd5872e2b81792c8633ed@192k.pw> package command import ( "net" "os" "path/filepath" "runtime" "strconv" "strings" "time" "github.com/src-d/gitbase" "github.com/src-d/gitbase/internal/function" "github.com/src-d/gitbase/internal/rule" "github.com/opentracing/opentracing-go" "github.com/sirupsen/logrus" "github.com/uber/jaeger-client-go/config" "gopkg.in/src-d/go-git.v4/plumbing/cache" sqle "gopkg.in/src-d/go-mysql-server.v0" "gopkg.in/src-d/go-mysql-server.v0/server" "gopkg.in/src-d/go-mysql-server.v0/sql" "gopkg.in/src-d/go-mysql-server.v0/sql/analyzer" "gopkg.in/src-d/go-mysql-server.v0/sql/index/pilosa" "gopkg.in/src-d/go-vitess.v1/mysql" ) const ( ServerDescription = "Starts a gitbase server instance" ServerHelp = ServerDescription + "\n\n" + "By default when gitbase encounters an error in a repository it\n" + "stops the query. With GITBASE_SKIP_GIT_ERRORS variable it won't\n" + "complain and just skip those rows or repositories." TracerServiceName = "gitbase" ) // Server represents the `server` command of gitbase cli tool. type Server struct { engine *sqle.Engine pool *gitbase.RepositoryPool name string Version string // Version of the application. Directories []string `short:"d" long:"directories" description:"Path where the git repositories are located (standard and siva), multiple directories can be defined. 
Accepts globs."` Depth int `long:"depth" default:"1000" description:"load repositories looking at less than <depth> nested subdirectories."` Host string `long:"host" default:"localhost" description:"Host where the server is going to listen"` Port int `short:"p" long:"port" default:"3306" description:"Port where the server is going to listen"` User string `short:"u" long:"user" default:"root" description:"User name used for connection"` Password string `short:"P" long:"password" default:"" description:"Password used for connection"` ConnTimeout int `short:"t" long:"timeout" env:"GITBASE_CONNECTION_TIMEOUT" description:"Timeout in seconds used for connections"` IndexDir string `short:"i" long:"index" default:"/var/lib/gitbase/index" description:"Directory where the gitbase indexes information will be persisted." env:"GITBASE_INDEX_DIR"` CacheSize cache.FileSize `long:"cache" default:"512" description:"Object cache size in megabytes" env:"GITBASE_CACHESIZE_MB"` Parallelism uint `long:"parallelism" description:"Maximum number of parallel threads per table. By default, it's the number of CPU cores. 0 means default, 1 means disabled."` DisableSquash bool `long:"no-squash" description:"Disables the table squashing."` TraceEnabled bool `long:"trace" env:"GITBASE_TRACE" description:"Enables jaeger tracing"` ReadOnly bool `short:"r" long:"readonly" description:"Only allow read queries. This disables creating and deleting indexes as well." env:"GITBASE_READONLY"` SkipGitErrors bool // SkipGitErrors disables failing when Git errors are found. 
DisableGit bool `long:"no-git" description:"disable the load of git standard repositories."` DisableSiva bool `long:"no-siva" description:"disable the load of siva files."` Verbose bool `short:"v" description:"Activates the verbose mode"` } type jaegerLogrus struct { *logrus.Entry } func (l *jaegerLogrus) Error(s string) { l.Entry.Error(s) } func NewDatabaseEngine( readonly bool, version string, parallelism int, squash bool, ) *sqle.Engine { catalog := sql.NewCatalog() ab := analyzer.NewBuilder(catalog) if readonly { ab = ab.ReadOnly() } if parallelism == 0 { parallelism = runtime.NumCPU() } if parallelism > 1 { ab = ab.WithParallelism(parallelism) } if squash { ab = ab.AddPostAnalyzeRule(rule.SquashJoinsRule, rule.SquashJoins) } a := ab.Build() engine := sqle.New(catalog, a, &sqle.Config{ VersionPostfix: version, }) return engine } // Execute starts a new gitbase server based on provided configuration, it // honors the go-flags.Commander interface. func (c *Server) Execute(args []string) error { if c.Verbose { logrus.SetLevel(logrus.DebugLevel) } if err := c.buildDatabase(); err != nil { logrus.WithField("error", err).Fatal("unable to initialize database engine") return err } auth := mysql.NewAuthServerStatic() auth.Entries[c.User] = []*mysql.AuthServerStaticEntry{ {Password: c.Password}, } var tracer opentracing.Tracer if c.TraceEnabled { cfg, err := config.FromEnv() if err != nil { logrus.WithField("error", err). 
Fatal("unable to read jaeger environment") return err } if cfg.ServiceName == "" { cfg.ServiceName = TracerServiceName } logger := &jaegerLogrus{logrus.WithField("subsystem", "jaeger")} t, closer, err := cfg.NewTracer( config.Logger(logger), ) if err != nil { logrus.WithField("error", err).Fatal("unable to initialize tracer") return err } tracer = t defer closer.Close() logrus.Info("tracing enabled") } hostString := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) timeout := time.Duration(c.ConnTimeout) * time.Second s, err := server.NewServer( server.Config{ Protocol: "tcp", Address: hostString, Auth: auth, Tracer: tracer, ConnReadTimeout: timeout, ConnWriteTimeout: timeout, }, c.engine, gitbase.NewSessionBuilder(c.pool, gitbase.WithSkipGitErrors(c.SkipGitErrors), ), ) if err != nil { return err } logrus.Infof("server started and listening on %s:%d", c.Host, c.Port) return s.Start() } func (c *Server) buildDatabase() error { if c.engine == nil { c.engine = NewDatabaseEngine( c.ReadOnly, c.Version, int(c.Parallelism), !c.DisableSquash, ) } c.pool = gitbase.NewRepositoryPool(c.CacheSize * cache.MiByte) if err := c.addDirectories(); err != nil { return err } c.engine.AddDatabase(gitbase.NewDatabase(c.name)) logrus.WithField("db", c.name).Debug("registered database to catalog") c.engine.Catalog.RegisterFunctions(function.Functions) logrus.Debug("registered all available functions in catalog") if err := c.registerDrivers(); err != nil { return err } if !c.DisableSquash { logrus.Info("squash tables rule is enabled") } else { logrus.Warn("squash tables rule is disabled") } return c.engine.Init() } func (c *Server) registerDrivers() error { if err := os.MkdirAll(c.IndexDir, 0755); err != nil { return err } logrus.Debug("created index storage") c.engine.Catalog.RegisterIndexDriver( pilosa.NewDriver(filepath.Join(c.IndexDir, pilosa.DriverID)), ) logrus.Debug("registered pilosa index driver") return nil } func (c *Server) addDirectories() error { if len(c.Directories) == 0 { 
logrus.Error("At least one folder should be provided.") } if c.DisableGit && c.DisableSiva { logrus.Warn("The load of git repositories and siva files are disabled," + " no repository will be added.") return nil } if c.Depth < 1 { logrus.Warn("--depth flag set to a number less than 1," + " no repository will be added.") return nil } for _, directory := range c.Directories { if err := c.addDirectory(directory); err != nil { return err } } return nil } func (c *Server) addDirectory(directory string) error { matches, err := gitbase.PatternMatches(directory) if err != nil { return err } for _, match := range matches { if err := c.addMatch(match); err != nil { logrus.WithFields(logrus.Fields{ "path": match, "error": err, }).Error("path couldn't be inspected") } } return nil } func (c *Server) addMatch(match string) error { root, err := filepath.Abs(match) if err != nil { return err } rooti, err := os.Lstat(root) if err != nil { return err } if rooti.Mode()&os.ModeSymlink != 0 { if root, err = os.Readlink(root); err != nil { return err } } initDepth := strings.Count(root, string(os.PathSeparator)) return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { if err := c.addIfGitRepo(path); err != nil { return err } depth := strings.Count(path, string(os.PathSeparator)) - initDepth if depth >= c.Depth { return filepath.SkipDir } return nil } if !c.DisableSiva && info.Mode().IsRegular() && gitbase.IsSivaFile(info.Name()) { if err := c.pool.AddSivaFileWithID(info.Name(), path); err != nil { logrus.WithFields(logrus.Fields{ "path": path, "error": err, }).Error("repository could not be addded") return nil } logrus.WithField("path", path).Debug("repository added") } return nil }) } func (c *Server) addIfGitRepo(path string) error { ok, err := gitbase.IsGitRepo(path) if err != nil { logrus.WithFields(logrus.Fields{ "path": path, "error": err, }).Error("path couldn't be inspected") return filepath.SkipDir } if ok 
{ if !c.DisableGit { base := filepath.Base(path) if err := c.pool.AddGitWithID(base, path); err != nil { logrus.WithFields(logrus.Fields{ "id": base, "path": path, "error": err, }).Error("repository could not be added") } logrus.WithField("path", path).Debug("repository added") } // either the repository is added or not, the path must be skipped return filepath.SkipDir } return nil }
package command import ( "fmt" "net" "os" "path/filepath" "strconv" "github.com/src-d/gitbase" "github.com/src-d/gitbase/internal/function" "github.com/src-d/gitbase/internal/rule" "github.com/opentracing/opentracing-go" gopilosa "github.com/pilosa/go-pilosa" "github.com/sirupsen/logrus" jaeger "github.com/uber/jaeger-client-go" "github.com/uber/jaeger-client-go/config" sqle "gopkg.in/src-d/go-mysql-server.v0" "gopkg.in/src-d/go-mysql-server.v0/server" "gopkg.in/src-d/go-mysql-server.v0/sql/analyzer" "gopkg.in/src-d/go-mysql-server.v0/sql/index/pilosa" "gopkg.in/src-d/go-vitess.v0/mysql" ) const ( ServerDescription = "Starts a gitbase server instance" ServerHelp = ServerDescription + "\n\n" + "By default when gitbase encounters an error in a repository it\n" + "stops the query. With GITBASE_SKIP_GIT_ERRORS variable it won't\n" + "complain and just skip those rows or repositories." TracerServiceName = "gitbase" ) // Server represents the `server` command of gitbase cli tool. type Server struct { Verbose bool `short:"v" description:"Activates the verbose mode"` Git []string `short:"g" long:"git" description:"Path where the git repositories are located, multiple directories can be defined. Accepts globs."` Siva []string `long:"siva" description:"Path where the siva repositories are located, multiple directories can be defined. 
Accepts globs."` Host string `short:"h" long:"host" default:"localhost" description:"Host where the server is going to listen"` Port int `short:"p" long:"port" default:"3306" description:"Port where the server is going to listen"` User string `short:"u" long:"user" default:"root" description:"User name used for connection"` Password string `short:"P" long:"password" default:"" description:"Password used for connection"` PilosaURL string `long:"pilosa" default:"http://localhost:10101" description:"URL to your pilosa server" env:"PILOSA_ENDPOINT"` IndexDir string `short:"i" long:"index" default:"/var/lib/gitbase/index" description:"Directory where the gitbase indexes information will be persisted." env:"GITBASE_INDEX_DIR"` DisableSquash bool `long:"no-squash" description:"Disables the table squashing."` TraceEnabled bool `long:"trace" env:"GITBASE_TRACE" description:"Enables jaeger tracing"` // SkipGitErrors disables failing when Git errors are found. SkipGitErrors bool engine *sqle.Engine pool *gitbase.RepositoryPool name string } type jaegerLogrus struct { *logrus.Entry } func (l *jaegerLogrus) Error(s string) { l.Entry.Error(s) } // Execute starts a new gitbase server based on provided configuration, it // honors the go-flags.Commander interface. func (c *Server) Execute(args []string) error { if c.Verbose { logrus.SetLevel(logrus.DebugLevel) } if err := c.buildDatabase(); err != nil { logrus.WithField("error", err).Fatal("unable to start database server") return err } auth := mysql.NewAuthServerStatic() auth.Entries[c.User] = []*mysql.AuthServerStaticEntry{ {Password: c.Password}, } var tracer opentracing.Tracer if c.TraceEnabled { if os.Getenv("JAEGER_SERVICE_NAME") == "" { os.Setenv("JAEGER_SERVICE_NAME", TracerServiceName) } cfg, err := config.FromEnv() if err != nil { logrus.WithField("error", err). 
Fatal("unable to read jaeger environment") return err } logger := &jaegerLogrus{logrus.WithField("subsystem", "jaeger")} t, closer, err := cfg.NewTracer( config.Logger(logger), ) if err != nil { logrus.WithField("error", err).Fatal("unable to initialize tracer") return err } tracer = t defer closer.Close() jaegerHost := os.Getenv("JAEGER_AGENT_HOST") if jaegerHost == "" { jaegerHost = jaeger.DefaultUDPSpanServerHost } jaegerPort := os.Getenv("JAEGER_AGENT_PORT") if jaegerPort == "" { jaegerPort = strconv.Itoa(jaeger.DefaultUDPSpanServerPort) } endpoint := fmt.Sprintf("%s:%s", jaegerHost, jaegerPort) logrus.WithField("endpoint", endpoint).Info("tracing enabled") } hostString := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) s, err := server.NewServer( server.Config{ Protocol: "tcp", Address: hostString, Auth: auth, Tracer: tracer, }, c.engine, gitbase.NewSessionBuilder(c.pool, gitbase.WithSkipGitErrors(c.SkipGitErrors), ), ) if err != nil { return err } logrus.Infof("server started and listening on %s:%d", c.Host, c.Port) return s.Start() } func (c *Server) buildDatabase() error { if c.engine == nil { c.engine = sqle.NewDefault() } c.pool = gitbase.NewRepositoryPool() if err := c.addDirectories(); err != nil { return err } c.engine.AddDatabase(gitbase.NewDatabase(c.name)) logrus.WithField("db", c.name).Debug("registered database to catalog") c.engine.Catalog.RegisterFunctions(function.Functions) logrus.Debug("registered all available functions in catalog") if err := c.registerDrivers(); err != nil { return err } if !c.DisableSquash { logrus.Info("squash tables rule is enabled") a := analyzer.NewBuilder(c.engine.Catalog). AddPostAnalyzeRule(rule.SquashJoinsRule, rule.SquashJoins). 
Build() a.CurrentDatabase = c.engine.Analyzer.CurrentDatabase c.engine.Analyzer = a } else { logrus.Warn("squash tables rule is disabled") } return c.engine.Init() } func (c *Server) registerDrivers() error { if err := os.MkdirAll(c.IndexDir, 0755); err != nil { return err } logrus.Debug("created index storage") client, err := gopilosa.NewClient(c.PilosaURL) if err != nil { return err } logrus.Debug("established connection with pilosa") c.engine.Catalog.RegisterIndexDriver(pilosa.NewDriver(c.IndexDir, client)) logrus.Debug("registered pilosa index driver") return nil } func (c *Server) addDirectories() error { if len(c.Git) == 0 && len(c.Siva) == 0 { logrus.Error("At least one git folder or siva folder should be provided.") } for _, pattern := range c.Git { if err := c.addGitPattern(pattern); err != nil { return err } } for _, pattern := range c.Siva { if err := c.addSivaPattern(pattern); err != nil { return err } } return nil } func (c *Server) addGitPattern(pattern string) error { prefix, matches, err := gitbase.PatternMatches(pattern) if err != nil { return err } for _, m := range matches { logrus.WithField("dir", m).Debug("git repositories directory added") if err := c.pool.AddDir(prefix, m); err != nil { return err } } return nil } func (c *Server) addSivaPattern(pattern string) error { matches, err := filepath.Glob(pattern) if err != nil { return err } for _, m := range matches { logrus.WithField("dir", m).Debug("siva repositories directory added") if err := c.pool.AddSivaDir(m); err != nil { return err } } return nil } cmd/gitbase: add readonly flag Signed-off-by: Miguel Molina <75004f149038473757da0be07ef76dd4a9bdbc8d@erizocosmi.co> package command import ( "fmt" "net" "os" "path/filepath" "strconv" "github.com/src-d/gitbase" "github.com/src-d/gitbase/internal/function" "github.com/src-d/gitbase/internal/rule" "github.com/opentracing/opentracing-go" gopilosa "github.com/pilosa/go-pilosa" "github.com/sirupsen/logrus" jaeger "github.com/uber/jaeger-client-go" 
"github.com/uber/jaeger-client-go/config" sqle "gopkg.in/src-d/go-mysql-server.v0" "gopkg.in/src-d/go-mysql-server.v0/server" "gopkg.in/src-d/go-mysql-server.v0/sql" "gopkg.in/src-d/go-mysql-server.v0/sql/analyzer" "gopkg.in/src-d/go-mysql-server.v0/sql/index/pilosa" "gopkg.in/src-d/go-vitess.v0/mysql" ) const ( ServerDescription = "Starts a gitbase server instance" ServerHelp = ServerDescription + "\n\n" + "By default when gitbase encounters an error in a repository it\n" + "stops the query. With GITBASE_SKIP_GIT_ERRORS variable it won't\n" + "complain and just skip those rows or repositories." TracerServiceName = "gitbase" ) // Server represents the `server` command of gitbase cli tool. type Server struct { Verbose bool `short:"v" description:"Activates the verbose mode"` Git []string `short:"g" long:"git" description:"Path where the git repositories are located, multiple directories can be defined. Accepts globs."` Siva []string `long:"siva" description:"Path where the siva repositories are located, multiple directories can be defined. Accepts globs."` Host string `short:"h" long:"host" default:"localhost" description:"Host where the server is going to listen"` Port int `short:"p" long:"port" default:"3306" description:"Port where the server is going to listen"` User string `short:"u" long:"user" default:"root" description:"User name used for connection"` Password string `short:"P" long:"password" default:"" description:"Password used for connection"` PilosaURL string `long:"pilosa" default:"http://localhost:10101" description:"URL to your pilosa server" env:"PILOSA_ENDPOINT"` IndexDir string `short:"i" long:"index" default:"/var/lib/gitbase/index" description:"Directory where the gitbase indexes information will be persisted." 
env:"GITBASE_INDEX_DIR"` DisableSquash bool `long:"no-squash" description:"Disables the table squashing."` TraceEnabled bool `long:"trace" env:"GITBASE_TRACE" description:"Enables jaeger tracing"` ReadOnly bool `short:"r" long:"readonly" description:"Only allow read queries. This disables creating and deleting indexes as well." env:"GITBASE_READONLY"` // SkipGitErrors disables failing when Git errors are found. SkipGitErrors bool engine *sqle.Engine pool *gitbase.RepositoryPool name string } type jaegerLogrus struct { *logrus.Entry } func (l *jaegerLogrus) Error(s string) { l.Entry.Error(s) } // Execute starts a new gitbase server based on provided configuration, it // honors the go-flags.Commander interface. func (c *Server) Execute(args []string) error { if c.Verbose { logrus.SetLevel(logrus.DebugLevel) } if err := c.buildDatabase(); err != nil { logrus.WithField("error", err).Fatal("unable to start database server") return err } auth := mysql.NewAuthServerStatic() auth.Entries[c.User] = []*mysql.AuthServerStaticEntry{ {Password: c.Password}, } var tracer opentracing.Tracer if c.TraceEnabled { if os.Getenv("JAEGER_SERVICE_NAME") == "" { os.Setenv("JAEGER_SERVICE_NAME", TracerServiceName) } cfg, err := config.FromEnv() if err != nil { logrus.WithField("error", err). 
Fatal("unable to read jaeger environment") return err } logger := &jaegerLogrus{logrus.WithField("subsystem", "jaeger")} t, closer, err := cfg.NewTracer( config.Logger(logger), ) if err != nil { logrus.WithField("error", err).Fatal("unable to initialize tracer") return err } tracer = t defer closer.Close() jaegerHost := os.Getenv("JAEGER_AGENT_HOST") if jaegerHost == "" { jaegerHost = jaeger.DefaultUDPSpanServerHost } jaegerPort := os.Getenv("JAEGER_AGENT_PORT") if jaegerPort == "" { jaegerPort = strconv.Itoa(jaeger.DefaultUDPSpanServerPort) } endpoint := fmt.Sprintf("%s:%s", jaegerHost, jaegerPort) logrus.WithField("endpoint", endpoint).Info("tracing enabled") } hostString := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) s, err := server.NewServer( server.Config{ Protocol: "tcp", Address: hostString, Auth: auth, Tracer: tracer, }, c.engine, gitbase.NewSessionBuilder(c.pool, gitbase.WithSkipGitErrors(c.SkipGitErrors), ), ) if err != nil { return err } logrus.Infof("server started and listening on %s:%d", c.Host, c.Port) return s.Start() } func (c *Server) buildDatabase() error { if c.engine == nil { catalog := sql.NewCatalog() ab := analyzer.NewBuilder(catalog) if c.ReadOnly { ab = ab.ReadOnly() } a := ab.Build() c.engine = sqle.New(catalog, a) } c.pool = gitbase.NewRepositoryPool() if err := c.addDirectories(); err != nil { return err } c.engine.AddDatabase(gitbase.NewDatabase(c.name)) logrus.WithField("db", c.name).Debug("registered database to catalog") c.engine.Catalog.RegisterFunctions(function.Functions) logrus.Debug("registered all available functions in catalog") if err := c.registerDrivers(); err != nil { return err } if !c.DisableSquash { logrus.Info("squash tables rule is enabled") a := analyzer.NewBuilder(c.engine.Catalog). AddPostAnalyzeRule(rule.SquashJoinsRule, rule.SquashJoins). 
Build() a.CurrentDatabase = c.engine.Analyzer.CurrentDatabase c.engine.Analyzer = a } else { logrus.Warn("squash tables rule is disabled") } return c.engine.Init() } func (c *Server) registerDrivers() error { if err := os.MkdirAll(c.IndexDir, 0755); err != nil { return err } logrus.Debug("created index storage") client, err := gopilosa.NewClient(c.PilosaURL) if err != nil { return err } logrus.Debug("established connection with pilosa") c.engine.Catalog.RegisterIndexDriver(pilosa.NewDriver(c.IndexDir, client)) logrus.Debug("registered pilosa index driver") return nil } func (c *Server) addDirectories() error { if len(c.Git) == 0 && len(c.Siva) == 0 { logrus.Error("At least one git folder or siva folder should be provided.") } for _, pattern := range c.Git { if err := c.addGitPattern(pattern); err != nil { return err } } for _, pattern := range c.Siva { if err := c.addSivaPattern(pattern); err != nil { return err } } return nil } func (c *Server) addGitPattern(pattern string) error { prefix, matches, err := gitbase.PatternMatches(pattern) if err != nil { return err } for _, m := range matches { logrus.WithField("dir", m).Debug("git repositories directory added") if err := c.pool.AddDir(prefix, m); err != nil { return err } } return nil } func (c *Server) addSivaPattern(pattern string) error { matches, err := filepath.Glob(pattern) if err != nil { return err } for _, m := range matches { logrus.WithField("dir", m).Debug("siva repositories directory added") if err := c.pool.AddSivaDir(m); err != nil { return err } } return nil }
package main

import (
	"encoding/gob"
	"errors"
	imgclient "github.com/Symantec/Dominator/imageserver/client"
	"github.com/Symantec/Dominator/imageserver/scanner"
	"github.com/Symantec/Dominator/lib/filesystem"
	"github.com/Symantec/Dominator/lib/hash"
	objectclient "github.com/Symantec/Dominator/lib/objectserver/client"
	fsdriver "github.com/Symantec/Dominator/lib/objectserver/filesystem"
	"github.com/Symantec/Dominator/lib/srpc"
	"github.com/Symantec/Dominator/proto/imageserver"
	"io"
	"log"
	"time"
)

// replicator runs forever, maintaining a connection to the image server at
// address and applying its update stream to the local image database and
// object server. After each failed cycle it sleeps until the current backoff
// deadline before reconnecting; the timeout doubles per cycle up to one
// minute, and is reset to the initial 15s when a connection outlived the
// deadline (i.e. it was healthy for a while before closing).
func replicator(address string, imdb *scanner.ImageDataBase,
	objSrv *fsdriver.ObjectServer, archiveMode bool, logger *log.Logger) {
	initialTimeout := time.Second * 15
	timeout := initialTimeout
	var nextSleepStopTime time.Time
	for {
		nextSleepStopTime = time.Now().Add(timeout)
		if client, err := srpc.DialHTTP("tcp", address, timeout); err != nil {
			logger.Printf("Error dialling: %s %s\n", address, err)
		} else {
			if conn, err := client.Call(
				"ImageServer.GetImageUpdates"); err != nil {
				logger.Println(err)
			} else {
				if err := getUpdates(address, conn, imdb, objSrv, archiveMode,
					logger); err != nil {
					if err == io.EOF {
						logger.Println(
							"Connection to image replicator closed")
						// The stream closed after the sleep deadline had
						// already passed: the connection was healthy long
						// enough to restart the backoff from scratch.
						if nextSleepStopTime.Sub(time.Now()) < 1 {
							timeout = initialTimeout
						}
					} else {
						logger.Println(err)
					}
				}
				conn.Close()
			}
			client.Close()
		}
		time.Sleep(nextSleepStopTime.Sub(time.Now()))
		if timeout < time.Minute {
			// Exponential backoff, capped at one minute.
			timeout *= 2
		}
	}
}

// getUpdates consumes the gob-encoded stream of ImageUpdate messages on
// conn, applying adds and deletes to imdb. The server first announces the
// images it currently holds; an update with an empty Name marks the end of
// that initial list, at which point local images absent from the list are
// purged. In archiveMode the initial list is not tracked (initialImages is
// nil) so no purge occurs. Returns io.EOF when the stream closes cleanly.
func getUpdates(address string, conn *srpc.Conn, imdb *scanner.ImageDataBase,
	objSrv *fsdriver.ObjectServer, archiveMode bool,
	logger *log.Logger) error {
	logger.Printf("Image replicator: connected to: %s\n", address)
	decoder := gob.NewDecoder(conn)
	initialImages := make(map[string]struct{})
	if archiveMode {
		initialImages = nil
	}
	for {
		var imageUpdate imageserver.ImageUpdate
		if err := decoder.Decode(&imageUpdate); err != nil {
			if err == io.EOF {
				return err
			}
			return errors.New("decode err: " + err.Error())
		}
		if imageUpdate.Name == "" {
			// End-of-initial-list marker: delete local images the server
			// does not have, then stop tracking the initial list.
			if initialImages != nil {
				deleteMissingImages(imdb, initialImages, logger)
				initialImages = nil
			}
			continue
		}
		switch imageUpdate.Operation {
		case imageserver.OperationAddImage:
			if initialImages != nil {
				initialImages[imageUpdate.Name] = struct{}{}
			}
			if err := addImage(address, imdb, objSrv, imageUpdate.Name,
				logger); err != nil {
				return err
			}
		case imageserver.OperationDeleteImage:
			logger.Printf("Replicator(%s): delete image\n", imageUpdate.Name)
			if err := imdb.DeleteImage(imageUpdate.Name); err != nil {
				return err
			}
		}
	}
}

// deleteMissingImages removes from imdb every image not listed in
// imagesToKeep. Deletion errors are logged but do not abort the sweep.
func deleteMissingImages(imdb *scanner.ImageDataBase,
	imagesToKeep map[string]struct{}, logger *log.Logger) {
	missingImages := make([]string, 0)
	for _, imageName := range imdb.ListImages() {
		if _, ok := imagesToKeep[imageName]; !ok {
			missingImages = append(missingImages, imageName)
		}
	}
	for _, imageName := range missingImages {
		logger.Printf("Replicator(%s): delete missing image\n", imageName)
		if err := imdb.DeleteImage(imageName); err != nil {
			logger.Println(err)
		}
	}
}

// addImage fetches the named image from the image server at address, pulls
// any of its objects the local object server lacks, and adds it to imdb.
// A no-op if the image is already present locally.
func addImage(address string, imdb *scanner.ImageDataBase,
	objSrv *fsdriver.ObjectServer, name string, logger *log.Logger) error {
	timeout := time.Second * 60
	if imdb.CheckImage(name) {
		// Already replicated: nothing to do.
		return nil
	}
	logger.Printf("Replicator(%s): add image\n", name)
	client, err := srpc.DialHTTP("tcp", address, timeout)
	if err != nil {
		return err
	}
	defer client.Close()
	var request imageserver.GetImageRequest
	request.ImageName = name
	var reply imageserver.GetImageResponse
	if err := imgclient.CallGetImage(client, request, &reply); err != nil {
		return err
	}
	if reply.Image == nil {
		return errors.New(name + ": not found")
	}
	logger.Printf("Replicator(%s): downloaded image\n", name)
	// NOTE(review): presumably the in-memory inode pointers are not
	// preserved across the wire encoding and must be rebuilt before the
	// filesystem is walked — confirm against lib/filesystem.
	reply.Image.FileSystem.RebuildInodePointers()
	if err := getMissingObjects(address, objSrv, reply.Image.FileSystem,
		logger); err != nil {
		return err
	}
	if err := imdb.AddImage(reply.Image, name); err != nil {
		return err
	}
	logger.Printf("Replicator(%s): added image\n", name)
	return nil
}

// getMissingObjects ensures every object referenced by a non-empty regular
// inode in fs is present in the local object server, downloading any
// missing ones from the object server at address.
func getMissingObjects(address string, objSrv *fsdriver.ObjectServer,
	fs *filesystem.FileSystem,
	logger *log.Logger) error {
	hashes := make([]hash.Hash, 0, fs.NumRegularInodes)
	for _, inode := range fs.InodeTable {
		if inode, ok := inode.(*filesystem.RegularInode); ok {
			if inode.Size > 0 {
				hashes = append(hashes, inode.Hash)
			}
		}
	}
	objectSizes, err := objSrv.CheckObjects(hashes)
	if err != nil {
		return err
	}
	missingObjects := make([]hash.Hash, 0)
	for index, size := range objectSizes {
		// A size below 1 means the object is absent locally.
		if size < 1 {
			missingObjects = append(missingObjects, hashes[index])
		}
	}
	if len(missingObjects) < 1 {
		return nil
	}
	logger.Printf("Replicator: downloading %d of %d objects\n",
		len(missingObjects), len(hashes))
	objClient := objectclient.NewObjectClient(address)
	objectsReader, err := objClient.GetObjects(missingObjects)
	if err != nil {
		return err
	}
	defer objectsReader.Close()
	// NOTE(review): the loop variable shadows the imported hash package;
	// harmless here since &hash is consumed before the next iteration,
	// but worth renaming in a future change.
	for _, hash := range missingObjects {
		length, reader, err := objectsReader.NextObject()
		if err != nil {
			return err
		}
		_, _, err = objSrv.AddObject(reader, length, &hash)
		reader.Close()
		if err != nil {
			return err
		}
	}
	return nil
}

Disable replicator image deletes when in archiveMode in imageserver.
package main

import (
	"encoding/gob"
	"errors"
	imgclient "github.com/Symantec/Dominator/imageserver/client"
	"github.com/Symantec/Dominator/imageserver/scanner"
	"github.com/Symantec/Dominator/lib/filesystem"
	"github.com/Symantec/Dominator/lib/hash"
	objectclient "github.com/Symantec/Dominator/lib/objectserver/client"
	fsdriver "github.com/Symantec/Dominator/lib/objectserver/filesystem"
	"github.com/Symantec/Dominator/lib/srpc"
	"github.com/Symantec/Dominator/proto/imageserver"
	"io"
	"log"
	"time"
)

// replicator runs forever, maintaining a connection to the image server at
// address and applying its update stream to the local image database and
// object server. After each failed cycle it sleeps until the current backoff
// deadline before reconnecting; the timeout doubles per cycle up to one
// minute, and is reset to the initial 15s when a connection outlived the
// deadline (i.e. it was healthy for a while before closing).
func replicator(address string, imdb *scanner.ImageDataBase,
	objSrv *fsdriver.ObjectServer, archiveMode bool, logger *log.Logger) {
	initialTimeout := time.Second * 15
	timeout := initialTimeout
	var nextSleepStopTime time.Time
	for {
		nextSleepStopTime = time.Now().Add(timeout)
		if client, err := srpc.DialHTTP("tcp", address, timeout); err != nil {
			logger.Printf("Error dialling: %s %s\n", address, err)
		} else {
			if conn, err := client.Call(
				"ImageServer.GetImageUpdates"); err != nil {
				logger.Println(err)
			} else {
				if err := getUpdates(address, conn, imdb, objSrv, archiveMode,
					logger); err != nil {
					if err == io.EOF {
						logger.Println(
							"Connection to image replicator closed")
						// The stream closed after the sleep deadline had
						// already passed: the connection was healthy long
						// enough to restart the backoff from scratch.
						if nextSleepStopTime.Sub(time.Now()) < 1 {
							timeout = initialTimeout
						}
					} else {
						logger.Println(err)
					}
				}
				conn.Close()
			}
			client.Close()
		}
		time.Sleep(nextSleepStopTime.Sub(time.Now()))
		if timeout < time.Minute {
			// Exponential backoff, capped at one minute.
			timeout *= 2
		}
	}
}

// getUpdates consumes the gob-encoded stream of ImageUpdate messages on
// conn, applying adds and deletes to imdb. The server first announces the
// images it currently holds; an update with an empty Name marks the end of
// that initial list, at which point local images absent from the list are
// purged. In archiveMode the initial list is not tracked (initialImages is
// nil) and delete operations are ignored, so nothing is ever removed
// locally. Returns io.EOF when the stream closes cleanly.
func getUpdates(address string, conn *srpc.Conn, imdb *scanner.ImageDataBase,
	objSrv *fsdriver.ObjectServer, archiveMode bool,
	logger *log.Logger) error {
	logger.Printf("Image replicator: connected to: %s\n", address)
	decoder := gob.NewDecoder(conn)
	initialImages := make(map[string]struct{})
	if archiveMode {
		initialImages = nil
	}
	for {
		var imageUpdate imageserver.ImageUpdate
		if err := decoder.Decode(&imageUpdate); err != nil {
			if err == io.EOF {
				return err
			}
			return errors.New("decode err: " + err.Error())
		}
		if imageUpdate.Name == "" {
			// End-of-initial-list marker: delete local images the server
			// does not have, then stop tracking the initial list.
			if initialImages != nil {
				deleteMissingImages(imdb, initialImages, logger)
				initialImages = nil
			}
			continue
		}
		switch imageUpdate.Operation {
		case imageserver.OperationAddImage:
			if initialImages != nil {
				initialImages[imageUpdate.Name] = struct{}{}
			}
			if err := addImage(address, imdb, objSrv, imageUpdate.Name,
				logger); err != nil {
				return err
			}
		case imageserver.OperationDeleteImage:
			// Archive mode keeps every image ever seen: skip deletes.
			if archiveMode {
				continue
			}
			logger.Printf("Replicator(%s): delete image\n", imageUpdate.Name)
			if err := imdb.DeleteImage(imageUpdate.Name); err != nil {
				return err
			}
		}
	}
}

// deleteMissingImages removes from imdb every image not listed in
// imagesToKeep. Deletion errors are logged but do not abort the sweep.
func deleteMissingImages(imdb *scanner.ImageDataBase,
	imagesToKeep map[string]struct{}, logger *log.Logger) {
	missingImages := make([]string, 0)
	for _, imageName := range imdb.ListImages() {
		if _, ok := imagesToKeep[imageName]; !ok {
			missingImages = append(missingImages, imageName)
		}
	}
	for _, imageName := range missingImages {
		logger.Printf("Replicator(%s): delete missing image\n", imageName)
		if err := imdb.DeleteImage(imageName); err != nil {
			logger.Println(err)
		}
	}
}

// addImage fetches the named image from the image server at address, pulls
// any of its objects the local object server lacks, and adds it to imdb.
// A no-op if the image is already present locally.
func addImage(address string, imdb *scanner.ImageDataBase,
	objSrv *fsdriver.ObjectServer, name string, logger *log.Logger) error {
	timeout := time.Second * 60
	if imdb.CheckImage(name) {
		// Already replicated: nothing to do.
		return nil
	}
	logger.Printf("Replicator(%s): add image\n", name)
	client, err := srpc.DialHTTP("tcp", address, timeout)
	if err != nil {
		return err
	}
	defer client.Close()
	var request imageserver.GetImageRequest
	request.ImageName = name
	var reply imageserver.GetImageResponse
	if err := imgclient.CallGetImage(client, request, &reply); err != nil {
		return err
	}
	if reply.Image == nil {
		return errors.New(name + ": not found")
	}
	logger.Printf("Replicator(%s): downloaded image\n", name)
	// NOTE(review): presumably the in-memory inode pointers are not
	// preserved across the wire encoding and must be rebuilt before the
	// filesystem is walked — confirm against lib/filesystem.
	reply.Image.FileSystem.RebuildInodePointers()
	if err := getMissingObjects(address, objSrv, reply.Image.FileSystem,
		logger); err != nil {
		return err
	}
	if err := imdb.AddImage(reply.Image, name); err != nil {
		return err
	}
	logger.Printf("Replicator(%s): added image\n", name)
	return nil
}

// getMissingObjects ensures every object referenced by a non-empty regular
// inode in fs is present in the local object server, downloading any
// missing ones from the object server at address.
func getMissingObjects(address string, objSrv *fsdriver.ObjectServer,
	fs *filesystem.FileSystem,
	logger *log.Logger) error {
	hashes := make([]hash.Hash, 0, fs.NumRegularInodes)
	for _, inode := range fs.InodeTable {
		if inode, ok := inode.(*filesystem.RegularInode); ok {
			if inode.Size > 0 {
				hashes = append(hashes, inode.Hash)
			}
		}
	}
	objectSizes, err := objSrv.CheckObjects(hashes)
	if err != nil {
		return err
	}
	missingObjects := make([]hash.Hash, 0)
	for index, size := range objectSizes {
		// A size below 1 means the object is absent locally.
		if size < 1 {
			missingObjects = append(missingObjects, hashes[index])
		}
	}
	if len(missingObjects) < 1 {
		return nil
	}
	logger.Printf("Replicator: downloading %d of %d objects\n",
		len(missingObjects), len(hashes))
	objClient := objectclient.NewObjectClient(address)
	objectsReader, err := objClient.GetObjects(missingObjects)
	if err != nil {
		return err
	}
	defer objectsReader.Close()
	// NOTE(review): the loop variable shadows the imported hash package;
	// harmless here since &hash is consumed before the next iteration,
	// but worth renaming in a future change.
	for _, hash := range missingObjects {
		length, reader, err := objectsReader.NextObject()
		if err != nil {
			return err
		}
		_, _, err = objSrv.AddObject(reader, length, &hash)
		reader.Close()
		if err != nil {
			return err
		}
	}
	return nil
}
package manager import ( "errors" ) var ( ErrEmptyParam = errors.New("empty params") ErrPermDenied = errors.New("permission denied") ErrAuthenticationFail = errors.New("authentication fails") ErrAuthorizationFial = errors.New("authorization fails") ) Commit message: use a clearer error message for ErrEmptyParam ("auth with empty params" instead of "empty params"). package manager import ( "errors" ) var ( ErrEmptyParam = errors.New("auth with empty params") ErrPermDenied = errors.New("permission denied") ErrAuthenticationFail = errors.New("authentication fails") ErrAuthorizationFial = errors.New("authorization fails") )
/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "bufio" "bytes" "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "time" "github.com/blang/semver" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" "k8s.io/kubectl/pkg/cmd/util/editor" "k8s.io/release/pkg/command" "k8s.io/release/pkg/git" "k8s.io/release/pkg/github" "k8s.io/release/pkg/notes" "k8s.io/release/pkg/notes/document" "k8s.io/release/pkg/notes/options" "k8s.io/release/pkg/util" ) const ( // draftFilename filename for the release notes draft draftFilename = "release-notes-draft.md" // serviceDirectory is where we keep the files used to generate the notes releaseNotesWorkDir = "release-notes" // mapsMainDirectory is where we will save the release notes maps mapsMainDirectory = "maps" // mapsCVEDirectory holds the maps with CVE data mapsCVEDirectory = "cve" // Directory where session editing files are stored mapsSessionDirectory = "sessions" // The themes directory holds the maps for the release notes major themes mapsThemesDirectory = "themes" // defaultKubernetesSigsOrg GitHub org owner of the release-notes repo defaultKubernetesSigsOrg = "kubernetes-sigs" // defaultKubernetesSigsRepo relnotes.k8s.io repository name defaultKubernetesSigsRepo = "release-notes" // userForkName The name we will give to the user's remote when adding it to repos userForkName = "userfork" // assetsFilePath Path to the assets.ts file 
assetsFilePath = "src/environments/assets.ts" // websiteBranchPrefix name of the website branch in the user's fork websiteBranchPrefix = "release-notes-json-" // draftBranchPrefix name of the draft branch in the user's fork draftBranchPrefix = "release-notes-draft-" // Editing instructions for notemaps mapEditingInstructions = `# This is the current map for this Pull Request. # The original note content is commented out, if you need to # change a field, remove the comment and change the value. # To cancel, exit without changing anything or leave the file blank. # Important! pr: and releasenote: have to be uncommented. #` ) // releaseNotesCmd represents the subcommand for `krel release-notes` var releaseNotesCmd = &cobra.Command{ Use: "release-notes", Short: "The subcommand of choice for the Release Notes subteam of SIG Release", Long: fmt.Sprintf(`krel release-notes The 'release-notes' subcommand of krel has been developed to: 1. Generate the release notes for the provided tag for commits on the main branch. We always use the main branch because a release branch gets fast-forwarded until we hit the first release candidate (rc). This is also the reason why we select the first 'v1.xx.0-rc.1' as start tag for the notes generation. 2. Put the generated notes into a release notes draft markdown document and create a GitHub pull request targeting to update the file: https://github.com/kubernetes/sig-release/blob/master/releases/release-1.xx/release-notes-draft.md 3. Put the generated notes into a JSON file and create a GitHub pull request to update the website https://relnotes.k8s.io. 
To use the tool, please set the %v environment variable which needs write permissions to your fork of k/sig-release and k-sigs/release-notes.`, github.TokenEnvKey), SilenceUsage: true, SilenceErrors: true, PreRunE: func(cmd *cobra.Command, args []string) error { // If none of the operation modes is defined, show the usage help and exit if !releaseNotesOpts.createDraftPR && !releaseNotesOpts.createWebsitePR { if err := cmd.Help(); err != nil { return err } } return nil }, RunE: func(cmd *cobra.Command, args []string) error { // Run the PR creation function return runReleaseNotes() }, } type releaseNotesOptions struct { tag string userFork string createDraftPR bool createWebsitePR bool dependencies bool fixNotes bool websiteRepo string mapProviders []string githubOrg string draftRepo string } type releaseNotesResult struct { markdown string json string } // A datatype to record a notes repair session type sessionData struct { UserEmail string `json:"mail"` UserName string `json:"name"` Date int64 `json:"date"` PullRequests []struct { Number int `json:"nr"` Hash string `json:"hash"` } `json:"prs"` Path string `json:"-"` } var releaseNotesOpts = &releaseNotesOptions{} func init() { releaseNotesCmd.PersistentFlags().StringVarP( &releaseNotesOpts.tag, "tag", "t", "", "version tag for the notes", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.createDraftPR, "create-draft-pr", false, "update the Release Notes draft and create a PR in k/sig-release", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.createWebsitePR, "create-website-pr", false, "patch the relnotes.k8s.io sources and generate a PR with the changes", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.dependencies, "dependencies", true, "add dependency report", ) releaseNotesCmd.PersistentFlags().StringSliceVarP( &releaseNotesOpts.mapProviders, "maps-from", "m", []string{}, "specify a location to recursively look for release notes *.y[a]ml file mappings", ) 
releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.fixNotes, "fix", false, "fix release notes", ) releaseNotesCmd.PersistentFlags().StringVar( &releaseNotesOpts.userFork, "fork", "", "the user's fork in the form org/repo. Used to submit Pull Requests for the website and draft", ) rootCmd.AddCommand(releaseNotesCmd) } func runReleaseNotes() (err error) { var tag string if releaseNotesOpts.tag == "" { tag, err = tryToFindLatestMinorTag() if err != nil { return errors.Wrapf(err, "unable to find latest minor tag") } releaseNotesOpts.tag = tag } else { tag = releaseNotesOpts.tag } if releaseNotesOpts.userFork != "" { org, repo, err := git.ParseRepoSlug(releaseNotesOpts.userFork) if err != nil { return errors.Wrap(err, "parsing the user's fork") } releaseNotesOpts.githubOrg = org releaseNotesOpts.websiteRepo, releaseNotesOpts.draftRepo = repo, repo // If the slug did not have a repo, use the defaults if repo == "" { releaseNotesOpts.websiteRepo = defaultKubernetesSigsRepo releaseNotesOpts.draftRepo = git.DefaultGithubReleaseRepo } } // First, validate cmdline options if err := releaseNotesOpts.Validate(); err != nil { return errors.Wrap(err, "validating command line options") } // before running the generators, verify that the repositories are ready if releaseNotesOpts.createWebsitePR { if err = verifyFork( websiteBranchPrefix+tag, releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo, defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, ); err != nil { return errors.Wrapf(err, "while checking %s/%s fork", defaultKubernetesSigsOrg, defaultKubernetesSigsRepo) } } if releaseNotesOpts.createDraftPR { if err = verifyFork( draftBranchPrefix+tag, releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ); err != nil { return errors.Wrapf(err, "while checking %s/%s fork", defaultKubernetesSigsOrg, git.DefaultGithubReleaseRepo) } } // Create the PR for relnotes.k8s.io if releaseNotesOpts.createWebsitePR { // Run the 
website PR process if err := createWebsitePR(tag); err != nil { return errors.Wrap(err, "creating website PR") } } // Create the PR for the Release Notes Draft in k/sig-release if releaseNotesOpts.createDraftPR { // Create the Draft PR Process if err := createDraftPR(tag); err != nil { return errors.Wrap(err, "creating Draft PR") } } if releaseNotesOpts.createDraftPR || releaseNotesOpts.createWebsitePR { logrus.Info("Release notes generation complete!") } return nil } // verifyFork does a pre-check of a fork to see if we can create a PR from it func verifyFork(branchName, forkOwner, forkRepo, parentOwner, parentRepo string) error { logrus.Infof("Checking if a PR can be created from %s/%s", forkOwner, forkRepo) gh := github.New() // Check th PR isrepo, err := gh.RepoIsForkOf( forkOwner, forkRepo, parentOwner, parentRepo, ) if err != nil { return errors.Wrapf( err, "while checking if repository is a fork of %s/%s", parentOwner, parentRepo, ) } if !isrepo { return errors.Errorf( "cannot create PR, %s/%s is not a fork of %s/%s", forkOwner, forkRepo, parentOwner, parentRepo, ) } // verify the branch does not previously exist branchExists, err := gh.BranchExists( forkOwner, forkRepo, branchName, ) if err != nil { return errors.Wrap(err, "while checking if branch can be created") } if branchExists { return errors.Errorf( "a branch named %s already exists in %s/%s", branchName, forkOwner, forkRepo, ) } return nil } // createDraftPR pushes the release notes draft to the users fork func createDraftPR(tag string) (err error) { tagVersion, err := util.TagStringToSemver(tag) if err != nil { return errors.Wrapf(err, "reading tag: %s", tag) } // Release notes are built from the first RC in the previous // minor to encompass all changes received after Code Thaw, // the point where the last minor was forked. 
start := util.SemverToTagString(semver.Version{ Major: tagVersion.Major, Minor: tagVersion.Minor - 1, Patch: 0, Pre: []semver.PRVersion{{VersionStr: "rc.1"}}, }) gh := github.New() autoCreatePullRequest := true // Verify the repository isrepo, err := gh.RepoIsForkOf( releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ) if err != nil { return errors.Wrapf( err, "while checking if repository is a fork of %s/%s", git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ) } if !isrepo { return errors.New( fmt.Sprintf( "Cannot create PR, %s/%s is not a fork of %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ), ) } // Generate the notes for the current version releaseNotes, err := gatherNotesFrom(start) if err != nil { return errors.Wrapf(err, "while generating the release notes for tag %s", start) } branchname := draftBranchPrefix + tag // Prepare the fork of k/sig-release sigReleaseRepo, err := prepareFork( branchname, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, ) if err != nil { return errors.Wrap(err, "preparing local fork of kubernetes/sig-release") } // The release path inside the repository releasePath := filepath.Join("releases", fmt.Sprintf("release-%d.%d", tagVersion.Major, tagVersion.Minor)) // Check if the directory exists releaseDir := filepath.Join(sigReleaseRepo.Dir(), releasePath) if !util.Exists(releaseDir) { return errors.New(fmt.Sprintf("could not find release directory %s", releaseDir)) } // If we got the --fix flag, start the fix flow if releaseNotesOpts.fixNotes { _, _, err = util.Ask("Press enter to start", "y:yes|n:no|y", 10) // In interactive mode, we will ask the user before sending the PR autoCreatePullRequest = false // createNotesWorkDir is idempotent, we can use it to verify the tree is complete if err := createNotesWorkDir(releaseDir); err != nil { return 
errors.Wrap(err, "creating working directory") } // Run the release notes fix flow err := fixReleaseNotes(filepath.Join(releaseDir, releaseNotesWorkDir), releaseNotes) if err != nil { return errors.Wrap(err, "while running release notes fix flow") } // Create the map provider to read the changes so far rnMapProvider, err := notes.NewProviderFromInitString(filepath.Join(releaseDir, releaseNotesWorkDir, mapsMainDirectory)) if err != nil { return errors.Wrap(err, "creating release notes draft") } for _, note := range releaseNotes.ByPR() { maps, err := rnMapProvider.GetMapsForPR(note.PrNumber) if err != nil { return errors.Wrapf(err, "while getting maps for PR#%d", note.PrNumber) } for _, noteMap := range maps { if err := note.ApplyMap(noteMap); err != nil { return errors.Wrapf(err, "applying note maps to pr #%d", note.PrNumber) } } } } // Generate the results struct result, err := buildNotesResult(start, releaseNotes) if err != nil { return errors.Wrap(err, "building release notes results") } // generate the notes logrus.Debugf("Release notes markdown will be written to %s", releaseDir) err = ioutil.WriteFile(filepath.Join(releaseDir, draftFilename), []byte(result.markdown), 0644) if err != nil { return errors.Wrapf(err, "writing release notes draft") } logrus.Infof("Release Notes Draft written to %s", filepath.Join(releaseDir, draftFilename)) // If we are in interactive mode, ask before continuing if !autoCreatePullRequest { _, autoCreatePullRequest, err = util.Ask("Create pull request with your changes? (y/n)", "y:yes|n:no", 10) if err != nil { return errors.Wrap(err, "while asking to create pull request") } } if !autoCreatePullRequest { fmt.Println("\nPull request has NOT been created. The changes were made to your local copy of k/sig-release.") fmt.Println("To complete the process, you will need to:") fmt.Println(" 1. Review the changes in your local copy") fmt.Printf(" 2. 
Push the changes to your fork (git push -u %s %s)\n", userForkName, branchname) fmt.Println(" 3. Submit a pull request to k/sig-release") fmt.Println("\nYou can find your local copy here:") fmt.Println(sigReleaseRepo.Dir()) fmt.Println(nl) logrus.Warn("Changes were made locally, user needs to perform manual push and create pull request.") return nil } defer func() { err = sigReleaseRepo.Cleanup() }() // add the updated draft if err := sigReleaseRepo.Add(filepath.Join(releasePath, draftFilename)); err != nil { return errors.Wrap(err, "adding release notes draft to staging area") } // List of directories we'll consider for the PR releaseDirectories := []struct{ Path, Name, Ext string }{ { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsMainDirectory), Name: "release notes maps", Ext: "yaml", }, { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsSessionDirectory), Name: "release notes session files", Ext: "json", }, { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsCVEDirectory), Name: "release notes cve data", Ext: "yaml", }, { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsThemesDirectory), Name: "release notes major theme files", Ext: "yaml", }, } // Add to the PR all files that exist for _, dirData := range releaseDirectories { // add the updated maps if util.Exists(filepath.Join(sigReleaseRepo.Dir(), dirData.Path)) { // Check if there are any files to commit matches, err := filepath.Glob(filepath.Join(sigReleaseRepo.Dir(), dirData.Path, "*"+dirData.Ext)) logrus.Debugf("Adding %d %s from %s to commit", len(matches), dirData.Name, dirData.Path) if err != nil { return errors.Wrapf(err, "checking for %s files in %s", dirData.Ext, dirData.Path) } if len(matches) > 1 { if err := sigReleaseRepo.Add(filepath.Join(dirData.Path, "*"+dirData.Ext)); err != nil { return errors.Wrapf(err, "adding %s to staging area", dirData.Name) } } } else { logrus.Debugf("Not adding %s files, directory %s not found", dirData.Name, dirData.Path) } } // 
add the generated draft if err := sigReleaseRepo.UserCommit("Release Notes draft for k/k " + tag); err != nil { return errors.Wrapf(err, "creating commit in %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo) } // push to the user's remote logrus.Infof("Pushing modified release notes draft to %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo) if err := sigReleaseRepo.PushToRemote(userForkName, branchname); err != nil { return errors.Wrapf(err, "pushing %s to remote", userForkName) } // Create a PR against k/sig-release using the github API // TODO: Maybe read and parse the PR template from sig-release? prBody := "**What type of PR is this?**\n" prBody += "/kind documentation\n\n" prBody += "**What this PR does / why we need it**:\n" prBody += fmt.Sprintf("This PR updates the Release Notes Draft to k/k %s\n\n", tag) prBody += "**Which issue(s) this PR fixes**:\n\n" prBody += "**Special notes for your reviewer**:\n" prBody += "This is an automated PR generated from `krel The Kubernetes Release Toolbox`\n\n" // Create the pull request logrus.Debugf( "PR params: org: %s, repo: %s, headBranch: %s baseBranch: %s", git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), ) // Create the PR pr, err := gh.CreatePullRequest( git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), fmt.Sprintf("Update release notes draft to version %s", tag), prBody, ) if err != nil { logrus.Warnf("An error has occurred while creating the pull request for %s", tag) logrus.Warn("While the PR failed, the release notes draft was generated and submitted to your fork") return errors.Wrap(err, "creating the pull request") } logrus.Infof( "Successfully created PR: %s%s/%s/pull/%d", github.GitHubURL, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, pr.GetNumber(), ) logrus.Infof("Successfully created PR #%d", 
pr.GetNumber()) return nil } // prepareFork Prepare a branch a repo func prepareFork(branchName, upstreamOrg, upstreamRepo, myOrg, myRepo string) (repo *git.Repo, err error) { // checkout the upstream repository logrus.Infof("Cloning/updating repository %s/%s", upstreamOrg, upstreamRepo) repo, err = git.CleanCloneGitHubRepo( upstreamOrg, upstreamRepo, false, ) if err != nil { return nil, errors.Wrapf(err, "cloning %s/%s", upstreamOrg, upstreamRepo) } // test if the fork remote is already existing url := git.GetRepoURL(myOrg, myRepo, false) if repo.HasRemote(userForkName, url) { logrus.Infof( "Using already existing remote %s (%s) in repository", userForkName, url, ) } else { // add the user's fork as a remote err = repo.AddRemote(userForkName, myOrg, myRepo) if err != nil { return nil, errors.Wrap(err, "adding user's fork as remote repository") } } // checkout the new branch err = repo.Checkout("-B", branchName) if err != nil { return nil, errors.Wrapf(err, "creating new branch %s", branchName) } return repo, nil } // addReferenceToAssetsFile adds a new entry in the assets.ts file in repoPath to include newJsonFile func addReferenceToAssetsFile(repoPath, newJSONFile string) error { // Full filesystem path to the assets.ts file assetsFullPath := filepath.Join(repoPath, assetsFilePath) file, err := os.Open(assetsFullPath) if err != nil { return errors.Wrap(err, "opening assets.ts to check for current version") } defer file.Close() logrus.Infof("Writing json reference to %s in %s", newJSONFile, assetsFullPath) scanner := bufio.NewScanner(file) var assetsBuffer bytes.Buffer assetsFileWasModified := false fileIsReferenced := false for scanner.Scan() { // Check if the assets file already has the json notes referenced: if strings.Contains(scanner.Text(), fmt.Sprintf("assets/%s", newJSONFile)) { logrus.Warnf("File %s is already referenced in assets.ts", newJSONFile) fileIsReferenced = true break } assetsBuffer.WriteString(scanner.Text()) // Add the current version right 
after the array export if strings.Contains(scanner.Text(), "export const assets =") { assetsBuffer.WriteString(fmt.Sprintf(" 'assets/%s',\n", newJSONFile)) assetsFileWasModified = true } } if fileIsReferenced { logrus.Infof("Not modifying assets.ts since it already has a reference to %s", newJSONFile) return nil } // Return an error if the array declaration was not found if !assetsFileWasModified { return errors.New("unable to modify assets file, could not find assets array declaration") } // write the modified assets.ts file if err := ioutil.WriteFile(assetsFullPath, assetsBuffer.Bytes(), os.FileMode(0o644)); err != nil { return errors.Wrap(err, "writing assets.ts file") } return nil } // processJSONOutput Runs NPM prettier inside repoPath to format the JSON output func processJSONOutput(repoPath string) error { npmpath, err := exec.LookPath("npm") if err != nil { return errors.Wrap(err, "while looking for npm in your path") } // run npm install logrus.Info("Installing npm modules, this can take a while") if err := command.NewWithWorkDir(repoPath, npmpath, "install").RunSuccess(); err != nil { return errors.Wrap(err, "running npm install in kubernetes-sigs/release-notes") } // run npm prettier logrus.Info("Running npm prettier...") if err := command.NewWithWorkDir(repoPath, npmpath, "run", "prettier").RunSuccess(); err != nil { return errors.Wrap(err, "running npm prettier in kubernetes-sigs/release-notes") } return nil } // createWebsitePR creates the JSON version of the release notes and pushes them to a user fork func createWebsitePR(tag string) error { _, err := util.TagStringToSemver(tag) if err != nil { return errors.Wrapf(err, "reading tag: %s", tag) } // Generate the release notes for just the current tag jsonStr, err := releaseNotesJSON(tag) if err != nil { return errors.Wrapf(err, "generating release notes in JSON format") } jsonNotesFilename := fmt.Sprintf("release-notes-%s.json", tag[1:]) branchname := websiteBranchPrefix + tag // checkout kubernetes-sigs/release-notes
k8sSigsRepo, err := prepareFork( branchname, defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo, ) if err != nil { return errors.Wrap(err, "preparing local fork branch") } defer func() { err = k8sSigsRepo.Cleanup() }() // add a reference to the new json file in assets.ts if err := addReferenceToAssetsFile(k8sSigsRepo.Dir(), jsonNotesFilename); err != nil { return errors.Wrapf(err, "adding %s to assets file", jsonNotesFilename) } // generate the notes jsonNotesPath := filepath.Join("src", "assets", jsonNotesFilename) logrus.Debugf("Release notes json file will be written to %s", filepath.Join(k8sSigsRepo.Dir(), jsonNotesPath)) err = ioutil.WriteFile(filepath.Join(k8sSigsRepo.Dir(), jsonNotesPath), []byte(jsonStr), 0644) if err != nil { return errors.Wrapf(err, "writing release notes json file") } // Run NPM prettier if err := processJSONOutput(k8sSigsRepo.Dir()); err != nil { return errors.Wrap(err, "while formatting release notes JSON files") } // add the modified files & commit the results if err := k8sSigsRepo.Add(jsonNotesPath); err != nil { return errors.Wrap(err, "adding release notes draft to staging area") } if err := k8sSigsRepo.Add(filepath.FromSlash(assetsFilePath)); err != nil { return errors.Wrap(err, "adding release notes draft to staging area") } if err := k8sSigsRepo.UserCommit(fmt.Sprintf("Patch relnotes.k8s.io with release %s", tag)); err != nil { return errors.Wrapf(err, "Error creating commit in %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo) } // push to the user's fork logrus.Infof("Pushing website changes to %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo) if err := k8sSigsRepo.PushToRemote(userForkName, branchname); err != nil { return errors.Wrapf(err, "pushing %s to %s/%s", userForkName, releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo) } // Create a PR against k-sigs/release-notes using the github API gh := github.New() logrus.Debugf( "PR 
params: org: %s, repo: %s, headBranch: %s baseBranch: %s", defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), ) pr, err := gh.CreatePullRequest( defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), fmt.Sprintf("Patch relnotes.k8s.io to release %s", tag), fmt.Sprintf("Automated patch to update relnotes.k8s.io to k/k version `%s` ", tag), ) if err != nil { logrus.Warnf("An error has occurred while creating the pull request for %s", tag) logrus.Warn("While the PR failed, the release notes where generated and submitted to your fork") return errors.Wrap(err, "creating the pull request") } logrus.Infof( "Successfully created PR: %s%s/%s/pull/%d", github.GitHubURL, defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, pr.GetNumber(), ) return nil } // tryToFindLatestMinorTag looks-up the default k/k remote to find the latest // non final version func tryToFindLatestMinorTag() (string, error) { url := git.GetDefaultKubernetesRepoURL() status, err := command.New( "git", "ls-remote", "--sort=v:refname", "--tags", url, ). Pipe("grep", "-Eo", "v[0-9].[0-9]+.0-.*.[0-9]$"). Pipe("tail", "-1"). 
RunSilentSuccessOutput() if err != nil { return "", err } return strings.TrimSpace(status.Output()), nil } // releaseNotesJSON generates the release notes for a specific tag and returns // them as a JSON blob func releaseNotesJSON(tag string) (string, error) { logrus.Infof("Generating release notes for tag %s", tag) tagVersion, err := util.TagStringToSemver(tag) if err != nil { return "", errors.Wrap(err, "parsing semver from tag string") } branchName := git.DefaultBranch releaseBranch := fmt.Sprintf("release-%d.%d", tagVersion.Major, tagVersion.Minor) // Ensure we have a valid branch if !git.IsReleaseBranch(branchName) { return "", errors.New("Could not determine a release branch for tag") } // Preclone the repo to be able to read branches and tags logrus.Infof("Cloning %s/%s", git.DefaultGithubOrg, git.DefaultGithubRepo) repo, err := git.CloneOrOpenDefaultGitHubRepoSSH(rootOpts.repoPath) if err != nil { return "", errors.Wrap(err, "cloning default github repo") } // Check if release branch already exists _, err = repo.RevParse(releaseBranch) if err == nil { logrus.Infof("Working on branch %s instead of %s", releaseBranch, git.DefaultBranch) branchName = releaseBranch } else { logrus.Infof("Release branch %s does not exist, working on %s", releaseBranch, git.DefaultBranch) } // If it's a patch release, we deduce the startTag manually: var startTag string if tagVersion.Patch > 0 { logrus.Debugf("Working on branch %s instead of %s", tag, git.DefaultBranch) startTag = fmt.Sprintf("v%d.%d.%d", tagVersion.Major, tagVersion.Minor, tagVersion.Patch-1) } else { startTag, err = repo.PreviousTag(tag, branchName) if err != nil { return "", errors.Wrap(err, "getting previous tag from branch") } } logrus.Infof("Using start tag %v", startTag) logrus.Infof("Using end tag %v", tag) notesOptions := options.New() notesOptions.Branch = branchName notesOptions.RepoPath = rootOpts.repoPath notesOptions.StartRev = startTag notesOptions.EndRev = tag notesOptions.Debug =
logrus.StandardLogger().Level >= logrus.DebugLevel notesOptions.MapProviderStrings = releaseNotesOpts.mapProviders if err := notesOptions.ValidateAndFinish(); err != nil { return "", err } // Fetch the notes releaseNotes, err := notes.GatherReleaseNotes(notesOptions) if err != nil { return "", errors.Wrapf(err, "gathering release notes") } doc, err := document.New( releaseNotes, notesOptions.StartRev, notesOptions.EndRev, ) if err != nil { return "", errors.Wrapf(err, "creating release note document") } doc.PreviousRevision = startTag doc.CurrentRevision = tag // Create the JSON j, err := json.Marshal(releaseNotes.ByPR()) if err != nil { return "", errors.Wrapf(err, "generating release notes JSON") } return string(j), nil } // gatherNotesFrom gathers all the release notes from the specified startTag up to --tag func gatherNotesFrom(startTag string) (*notes.ReleaseNotes, error) { logrus.Infof("Gathering release notes from %s to %s", startTag, releaseNotesOpts.tag) notesOptions := options.New() notesOptions.Branch = git.DefaultBranch notesOptions.RepoPath = rootOpts.repoPath notesOptions.StartRev = startTag notesOptions.EndRev = releaseNotesOpts.tag notesOptions.Debug = logrus.StandardLogger().Level >= logrus.DebugLevel notesOptions.MapProviderStrings = releaseNotesOpts.mapProviders if err := notesOptions.ValidateAndFinish(); err != nil { return nil, err } logrus.Infof("Using start tag %v", startTag) logrus.Infof("Using end tag %v", releaseNotesOpts.tag) // Fetch the notes releaseNotes, err := notes.GatherReleaseNotes(notesOptions) if err != nil { return nil, errors.Wrapf(err, "gathering release notes") } return releaseNotes, nil } func buildNotesResult(startTag string, releaseNotes *notes.ReleaseNotes) (*releaseNotesResult, error) { doc, err := document.New( releaseNotes, startTag, releaseNotesOpts.tag, ) if err != nil { return nil, errors.Wrapf(err, "creating release note document") } doc.PreviousRevision = startTag doc.CurrentRevision = releaseNotesOpts.tag // 
Create the markdown markdown, err := doc.RenderMarkdownTemplate( "", "", options.GoTemplateDefault, ) if err != nil { return nil, errors.Wrapf( err, "rendering release notes to markdown", ) } // Add the dependency report if necessary if releaseNotesOpts.dependencies { logrus.Info("Generating dependency changes") deps, err := notes.NewDependencies().Changes( startTag, releaseNotesOpts.tag, ) if err != nil { return nil, errors.Wrap(err, "creating dependency report") } markdown += strings.Repeat(nl, 2) + deps } // Create the JSON j, err := json.Marshal(releaseNotes.ByPR()) if err != nil { return nil, errors.Wrapf(err, "generating release notes JSON") } return &releaseNotesResult{markdown: markdown, json: string(j)}, nil } // Validate checks if passed cmdline options are sane func (o *releaseNotesOptions) Validate() error { // Check that we have a GitHub token set token, isset := os.LookupEnv(github.TokenEnvKey) if !isset || token == "" { return errors.New("Cannot generate release notes if GitHub token is not set") } // If a tag is defined, see if it is a valid semver tag _, err := util.TagStringToSemver(releaseNotesOpts.tag) if err != nil { return errors.Wrapf(err, "reading tag: %s", releaseNotesOpts.tag) } // Options for PR creation if o.createDraftPR || o.createWebsitePR { if o.userFork == "" { return errors.New("cannot generate the Release Notes PR without --fork") } } return nil } // Save the session to a file func (sd *sessionData) Save() error { if sd.Date == 0 { return errors.New("unable to save session, date is note defined") } if sd.Path == "" { return errors.New("unable to save session, path is not defined") } jsonData, err := json.Marshal(sd) if err != nil { return errors.Wrap(err, "marshaling session data") } if err := ioutil.WriteFile( filepath.Join(sd.Path, fmt.Sprintf("maps-%d.json", sd.Date)), jsonData, os.FileMode(0o644)); err != nil { return errors.Wrap(err, "writing session data to disk") } return nil } // readFixSessions reads all the previous 
fixing data func readFixSessions(sessionPath string) (pullRequestChecklist map[int]string, err error) { files, err := ioutil.ReadDir(sessionPath) if err != nil { return nil, errors.Wrap(err, "reading working directory") } pullRequestList := make([]struct { Number int `json:"nr"` Hash string `json:"hash"` }, 0) // Look in the work dir for all json files for _, fileData := range files { currentSession := &sessionData{} if strings.HasSuffix(fileData.Name(), ".json") { logrus.Debugf("Reading session data from %s", fileData.Name()) jsonData, err := ioutil.ReadFile(filepath.Join(sessionPath, fileData.Name())) if err != nil { return nil, errors.Wrapf(err, "reading session data from %s", fileData.Name()) } if err := json.Unmarshal(jsonData, currentSession); err != nil { return nil, errors.Wrapf(err, "unmarshalling session data in %s", fileData.Name()) } pullRequestList = append(pullRequestList, currentSession.PullRequests...) } } // Copy the PRs to a map for easy lookup pullRequestChecklist = map[int]string{} for _, pr := range pullRequestList { pullRequestChecklist[pr.Number] = pr.Hash } logrus.Infof("Read %d PR reviews from previous sessions", len(pullRequestList)) return pullRequestChecklist, nil } // Do the fix process for the current tag func fixReleaseNotes(workDir string, releaseNotes *notes.ReleaseNotes) error { // Get data to record the session userEmail, err := git.GetUserEmail() if err != nil { return errors.Wrap(err, "getting local user's email") } userName, err := git.GetUserName() if err != nil { return errors.Wrap(err, "getting local user's name") } // Check the workDir before going further if !util.Exists(workDir) { return errors.New("map directory does not exist") } // Create the new session struct session := &sessionData{ UserEmail: userEmail, UserName: userName, Date: time.Now().UTC().Unix(), Path: filepath.Join(workDir, mapsSessionDirectory), } // Read the list of all PRs we've processed so far pullRequestChecklist, err := 
readFixSessions(filepath.Join(workDir, mapsSessionDirectory)) if err != nil { return errors.Wrapf(err, "reading previous session data") } // Greet the user with basic instructions greetingMessage := "\nWelcome to the Kubernetes Release Notes editing tool!\n\n" greetingMessage += "This tool will allow you to review and edit all the release\n" greetingMessage += "notes submitted by the Kubernetes contributors before publishing\n" greetingMessage += "the updated draft.\n\n" greetingMessage += "The flow will show each of the release notes that need to be\n" greetingMessage += "reviewed once and you can choose to edit it or not.\n\n" greetingMessage += "After you choose, it will be marked as reviewed and will not\n" greetingMessage += "be shown during the next sessions unless you choose to do a\n" greetingMessage += "full review of all notes.\n\n" greetingMessage += "You can hit Ctrl+C at any time to exit the review process\n" greetingMessage += "and submit the draft PR with the revisions made so far.\n\n" fmt.Print(greetingMessage) // Ask the user if they want to continue the last session o fix all notes continueFromLastSession := true if len(pullRequestChecklist) > 0 { _, continueFromLastSession, err = util.Ask("Would you like to continue from the last session? 
(Y/n)", "y:yes|n:no|y", 10) } else { _, _, err = util.Ask("Press enter to start editing", "y:yes|n:no|y", 10) } if err != nil { return errors.Wrap(err, "asking to retrieve last session") } // Bring up the provider provider, err := notes.NewProviderFromInitString(workDir) if err != nil { return errors.Wrap(err, "while getting map provider for current notes") } const ( spacer = " │ " ) // Cycle all gathered release notes for pr, note := range releaseNotes.ByPR() { contentHash, err := note.ContentHash() if err != nil { return errors.Wrapf(err, "getting the content hash for PR#%d", pr) } // We'll skip editing if the Releas Note has been reviewed if _, ok := pullRequestChecklist[pr]; ok && // and if we chose not to edit all continueFromLastSession && // and if the not has not been modified in GutHub contentHash == pullRequestChecklist[pr] { logrus.Debugf("Pull Request %d already reviewed", pr) continue } title := fmt.Sprintf("Release Note for PR %d:", pr) fmt.Println(nl + title) fmt.Println(strings.Repeat("=", len(title))) fmt.Printf("Pull Request URL: %skubernetes/kubernetes/pull/%d%s", github.GitHubURL, pr, nl) noteMaps, err := provider.GetMapsForPR(pr) if err != nil { return errors.Wrapf(err, "while getting map for PR #%d", pr) } // Capture the original note values to compare originalNote := &notes.ReleaseNote{ Text: note.Text, Author: note.Author, Areas: note.Areas, Kinds: note.Kinds, SIGs: note.SIGs, Feature: note.Feature, ActionRequired: note.ActionRequired, Documentation: note.Documentation, } if noteMaps != nil { fmt.Println("✨ Note contents are modified with a map") for _, noteMap := range noteMaps { if err := note.ApplyMap(noteMap); err != nil { return errors.Wrapf(err, "applying notemap for PR #%d", pr) } } } fmt.Println(pointIfChanged("Author", note.Author, originalNote.Author), "@"+note.Author) fmt.Println(pointIfChanged("SIGs", note.SIGs, originalNote.SIGs), note.SIGs) fmt.Println(pointIfChanged("Kinds", note.Kinds, originalNote.Kinds), note.Kinds) 
fmt.Println(pointIfChanged("Areas", note.Areas, originalNote.Areas), note.Areas) fmt.Println(pointIfChanged("Feature", note.Feature, originalNote.Feature), note.Feature) fmt.Println(pointIfChanged("ActionRequired", note.ActionRequired, originalNote.ActionRequired), note.ActionRequired) // TODO: Implement note.Documentation // Wrap the note for better readability on the terminal fmt.Println(pointIfChanged("Text", note.Text, originalNote.Text)) text := util.WrapText(note.Text, 80) fmt.Println(spacer + strings.ReplaceAll(text, nl, nl+spacer)) _, choice, err := util.Ask(fmt.Sprintf("\n- Fix note for PR #%d? (y/N)", note.PrNumber), "y:yes|n:no|n", 10) if err != nil { // If the user cancelled with ctr+c exit and continue the PR flow if err.(util.UserInputError).IsCtrlC() { logrus.Info("Input cancelled, exiting edit flow >> PRESS ENTER TO CONTINUE") return nil } return errors.Wrap(err, "while asking to edit release note") } if choice { for { retry, err := editReleaseNote(pr, workDir, originalNote, note) if err == nil { break } // If it's a user error (like yaml error) we can try again if retry { _, retryEditingChoice, err := util.Ask( fmt.Sprintf("\n- An error occurred while editing PR #%d. 
Try again?", note.PrNumber), "y:yes|n:no", 10, ) if err != nil { return errors.Wrap(err, "while asking to re-edit release note") } if !retryEditingChoice { return errors.Wrap(err, "editing release note map") } } else { return errors.Wrap(err, "while editing release note") } } } // Add this PR to the checklist: pullRequestChecklist[note.PrNumber] = contentHash session.PullRequests = append(session.PullRequests, struct { Number int `json:"nr"` Hash string `json:"hash"` }{ Number: note.PrNumber, Hash: contentHash, }) if err := session.Save(); err != nil { return errors.Wrap(err, "while saving editing session data") } } return nil } // Check two values and print a prefix if they are different func pointIfChanged(label string, var1, var2 interface{}) string { changed := false // Check if alues are string if _, ok := var1.(string); ok { if var1.(string) != var2.(string) { changed = true } } // Check if string slices if _, ok := var1.([]string); ok { if fmt.Sprint(var1) != fmt.Sprint(var2) { changed = true } } // Check if string slices if _, ok := var1.(bool); ok { if var1.(bool) != var2.(bool) { changed = true } } if changed { return fmt.Sprintf(" >> %s:", label) } return fmt.Sprintf(" %s:", label) } // editReleaseNote opens the user's editor for them to update the note. // In case of an editing error by the user, it returns shouldRetryEditing // set to true to retry editing. 
func editReleaseNote(pr int, workDir string, originalNote, modifiedNote *notes.ReleaseNote) (shouldRetryEditing bool, err error) {
	// To edit the note, we will create a yaml file, with the changed fields
	// active and we'll add the unaltered fields commented for the user to review
	modifiedFields := &notes.ReleaseNotesMap{PR: pr}
	unalteredFields := &notes.ReleaseNotesMap{PR: pr}
	numChanges := 0

	// For each field, route it to the "modified" map when it differs from
	// the original note, otherwise to the "unaltered" map (shown commented).
	if originalNote.Text == modifiedNote.Text {
		unalteredFields.ReleaseNote.Text = &originalNote.Text
	} else {
		modifiedFields.ReleaseNote.Text = &modifiedNote.Text
		numChanges++
	}

	if originalNote.Author == modifiedNote.Author {
		unalteredFields.ReleaseNote.Author = &originalNote.Author
	} else {
		modifiedFields.ReleaseNote.Author = &modifiedNote.Author
		numChanges++
	}

	// Slice and bool fields are compared via their fmt.Sprint rendering.
	if fmt.Sprint(originalNote.SIGs) == fmt.Sprint(modifiedNote.SIGs) {
		unalteredFields.ReleaseNote.SIGs = &originalNote.SIGs
	} else {
		modifiedFields.ReleaseNote.SIGs = &modifiedNote.SIGs
		numChanges++
	}

	if fmt.Sprint(originalNote.Kinds) == fmt.Sprint(modifiedNote.Kinds) {
		unalteredFields.ReleaseNote.Kinds = &originalNote.Kinds
	} else {
		modifiedFields.ReleaseNote.Kinds = &modifiedNote.Kinds
		numChanges++
	}

	if fmt.Sprint(originalNote.Areas) == fmt.Sprint(modifiedNote.Areas) {
		unalteredFields.ReleaseNote.Areas = &originalNote.Areas
	} else {
		modifiedFields.ReleaseNote.Areas = &modifiedNote.Areas
		numChanges++
	}

	if fmt.Sprint(originalNote.Feature) == fmt.Sprint(modifiedNote.Feature) {
		unalteredFields.ReleaseNote.Feature = &originalNote.Feature
	} else {
		modifiedFields.ReleaseNote.Feature = &modifiedNote.Feature
		numChanges++
	}

	if fmt.Sprint(originalNote.ActionRequired) == fmt.Sprint(modifiedNote.ActionRequired) {
		unalteredFields.ReleaseNote.ActionRequired = &originalNote.ActionRequired
	} else {
		modifiedFields.ReleaseNote.ActionRequired = &modifiedNote.ActionRequired
		numChanges++
	}

	// TODO: Implement after writing a documentation comparison func
	unalteredFields.ReleaseNote.Documentation = &originalNote.Documentation

	// Create the release note map for the editor:
	output := "---\n" + string(mapEditingInstructions) + "\n"

	if numChanges == 0 {
		// If there are no changes, present the user with the commented
		// map with the original values
		yamlCode, err := yaml.Marshal(&unalteredFields)
		if err != nil {
			return false, errors.Wrap(err, "marshalling release note to map")
		}
		output += "# " + strings.ReplaceAll(string(yamlCode), "\n", "\n# ")
	} else {
		// ... otherwise build a mixed map with the changes and the original
		// values commented out for reference
		yamlCode, err := yaml.Marshal(&modifiedFields)
		if err != nil {
			return false, errors.Wrap(err, "marshalling release note to map")
		}
		unalteredYAML, err := yaml.Marshal(&unalteredFields.ReleaseNote)
		if err != nil {
			return false, errors.Wrap(err, "marshalling release note to map")
		}
		output += string(yamlCode) + " # " + strings.ReplaceAll(string(unalteredYAML), "\n", "\n # ")
	}

	// Launch the user's editor ($KUBE_EDITOR, falling back to $EDITOR)
	// on a temp file seeded with the map built above.
	kubeEditor := editor.NewDefaultEditor([]string{"KUBE_EDITOR", "EDITOR"})
	changes, _, err := kubeEditor.LaunchTempFile("map", ".yaml", bytes.NewReader([]byte(output)))
	if err != nil {
		return false, errors.Wrap(err, "while launching editor")
	}

	// If the map was not modified, we don't make any changes
	if string(changes) == output || string(changes) == "" {
		logrus.Info("Release notes map was not modified")
		return false, nil
	}

	// Verify that the new yaml is valid and can be serialized back into a Map
	testMap := notes.ReleaseNotesMap{}
	err = yaml.Unmarshal(changes, &testMap)
	if err != nil {
		// shouldRetryEditing=true: user-fixable YAML error
		logrus.Error("The YAML code has errors")
		return true, errors.Wrap(err, "while verifying if changes are a valid map")
	}

	if testMap.PR == 0 {
		logrus.Error("The yaml code does not have a PR number")
		return true, errors.New("Invalid map: the YAML code did not have a PR number")
	}

	// Remarshal the new yaml to save only the new values
	newYAML, err := yaml.Marshal(testMap)
	if err != nil {
		return true, errors.Wrap(err, "while re-marshaling new map")
	}

	// Write the new map, removing the instructions
	mapPath := filepath.Join(workDir, mapsMainDirectory, fmt.Sprintf("pr-%d-map.yaml", pr))
	err = ioutil.WriteFile(mapPath, newYAML, os.FileMode(0o644))
	if err != nil {
		logrus.Errorf("Error writing map to %s: %s", mapPath, err)
		return true, errors.Wrap(err, "writing modified release note map")
	}
	return false, nil
}

// createNotesWorkDir creates the release notes working directory
func createNotesWorkDir(releaseDir string) error {
	// Check that the working tree is complete:
	for _, dirPath := range []string{
		filepath.Join(releaseDir, releaseNotesWorkDir),                       // Main work dir
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsMainDirectory),    // Maps directory
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsCVEDirectory),     // Maps for CVE data
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsSessionDirectory), // Editing session files
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsThemesDirectory),  // Major themes directory
	} {
		if !util.Exists(dirPath) {
			if err := os.Mkdir(dirPath, os.FileMode(0o755)); err != nil {
				return errors.Wrap(err, "creating working directory")
			}
		}
	}
	return nil
}
Generate release notes from previous minor version Signed-off-by: Adolfo García Veytia (Puerco) <9de42c8261a7cacaae239656a4dd6991d9b9250f@uservers.net> /* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ package cmd import ( "bufio" "bytes" "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "time" "github.com/blang/semver" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" "k8s.io/kubectl/pkg/cmd/util/editor" "k8s.io/release/pkg/command" "k8s.io/release/pkg/git" "k8s.io/release/pkg/github" "k8s.io/release/pkg/notes" "k8s.io/release/pkg/notes/document" "k8s.io/release/pkg/notes/options" "k8s.io/release/pkg/util" ) const ( // draftFilename filename for the release notes draft draftFilename = "release-notes-draft.md" // serviceDirectory is where we keep the files used to generate the notes releaseNotesWorkDir = "release-notes" // mapsMainDirectory is where we will save the release notes maps mapsMainDirectory = "maps" // mapsCVEDirectory holds the maps with CVE data mapsCVEDirectory = "cve" // Directory where session editing files are stored mapsSessionDirectory = "sessions" // The themes directory holds the maps for the release notes major themes mapsThemesDirectory = "themes" // defaultKubernetesSigsOrg GitHub org owner of the release-notes repo defaultKubernetesSigsOrg = "kubernetes-sigs" // defaultKubernetesSigsRepo relnotes.k8s.io repository name defaultKubernetesSigsRepo = "release-notes" // userForkName The name we will give to the user's remote when adding it to repos userForkName = "userfork" // assetsFilePath Path to the assets.ts file assetsFilePath = "src/environments/assets.ts" // websiteBranchPrefix name of the website branch in the user's fork websiteBranchPrefix = "release-notes-json-" // draftBranchPrefix name of the draft branch in the user's fork draftBranchPrefix = "release-notes-draft-" // Editing instructions for notemaps mapEditingInstructions = `# This is the current map for this Pull Request. # The original note content is commented out, if you need to # change a field, remove the comment and change the value. 
# To cancel, exit without changing anything or leave the file blank. # Important! pr: and releasenote: have to be uncommented. #` ) // releaseNotesCmd represents the subcommand for `krel release-notes` var releaseNotesCmd = &cobra.Command{ Use: "release-notes", Short: "The subcommand of choice for the Release Notes subteam of SIG Release", Long: fmt.Sprintf(`krel release-notes The 'release-notes' subcommand of krel has been developed to: 1. Generate the release notes draft for the provided tag for commits on the main branch. 2. Put the generated notes into a release notes draft markdown document and create a GitHub pull request targeting to update the file: https://github.com/kubernetes/sig-release/blob/master/releases/release-1.xx/release-notes-draft.md 3. Put the generated notes into a JSON file and create a GitHub pull request to update the website https://relnotes.k8s.io. To use the tool, please set the %v environment variable which needs write permissions to your fork of k/sig-release and k-sigs/release-notes.`, github.TokenEnvKey), SilenceUsage: true, SilenceErrors: true, PreRunE: func(cmd *cobra.Command, args []string) error { // If none of the operation modes is defined, show the usage help and exit if !releaseNotesOpts.createDraftPR && !releaseNotesOpts.createWebsitePR { if err := cmd.Help(); err != nil { return err } } return nil }, RunE: func(cmd *cobra.Command, args []string) error { // Run the PR creation function return runReleaseNotes() }, } type releaseNotesOptions struct { tag string userFork string createDraftPR bool createWebsitePR bool dependencies bool fixNotes bool websiteRepo string mapProviders []string githubOrg string draftRepo string } type releaseNotesResult struct { markdown string json string } // A datatype to record a notes repair session type sessionData struct { UserEmail string `json:"mail"` UserName string `json:"name"` Date int64 `json:"date"` PullRequests []struct { Number int `json:"nr"` Hash string `json:"hash"` } 
`json:"prs"` Path string `json:"-"` } var releaseNotesOpts = &releaseNotesOptions{} func init() { releaseNotesCmd.PersistentFlags().StringVarP( &releaseNotesOpts.tag, "tag", "t", "", "version tag for the notes", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.createDraftPR, "create-draft-pr", false, "update the Release Notes draft and create a PR in k/sig-release", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.createWebsitePR, "create-website-pr", false, "patch the relnotes.k8s.io sources and generate a PR with the changes", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.dependencies, "dependencies", true, "add dependency report", ) releaseNotesCmd.PersistentFlags().StringSliceVarP( &releaseNotesOpts.mapProviders, "maps-from", "m", []string{}, "specify a location to recursively look for release notes *.y[a]ml file mappings", ) releaseNotesCmd.PersistentFlags().BoolVar( &releaseNotesOpts.fixNotes, "fix", false, "fix release notes", ) releaseNotesCmd.PersistentFlags().StringVar( &releaseNotesOpts.userFork, "fork", "", "the user's fork in the form org/repo. 
Used to submit Pull Requests for the website and draft", ) rootCmd.AddCommand(releaseNotesCmd) } func runReleaseNotes() (err error) { var tag string if releaseNotesOpts.tag == "" { tag, err = tryToFindLatestMinorTag() if err != nil { return errors.Wrapf(err, "unable to find latest minor tag") } releaseNotesOpts.tag = tag } else { tag = releaseNotesOpts.tag } if releaseNotesOpts.userFork != "" { org, repo, err := git.ParseRepoSlug(releaseNotesOpts.userFork) if err != nil { return errors.Wrap(err, "parsing the user's fork") } releaseNotesOpts.githubOrg = org releaseNotesOpts.websiteRepo, releaseNotesOpts.draftRepo = repo, repo // If the slug did not have a repo, use the defaults if repo == "" { releaseNotesOpts.websiteRepo = defaultKubernetesSigsRepo releaseNotesOpts.draftRepo = git.DefaultGithubReleaseRepo } } // First, validate cmdline options if err := releaseNotesOpts.Validate(); err != nil { return errors.Wrap(err, "validating command line options") } // before running the generators, verify that the repositories are ready if releaseNotesOpts.createWebsitePR { if err = verifyFork( websiteBranchPrefix+tag, releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo, defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, ); err != nil { return errors.Wrapf(err, "while checking %s/%s fork", defaultKubernetesSigsOrg, defaultKubernetesSigsRepo) } } if releaseNotesOpts.createDraftPR { if err = verifyFork( draftBranchPrefix+tag, releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ); err != nil { return errors.Wrapf(err, "while checking %s/%s fork", defaultKubernetesSigsOrg, git.DefaultGithubReleaseRepo) } } // Create the PR for relnotes.k8s.io if releaseNotesOpts.createWebsitePR { // Run the website PR process if err := createWebsitePR(tag); err != nil { return errors.Wrap(err, "creating website PR") } } // Create the PR for the Release Notes Draft in k/sig-release if releaseNotesOpts.createDraftPR { // Create the 
Draft PR Process if err := createDraftPR(tag); err != nil { return errors.Wrap(err, "creating Draft PR") } } if releaseNotesOpts.createDraftPR || releaseNotesOpts.createWebsitePR { logrus.Info("Release notes generation complete!") } return nil } // verifyFork does a pre-check of a fork to see if we can create a PR from it func verifyFork(branchName, forkOwner, forkRepo, parentOwner, parentRepo string) error { logrus.Infof("Checking if a PR can be created from %s/%s", forkOwner, forkRepo) gh := github.New() // Check th PR isrepo, err := gh.RepoIsForkOf( forkOwner, forkRepo, parentOwner, parentRepo, ) if err != nil { return errors.Wrapf( err, "while checking if repository is a fork of %s/%s", parentOwner, parentRepo, ) } if !isrepo { return errors.Errorf( "cannot create PR, %s/%s is not a fork of %s/%s", forkOwner, forkRepo, parentOwner, parentRepo, ) } // verify the branch does not previously exist branchExists, err := gh.BranchExists( forkOwner, forkRepo, branchName, ) if err != nil { return errors.Wrap(err, "while checking if branch can be created") } if branchExists { return errors.Errorf( "a branch named %s already exists in %s/%s", branchName, forkOwner, forkRepo, ) } return nil } // createDraftPR pushes the release notes draft to the users fork func createDraftPR(tag string) (err error) { tagVersion, err := util.TagStringToSemver(tag) if err != nil { return errors.Wrapf(err, "reading tag: %s", tag) } // From v1.20.0 on we use the previous minor as a starting tag // for the Release Notes draft because the branch is fast-rowarded now: start := util.SemverToTagString(semver.Version{ Major: tagVersion.Major, Minor: tagVersion.Minor - 1, Patch: 0, }) gh := github.New() autoCreatePullRequest := true // Verify the repository isrepo, err := gh.RepoIsForkOf( releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ) if err != nil { return errors.Wrapf( err, "while checking if repository is a fork of %s/%s", 
git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ) } if !isrepo { return errors.New( fmt.Sprintf( "Cannot create PR, %s/%s is not a fork of %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, ), ) } // Generate the notes for the current version releaseNotes, err := gatherNotesFrom(start) if err != nil { return errors.Wrapf(err, "while generating the release notes for tag %s", start) } branchname := draftBranchPrefix + tag // Prepare the fork of k/sig-release sigReleaseRepo, err := prepareFork( branchname, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo, ) if err != nil { return errors.Wrap(err, "preparing local fork of kubernetes/sig-release") } // The release path inside the repository releasePath := filepath.Join("releases", fmt.Sprintf("release-%d.%d", tagVersion.Major, tagVersion.Minor)) // Check if the directory exists releaseDir := filepath.Join(sigReleaseRepo.Dir(), releasePath) if !util.Exists(releaseDir) { return errors.New(fmt.Sprintf("could not find release directory %s", releaseDir)) } // If we got the --fix flag, start the fix flow if releaseNotesOpts.fixNotes { _, _, err = util.Ask("Press enter to start", "y:yes|n:no|y", 10) // In interactive mode, we will ask the user before sending the PR autoCreatePullRequest = false // createNotesWorkDir is idempotent, we can use it to verify the tree is complete if err := createNotesWorkDir(releaseDir); err != nil { return errors.Wrap(err, "creating working directory") } // Run the release notes fix flow err := fixReleaseNotes(filepath.Join(releaseDir, releaseNotesWorkDir), releaseNotes) if err != nil { return errors.Wrap(err, "while running release notes fix flow") } // Create the map provider to read the changes so far rnMapProvider, err := notes.NewProviderFromInitString(filepath.Join(releaseDir, releaseNotesWorkDir, mapsMainDirectory)) if err != nil { return errors.Wrap(err, "creating 
release notes draft") } for _, note := range releaseNotes.ByPR() { maps, err := rnMapProvider.GetMapsForPR(note.PrNumber) if err != nil { return errors.Wrapf(err, "while getting maps for PR#%d", note.PrNumber) } for _, noteMap := range maps { if err := note.ApplyMap(noteMap); err != nil { return errors.Wrapf(err, "applying note maps to pr #%d", note.PrNumber) } } } } // Generate the results struct result, err := buildNotesResult(start, releaseNotes) if err != nil { return errors.Wrap(err, "building release notes results") } // generate the notes logrus.Debugf("Release notes markdown will be written to %s", releaseDir) err = ioutil.WriteFile(filepath.Join(releaseDir, draftFilename), []byte(result.markdown), 0644) if err != nil { return errors.Wrapf(err, "writing release notes draft") } logrus.Infof("Release Notes Draft written to %s", filepath.Join(releaseDir, draftFilename)) // If we are in interactive mode, ask before continuing if !autoCreatePullRequest { _, autoCreatePullRequest, err = util.Ask("Create pull request with your changes? (y/n)", "y:yes|n:no", 10) if err != nil { return errors.Wrap(err, "while asking to create pull request") } } if !autoCreatePullRequest { fmt.Println("\nPull request has NOT been created. The changes were made to your local copy of k/sig-release.") fmt.Println("To complete the process, you will need to:") fmt.Println(" 1. Review the changes in your local copy") fmt.Printf(" 2. Push the changes to your fork (git push -u %s %s)\n", userForkName, branchname) fmt.Println(" 3. 
Submit a pull request to k/sig-release") fmt.Println("\nYou can find your local copy here:") fmt.Println(sigReleaseRepo.Dir()) fmt.Println(nl) logrus.Warn("Changes were made locally, user needs to perform manual push and create pull request.") return nil } defer func() { err = sigReleaseRepo.Cleanup() }() // add the updated draft if err := sigReleaseRepo.Add(filepath.Join(releasePath, draftFilename)); err != nil { return errors.Wrap(err, "adding release notes draft to staging area") } // List of directories we'll consider for the PR releaseDirectories := []struct{ Path, Name, Ext string }{ { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsMainDirectory), Name: "release notes maps", Ext: "yaml", }, { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsSessionDirectory), Name: "release notes session files", Ext: "json", }, { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsCVEDirectory), Name: "release notes cve data", Ext: "yaml", }, { Path: filepath.Join(releasePath, releaseNotesWorkDir, mapsThemesDirectory), Name: "release notes major theme files", Ext: "yaml", }, } // Add to the PR all files that exist for _, dirData := range releaseDirectories { // add the updated maps if util.Exists(filepath.Join(sigReleaseRepo.Dir(), dirData.Path)) { // Check if there are any files to commit matches, err := filepath.Glob(filepath.Join(sigReleaseRepo.Dir(), dirData.Path, "*"+dirData.Ext)) logrus.Debugf("Adding %d %s from %s to commit", len(matches), dirData.Name, dirData.Path) if err != nil { return errors.Wrapf(err, "checking for %s files in %s", dirData.Ext, dirData.Path) } if len(matches) > 1 { if err := sigReleaseRepo.Add(filepath.Join(dirData.Path, "*"+dirData.Ext)); err != nil { return errors.Wrapf(err, "adding %s to staging area", dirData.Name) } } } else { logrus.Debugf("Not adding %s files, directory %s not found", dirData.Name, dirData.Path) } } // add the generated draft if err := sigReleaseRepo.UserCommit("Release Notes draft for k/k " + 
tag); err != nil { return errors.Wrapf(err, "creating commit in %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo) } // push to the user's remote logrus.Infof("Pushing modified release notes draft to %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.draftRepo) if err := sigReleaseRepo.PushToRemote(userForkName, branchname); err != nil { return errors.Wrapf(err, "pushing %s to remote", userForkName) } // Create a PR against k/sig-release using the github API // TODO: Maybe read and parse the PR template from sig-release? prBody := "**What type of PR is this?**\n" prBody += "/kind documentation\n\n" prBody += "**What this PR does / why we need it**:\n" prBody += fmt.Sprintf("This PR updates the Release Notes Draft to k/k %s\n\n", tag) prBody += "**Which issue(s) this PR fixes**:\n\n" prBody += "**Special notes for your reviewer**:\n" prBody += "This is an automated PR generated from `krel The Kubernetes Release Toolbox`\n\n" // Create the pull request logrus.Debugf( "PR params: org: %s, repo: %s, headBranch: %s baseBranch: %s", git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), ) // Create the PR pr, err := gh.CreatePullRequest( git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), fmt.Sprintf("Update release notes draft to version %s", tag), prBody, ) if err != nil { logrus.Warnf("An error has occurred while creating the pull request for %s", tag) logrus.Warn("While the PR failed, the release notes draft was generated and submitted to your fork") return errors.Wrap(err, "creating the pull request") } logrus.Infof( "Successfully created PR: %s%s/%s/pull/%d", github.GitHubURL, git.DefaultGithubOrg, git.DefaultGithubReleaseRepo, pr.GetNumber(), ) logrus.Infof("Successfully created PR #%d", pr.GetNumber()) return nil } // prepareFork Prepare a branch a repo func prepareFork(branchName, 
upstreamOrg, upstreamRepo, myOrg, myRepo string) (repo *git.Repo, err error) {
	// checkout the upstream repository
	logrus.Infof("Cloning/updating repository %s/%s", upstreamOrg, upstreamRepo)
	repo, err = git.CleanCloneGitHubRepo(
		upstreamOrg, upstreamRepo, false,
	)
	if err != nil {
		return nil, errors.Wrapf(err, "cloning %s/%s", upstreamOrg, upstreamRepo)
	}

	// test if the fork remote is already existing
	url := git.GetRepoURL(myOrg, myRepo, false)
	if repo.HasRemote(userForkName, url) {
		logrus.Infof(
			"Using already existing remote %s (%s) in repository",
			userForkName, url,
		)
	} else {
		// add the user's fork as a remote
		err = repo.AddRemote(userForkName, myOrg, myRepo)
		if err != nil {
			return nil, errors.Wrap(err, "adding user's fork as remote repository")
		}
	}

	// checkout the new branch
	err = repo.Checkout("-B", branchName)
	if err != nil {
		return nil, errors.Wrapf(err, "creating new branch %s", branchName)
	}
	return repo, nil
}

// addReferenceToAssetsFile adds a new entry in the assets.ts file in repoPath to include newJSONFile
func addReferenceToAssetsFile(repoPath, newJSONFile string) error {
	// Full filesystem path to the assets.ts file
	assetsFullPath := filepath.Join(repoPath, assetsFilePath)

	file, err := os.Open(assetsFullPath)
	if err != nil {
		return errors.Wrap(err, "opening assets.ts to check for current version")
	}
	defer file.Close()

	logrus.Infof("Writing json reference to %s in %s", newJSONFile, assetsFullPath)

	scanner := bufio.NewScanner(file)
	var assetsBuffer bytes.Buffer
	assetsFileWasModified := false
	fileIsReferenced := false
	for scanner.Scan() {
		// Check if the assets file already has the json notes referenced:
		if strings.Contains(scanner.Text(), fmt.Sprintf("assets/%s", newJSONFile)) {
			logrus.Warnf("File %s is already referenced in assets.ts", newJSONFile)
			fileIsReferenced = true
			break
		}

		// NOTE(review): scanner.Text() strips the trailing newline and no
		// newline is re-added here, so the rebuilt buffer appears to join
		// lines — TODO confirm whether assets.ts survives this intact.
		assetsBuffer.WriteString(scanner.Text())

		// Add the current version right after the array export
		if strings.Contains(scanner.Text(), "export const assets =") {
			assetsBuffer.WriteString(fmt.Sprintf(" 'assets/%s',\n", newJSONFile))
			assetsFileWasModified = true
		}
	}

	if fileIsReferenced {
		logrus.Infof("Not modifying assets.ts since it already has a reference to %s", newJSONFile)
		return nil
	}

	// Return an error if the array declaration was not found
	if !assetsFileWasModified {
		return errors.New("unable to modify assets file, could not find assets array declaration")
	}

	// write the modified assets.ts file
	if err := ioutil.WriteFile(assetsFullPath, assetsBuffer.Bytes(), os.FileMode(0o644)); err != nil {
		return errors.Wrap(err, "writing assets.ts file")
	}
	return nil
}

// processJSONOutput runs NPM prettier inside repoPath to format the JSON output
func processJSONOutput(repoPath string) error {
	npmpath, err := exec.LookPath("npm")
	if err != nil {
		return errors.Wrap(err, "while looking for npm in your path")
	}

	// run npm install
	logrus.Info("Installing npm modules, this can take a while")
	if err := command.NewWithWorkDir(repoPath, npmpath, "install").RunSuccess(); err != nil {
		return errors.Wrap(err, "running npm install in kubernetes-sigs/release-notes")
	}

	// run npm prettier
	logrus.Info("Running npm prettier...")
	if err := command.NewWithWorkDir(repoPath, npmpath, "run", "prettier").RunSuccess(); err != nil {
		return errors.Wrap(err, "running npm prettier in kubernetes-sigs/release-notes")
	}
	return nil
}

// createWebsitePR creates the JSON version of the release notes and pushes them to a user fork
func createWebsitePR(tag string) error {
	_, err := util.TagStringToSemver(tag)
	if err != nil {
		return errors.Wrapf(err, "reading tag: %s", tag)
	}

	// Generate the release notes for just the current tag
	jsonStr, err := releaseNotesJSON(tag)
	if err != nil {
		return errors.Wrapf(err, "generating release notes in JSON format")
	}

	// tag[1:] drops the leading "v" for the json filename
	jsonNotesFilename := fmt.Sprintf("release-notes-%s.json", tag[1:])
	branchname := websiteBranchPrefix + tag

	// checkout kubernetes-sigs/release-notes
	k8sSigsRepo, err := prepareFork(
		branchname,
		defaultKubernetesSigsOrg,
defaultKubernetesSigsRepo, releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo, ) if err != nil { return errors.Wrap(err, "preparing local fork branch") } defer func() { err = k8sSigsRepo.Cleanup() }() // add a reference to the new json file in assets.ts if err := addReferenceToAssetsFile(k8sSigsRepo.Dir(), jsonNotesFilename); err != nil { return errors.Wrapf(err, "adding %s to assets file", jsonNotesFilename) } // generate the notes jsonNotesPath := filepath.Join("src", "assets", jsonNotesFilename) logrus.Debugf("Release notes json file will be written to %s", filepath.Join(k8sSigsRepo.Dir(), jsonNotesPath)) err = ioutil.WriteFile(filepath.Join(k8sSigsRepo.Dir(), jsonNotesPath), []byte(jsonStr), 0644) if err != nil { return errors.Wrapf(err, "writing release notes json file") } // Run NPM prettier if err := processJSONOutput(k8sSigsRepo.Dir()); err != nil { return errors.Wrap(err, "while formatting release notes JSON files") } // add the modified files & commit the results if err := k8sSigsRepo.Add(jsonNotesPath); err != nil { return errors.Wrap(err, "adding release notes draft to staging area") } if err := k8sSigsRepo.Add(filepath.FromSlash(assetsFilePath)); err != nil { return errors.Wrap(err, "adding release notes draft to staging area") } if err := k8sSigsRepo.UserCommit(fmt.Sprintf("Patch relnotes.k8s.io with release %s", tag)); err != nil { return errors.Wrapf(err, "Error creating commit in %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo) } // push to the user's fork logrus.Infof("Pushing website changes to %s/%s", releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo) if err := k8sSigsRepo.PushToRemote(userForkName, branchname); err != nil { return errors.Wrapf(err, "pushing %s to %s/%s", userForkName, releaseNotesOpts.githubOrg, releaseNotesOpts.websiteRepo) } // Create a PR against k-sigs/release-notes using the github API gh := github.New() logrus.Debugf( "PR params: org: %s, repo: %s, headBranch: %s baseBranch: %s", 
defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), ) pr, err := gh.CreatePullRequest( defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, git.DefaultBranch, fmt.Sprintf("%s:%s", releaseNotesOpts.githubOrg, branchname), fmt.Sprintf("Patch relnotes.k8s.io to release %s", tag), fmt.Sprintf("Automated patch to update relnotes.k8s.io to k/k version `%s` ", tag), ) if err != nil { logrus.Warnf("An error has occurred while creating the pull request for %s", tag) logrus.Warn("While the PR failed, the release notes where generated and submitted to your fork") return errors.Wrap(err, "creating the pull request") } logrus.Infof( "Successfully created PR: %s%s/%s/pull/%d", github.GitHubURL, defaultKubernetesSigsOrg, defaultKubernetesSigsRepo, pr.GetNumber(), ) return nil } // tryToFindLatestMinorTag looks-up the default k/k remote to find the latest // non final version func tryToFindLatestMinorTag() (string, error) { url := git.GetDefaultKubernetesRepoURL() status, err := command.New( "git", "ls-remote", "--sort=v:refname", "--tags", url, ). Pipe("grep", "-Eo", "v[0-9].[0-9]+.0-.*.[0-9]$"). Pipe("tail", "-1"). 
RunSilentSuccessOutput() if err != nil { return "", err } return strings.TrimSpace(status.Output()), nil } // releaseNotesJSON generate the release notes for a specific tag and returns // them as JSON blob func releaseNotesJSON(tag string) (string, error) { logrus.Infof("Generating release notes for tag %s", tag) tagVersion, err := util.TagStringToSemver(tag) if err != nil { return "", errors.Wrap(err, "parsing semver from tag string") } branchName := git.DefaultBranch releaseBranch := fmt.Sprintf("release-%d.%d", tagVersion.Major, tagVersion.Minor) // Ensure we have a valid branch if !git.IsReleaseBranch(branchName) { return "", errors.New("Could not determine a release branch for tag") } // Preclone the repo to be able to read branches and tags logrus.Infof("Cloning %s/%s", git.DefaultGithubOrg, git.DefaultGithubRepo) repo, err := git.CloneOrOpenDefaultGitHubRepoSSH(rootOpts.repoPath) if err != nil { return "", errors.Wrap(err, "cloning default github repo") } // Chech if release branch already exists _, err = repo.RevParse(releaseBranch) if err == nil { logrus.Infof("Working on branch %s instead of %s", releaseBranch, git.DefaultBranch) branchName = releaseBranch } else { logrus.Infof("Release branch %s does not exist, working on %s", releaseBranch, git.DefaultBranch) } // Notes for patch releases are generated starting from the previous patch release: var startTag, tagChoice string if tagVersion.Patch > 0 { startTag = fmt.Sprintf("v%d.%d.%d", tagVersion.Major, tagVersion.Minor, tagVersion.Patch-1) tagChoice = "previous patch release" } else { // From 1.20 the notes fot the first alpha start from the previous minor if tagVersion.Pre[0].String() == "alpha" && tagVersion.Pre[1].VersionNum == 1 { startTag = util.SemverToTagString(semver.Version{ Major: tagVersion.Major, Minor: tagVersion.Minor - 1, Patch: 0, }) tagChoice = "previous minor version" } else { // All others from the previous existing tag startTag, err = repo.PreviousTag(tag, branchName) if err != nil { 
return "", errors.Wrap(err, "getting previous tag from branch") } tagChoice = "previous tag" } } logrus.Infof("Using start tag %v from %s", startTag, tagChoice) logrus.Infof("Using end tag %v", tag) notesOptions := options.New() notesOptions.Branch = branchName notesOptions.RepoPath = rootOpts.repoPath notesOptions.StartRev = startTag notesOptions.EndRev = tag notesOptions.Debug = logrus.StandardLogger().Level >= logrus.DebugLevel notesOptions.MapProviderStrings = releaseNotesOpts.mapProviders if err := notesOptions.ValidateAndFinish(); err != nil { return "", err } // Fetch the notes releaseNotes, err := notes.GatherReleaseNotes(notesOptions) if err != nil { return "", errors.Wrapf(err, "gathering release notes") } doc, err := document.New( releaseNotes, notesOptions.StartRev, notesOptions.EndRev, ) if err != nil { return "", errors.Wrapf(err, "creating release note document") } doc.PreviousRevision = startTag doc.CurrentRevision = tag // Create the JSON j, err := json.Marshal(releaseNotes.ByPR()) if err != nil { return "", errors.Wrapf(err, "generating release notes JSON") } return string(j), nil } // gatherNotesFrom gathers all the release notes from the specified startTag up to --tag func gatherNotesFrom(startTag string) (*notes.ReleaseNotes, error) { logrus.Infof("Gathering release notes from %s to %s", startTag, releaseNotesOpts.tag) notesOptions := options.New() notesOptions.Branch = git.DefaultBranch notesOptions.RepoPath = rootOpts.repoPath notesOptions.StartRev = startTag notesOptions.EndRev = releaseNotesOpts.tag notesOptions.Debug = logrus.StandardLogger().Level >= logrus.DebugLevel notesOptions.MapProviderStrings = releaseNotesOpts.mapProviders if err := notesOptions.ValidateAndFinish(); err != nil { return nil, err } logrus.Infof("Using start tag %v", startTag) logrus.Infof("Using end tag %v", releaseNotesOpts.tag) // Fetch the notes releaseNotes, err := notes.GatherReleaseNotes(notesOptions) if err != nil { return nil, errors.Wrapf(err, "gathering 
release notes") } return releaseNotes, nil } func buildNotesResult(startTag string, releaseNotes *notes.ReleaseNotes) (*releaseNotesResult, error) { doc, err := document.New( releaseNotes, startTag, releaseNotesOpts.tag, ) if err != nil { return nil, errors.Wrapf(err, "creating release note document") } doc.PreviousRevision = startTag doc.CurrentRevision = releaseNotesOpts.tag // Create the markdown markdown, err := doc.RenderMarkdownTemplate( "", "", options.GoTemplateDefault, ) if err != nil { return nil, errors.Wrapf( err, "rendering release notes to markdown", ) } // Add the dependency report if necessary if releaseNotesOpts.dependencies { logrus.Info("Generating dependency changes") deps, err := notes.NewDependencies().Changes( startTag, releaseNotesOpts.tag, ) if err != nil { return nil, errors.Wrap(err, "creating dependency report") } markdown += strings.Repeat(nl, 2) + deps } // Create the JSON j, err := json.Marshal(releaseNotes.ByPR()) if err != nil { return nil, errors.Wrapf(err, "generating release notes JSON") } return &releaseNotesResult{markdown: markdown, json: string(j)}, nil } // Validate checks if passed cmdline options are sane func (o *releaseNotesOptions) Validate() error { // Check that we have a GitHub token set token, isset := os.LookupEnv(github.TokenEnvKey) if !isset || token == "" { return errors.New("Cannot generate release notes if GitHub token is not set") } // If a tag is defined, see if it is a valid semver tag _, err := util.TagStringToSemver(releaseNotesOpts.tag) if err != nil { return errors.Wrapf(err, "reading tag: %s", releaseNotesOpts.tag) } // Options for PR creation if o.createDraftPR || o.createWebsitePR { if o.userFork == "" { return errors.New("cannot generate the Release Notes PR without --fork") } } return nil } // Save the session to a file func (sd *sessionData) Save() error { if sd.Date == 0 { return errors.New("unable to save session, date is note defined") } if sd.Path == "" { return errors.New("unable to save 
session, path is not defined") } jsonData, err := json.Marshal(sd) if err != nil { return errors.Wrap(err, "marshaling session data") } if err := ioutil.WriteFile( filepath.Join(sd.Path, fmt.Sprintf("maps-%d.json", sd.Date)), jsonData, os.FileMode(0o644)); err != nil { return errors.Wrap(err, "writing session data to disk") } return nil } // readFixSessions reads all the previous fixing data func readFixSessions(sessionPath string) (pullRequestChecklist map[int]string, err error) { files, err := ioutil.ReadDir(sessionPath) if err != nil { return nil, errors.Wrap(err, "reading working directory") } pullRequestList := make([]struct { Number int `json:"nr"` Hash string `json:"hash"` }, 0) // Look in the work dir for all json files for _, fileData := range files { currentSession := &sessionData{} if strings.HasSuffix(fileData.Name(), ".json") { logrus.Debugf("Reading session data from %s", fileData.Name()) jsonData, err := ioutil.ReadFile(filepath.Join(sessionPath, fileData.Name())) if err != nil { return nil, errors.Wrapf(err, "reading session data from %s", fileData.Name()) } if err := json.Unmarshal(jsonData, currentSession); err != nil { return nil, errors.Wrapf(err, "unmarshalling session data in %s", fileData.Name()) } pullRequestList = append(pullRequestList, currentSession.PullRequests...) 
} } // Copy the PRs to a map for easy lookup pullRequestChecklist = map[int]string{} for _, pr := range pullRequestList { pullRequestChecklist[pr.Number] = pr.Hash } logrus.Infof("Read %d PR reviews from previous sessions", len(pullRequestList)) return pullRequestChecklist, nil } // Do the fix process for the current tag func fixReleaseNotes(workDir string, releaseNotes *notes.ReleaseNotes) error { // Get data to record the session userEmail, err := git.GetUserEmail() if err != nil { return errors.Wrap(err, "getting local user's email") } userName, err := git.GetUserName() if err != nil { return errors.Wrap(err, "getting local user's name") } // Check the workDir before going further if !util.Exists(workDir) { return errors.New("map directory does not exist") } // Create the new session struct session := &sessionData{ UserEmail: userEmail, UserName: userName, Date: time.Now().UTC().Unix(), Path: filepath.Join(workDir, mapsSessionDirectory), } // Read the list of all PRs we've processed so far pullRequestChecklist, err := readFixSessions(filepath.Join(workDir, mapsSessionDirectory)) if err != nil { return errors.Wrapf(err, "reading previous session data") } // Greet the user with basic instructions greetingMessage := "\nWelcome to the Kubernetes Release Notes editing tool!\n\n" greetingMessage += "This tool will allow you to review and edit all the release\n" greetingMessage += "notes submitted by the Kubernetes contributors before publishing\n" greetingMessage += "the updated draft.\n\n" greetingMessage += "The flow will show each of the release notes that need to be\n" greetingMessage += "reviewed once and you can choose to edit it or not.\n\n" greetingMessage += "After you choose, it will be marked as reviewed and will not\n" greetingMessage += "be shown during the next sessions unless you choose to do a\n" greetingMessage += "full review of all notes.\n\n" greetingMessage += "You can hit Ctrl+C at any time to exit the review process\n" greetingMessage += "and 
submit the draft PR with the revisions made so far.\n\n" fmt.Print(greetingMessage) // Ask the user if they want to continue the last session o fix all notes continueFromLastSession := true if len(pullRequestChecklist) > 0 { _, continueFromLastSession, err = util.Ask("Would you like to continue from the last session? (Y/n)", "y:yes|n:no|y", 10) } else { _, _, err = util.Ask("Press enter to start editing", "y:yes|n:no|y", 10) } if err != nil { return errors.Wrap(err, "asking to retrieve last session") } // Bring up the provider provider, err := notes.NewProviderFromInitString(workDir) if err != nil { return errors.Wrap(err, "while getting map provider for current notes") } const ( spacer = " │ " ) // Cycle all gathered release notes for pr, note := range releaseNotes.ByPR() { contentHash, err := note.ContentHash() if err != nil { return errors.Wrapf(err, "getting the content hash for PR#%d", pr) } // We'll skip editing if the Releas Note has been reviewed if _, ok := pullRequestChecklist[pr]; ok && // and if we chose not to edit all continueFromLastSession && // and if the not has not been modified in GutHub contentHash == pullRequestChecklist[pr] { logrus.Debugf("Pull Request %d already reviewed", pr) continue } title := fmt.Sprintf("Release Note for PR %d:", pr) fmt.Println(nl + title) fmt.Println(strings.Repeat("=", len(title))) fmt.Printf("Pull Request URL: %skubernetes/kubernetes/pull/%d%s", github.GitHubURL, pr, nl) noteMaps, err := provider.GetMapsForPR(pr) if err != nil { return errors.Wrapf(err, "while getting map for PR #%d", pr) } // Capture the original note values to compare originalNote := &notes.ReleaseNote{ Text: note.Text, Author: note.Author, Areas: note.Areas, Kinds: note.Kinds, SIGs: note.SIGs, Feature: note.Feature, ActionRequired: note.ActionRequired, Documentation: note.Documentation, } if noteMaps != nil { fmt.Println("✨ Note contents are modified with a map") for _, noteMap := range noteMaps { if err := note.ApplyMap(noteMap); err != nil { 
return errors.Wrapf(err, "applying notemap for PR #%d", pr) } } } fmt.Println(pointIfChanged("Author", note.Author, originalNote.Author), "@"+note.Author) fmt.Println(pointIfChanged("SIGs", note.SIGs, originalNote.SIGs), note.SIGs) fmt.Println(pointIfChanged("Kinds", note.Kinds, originalNote.Kinds), note.Kinds) fmt.Println(pointIfChanged("Areas", note.Areas, originalNote.Areas), note.Areas) fmt.Println(pointIfChanged("Feature", note.Feature, originalNote.Feature), note.Feature) fmt.Println(pointIfChanged("ActionRequired", note.ActionRequired, originalNote.ActionRequired), note.ActionRequired) // TODO: Implement note.Documentation // Wrap the note for better readability on the terminal fmt.Println(pointIfChanged("Text", note.Text, originalNote.Text)) text := util.WrapText(note.Text, 80) fmt.Println(spacer + strings.ReplaceAll(text, nl, nl+spacer)) _, choice, err := util.Ask(fmt.Sprintf("\n- Fix note for PR #%d? (y/N)", note.PrNumber), "y:yes|n:no|n", 10) if err != nil { // If the user cancelled with ctr+c exit and continue the PR flow if err.(util.UserInputError).IsCtrlC() { logrus.Info("Input cancelled, exiting edit flow >> PRESS ENTER TO CONTINUE") return nil } return errors.Wrap(err, "while asking to edit release note") } if choice { for { retry, err := editReleaseNote(pr, workDir, originalNote, note) if err == nil { break } // If it's a user error (like yaml error) we can try again if retry { _, retryEditingChoice, err := util.Ask( fmt.Sprintf("\n- An error occurred while editing PR #%d. 
Try again?", note.PrNumber), "y:yes|n:no", 10, ) if err != nil { return errors.Wrap(err, "while asking to re-edit release note") } if !retryEditingChoice { return errors.Wrap(err, "editing release note map") } } else { return errors.Wrap(err, "while editing release note") } } } // Add this PR to the checklist: pullRequestChecklist[note.PrNumber] = contentHash session.PullRequests = append(session.PullRequests, struct { Number int `json:"nr"` Hash string `json:"hash"` }{ Number: note.PrNumber, Hash: contentHash, }) if err := session.Save(); err != nil { return errors.Wrap(err, "while saving editing session data") } } return nil } // Check two values and print a prefix if they are different func pointIfChanged(label string, var1, var2 interface{}) string { changed := false // Check if alues are string if _, ok := var1.(string); ok { if var1.(string) != var2.(string) { changed = true } } // Check if string slices if _, ok := var1.([]string); ok { if fmt.Sprint(var1) != fmt.Sprint(var2) { changed = true } } // Check if string slices if _, ok := var1.(bool); ok { if var1.(bool) != var2.(bool) { changed = true } } if changed { return fmt.Sprintf(" >> %s:", label) } return fmt.Sprintf(" %s:", label) } // editReleaseNote opens the user's editor for them to update the note. // In case of an editing error by the user, it returns shouldRetryEditing // set to true to retry editing. 
func editReleaseNote(pr int, workDir string, originalNote, modifiedNote *notes.ReleaseNote) (shouldRetryEditing bool, err error) { // To edit the note, we will create a yaml file, with the changed fields // active and we'll add the unaltered fields commented for the user to review modifiedFields := &notes.ReleaseNotesMap{PR: pr} unalteredFields := &notes.ReleaseNotesMap{PR: pr} numChanges := 0 if originalNote.Text == modifiedNote.Text { unalteredFields.ReleaseNote.Text = &originalNote.Text } else { modifiedFields.ReleaseNote.Text = &modifiedNote.Text numChanges++ } if originalNote.Author == modifiedNote.Author { unalteredFields.ReleaseNote.Author = &originalNote.Author } else { modifiedFields.ReleaseNote.Author = &modifiedNote.Author numChanges++ } if fmt.Sprint(originalNote.SIGs) == fmt.Sprint(modifiedNote.SIGs) { unalteredFields.ReleaseNote.SIGs = &originalNote.SIGs } else { modifiedFields.ReleaseNote.SIGs = &modifiedNote.SIGs numChanges++ } if fmt.Sprint(originalNote.Kinds) == fmt.Sprint(modifiedNote.Kinds) { unalteredFields.ReleaseNote.Kinds = &originalNote.Kinds } else { modifiedFields.ReleaseNote.Kinds = &modifiedNote.Kinds numChanges++ } if fmt.Sprint(originalNote.Areas) == fmt.Sprint(modifiedNote.Areas) { unalteredFields.ReleaseNote.Areas = &originalNote.Areas } else { modifiedFields.ReleaseNote.Areas = &modifiedNote.Areas numChanges++ } if fmt.Sprint(originalNote.Feature) == fmt.Sprint(modifiedNote.Feature) { unalteredFields.ReleaseNote.Feature = &originalNote.Feature } else { modifiedFields.ReleaseNote.Feature = &modifiedNote.Feature numChanges++ } if fmt.Sprint(originalNote.ActionRequired) == fmt.Sprint(modifiedNote.ActionRequired) { unalteredFields.ReleaseNote.ActionRequired = &originalNote.ActionRequired } else { modifiedFields.ReleaseNote.ActionRequired = &modifiedNote.ActionRequired numChanges++ } // TODO: Implement after writing a documentation comparison func unalteredFields.ReleaseNote.Documentation = &originalNote.Documentation // Create the 
release note map for the editor: output := "---\n" + string(mapEditingInstructions) + "\n" if numChanges == 0 { // If there are no changes, present the user with the commented // map with the original values yamlCode, err := yaml.Marshal(&unalteredFields) if err != nil { return false, errors.Wrap(err, "marshalling release note to map") } output += "# " + strings.ReplaceAll(string(yamlCode), "\n", "\n# ") } else { // ... otherwise build a mixed map with the changes and the original // values commented out for reference yamlCode, err := yaml.Marshal(&modifiedFields) if err != nil { return false, errors.Wrap(err, "marshalling release note to map") } unalteredYAML, err := yaml.Marshal(&unalteredFields.ReleaseNote) if err != nil { return false, errors.Wrap(err, "marshalling release note to map") } output += string(yamlCode) + " # " + strings.ReplaceAll(string(unalteredYAML), "\n", "\n # ") } kubeEditor := editor.NewDefaultEditor([]string{"KUBE_EDITOR", "EDITOR"}) changes, _, err := kubeEditor.LaunchTempFile("map", ".yaml", bytes.NewReader([]byte(output))) if err != nil { return false, errors.Wrap(err, "while launching editor") } // If the map was not modified, we don't make any changes if string(changes) == output || string(changes) == "" { logrus.Info("Release notes map was not modified") return false, nil } // Verify that the new yaml is valid and can be serialized back into a Map testMap := notes.ReleaseNotesMap{} err = yaml.Unmarshal(changes, &testMap) if err != nil { logrus.Error("The YAML code has errors") return true, errors.Wrap(err, "while verifying if changes are a valid map") } if testMap.PR == 0 { logrus.Error("The yaml code does not have a PR number") return true, errors.New("Invalid map: the YAML code did not have a PR number") } // Remarshall the newyaml to save only the new values newYAML, err := yaml.Marshal(testMap) if err != nil { return true, errors.Wrap(err, "while re-marshaling new map") } // Write the new map, removing the instructions mapPath := 
filepath.Join(workDir, mapsMainDirectory, fmt.Sprintf("pr-%d-map.yaml", pr))
	err = ioutil.WriteFile(mapPath, newYAML, os.FileMode(0o644))
	if err != nil {
		logrus.Errorf("Error writing map to %s: %s", mapPath, err)
		// Disk write failed; return shouldRetryEditing=true so the caller can
		// offer the user another attempt instead of losing the edits
		return true, errors.Wrap(err, "writing modified release note map")
	}
	return false, nil
}

// createNotesWorkDir creates the release notes working directory
func createNotesWorkDir(releaseDir string) error {
	// Check that the working tree is complete:
	// The parent directory appears first in this slice, so each os.Mkdir call
	// below can rely on its parent already existing.
	for _, dirPath := range []string{
		filepath.Join(releaseDir, releaseNotesWorkDir),                       // Main work dir
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsMainDirectory),    // Maps directory
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsCVEDirectory),     // Maps for CVE data
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsSessionDirectory), // Editing session files
		filepath.Join(releaseDir, releaseNotesWorkDir, mapsThemesDirectory),  // Major themes directory
	} {
		// Only create directories that are missing; existing ones are kept as-is
		if !util.Exists(dirPath) {
			if err := os.Mkdir(dirPath, os.FileMode(0o755)); err != nil {
				return errors.Wrap(err, "creating working directory")
			}
		}
	}
	return nil
}
package quic

import (
	"context"
	"crypto/rand"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/lucas-clemente/quic-go/internal/ackhandler"
	"github.com/lucas-clemente/quic-go/internal/congestion"
	"github.com/lucas-clemente/quic-go/internal/crypto"
	"github.com/lucas-clemente/quic-go/internal/flowcontrol"
	"github.com/lucas-clemente/quic-go/internal/handshake"
	"github.com/lucas-clemente/quic-go/internal/protocol"
	"github.com/lucas-clemente/quic-go/internal/utils"
	"github.com/lucas-clemente/quic-go/internal/wire"
	"github.com/lucas-clemente/quic-go/qerr"
)

// unpacker decrypts a received packet and parses it into its frames.
type unpacker interface {
	Unpack(headerBinary []byte, hdr *wire.Header, data []byte) (*unpackedPacket, error)
}

// streamGetter looks up (or opens, for peer-initiated IDs) receive and send
// streams by their stream ID.
type streamGetter interface {
	GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
	GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
}

// streamManager owns all streams of a session: lookup, opening (sync and
// async, bidirectional and unidirectional), accepting, deletion, flow-control
// limit updates, and terminal teardown.
type streamManager interface {
	GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
	GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
	OpenStream() (Stream, error)
	OpenUniStream() (SendStream, error)
	OpenStreamSync() (Stream, error)
	OpenUniStreamSync() (SendStream, error)
	AcceptStream() (Stream, error)
	AcceptUniStream() (ReceiveStream, error)
	DeleteStream(protocol.StreamID) error
	UpdateLimits(*handshake.TransportParameters)
	HandleMaxStreamIDFrame(*wire.MaxStreamIDFrame) error
	CloseWithError(error)
}

// cryptoStreamHandler drives the crypto handshake and exposes the resulting
// connection state.
type cryptoStreamHandler interface {
	HandleCryptoStream() error
	ConnectionState() handshake.ConnectionState
}

// divNonceSetter is implemented by crypto setups that accept a
// diversification nonce.
type divNonceSetter interface {
	SetDiversificationNonce([]byte) error
}

// receivedPacket bundles a raw incoming packet with its parsed header,
// the sender's address, and the time it was received.
type receivedPacket struct {
	remoteAddr net.Addr
	header     *wire.Header
	data       []byte
	rcvTime    time.Time
}

// Indirections over the handshake constructors so they can be replaced in tests.
var (
	newCryptoSetup       = handshake.NewCryptoSetup
	newCryptoSetupClient = handshake.NewCryptoSetupClient
)

// closeError describes a pending session close.
type closeError struct {
	err error
	// remote: presumably true when the peer initiated the close — confirm at
	// the sites that construct closeError (not visible in this chunk)
	remote bool
	// sendClose: presumably whether a close notification should still be sent
	// to the peer — confirm at call sites
	sendClose bool
}

// A Session is a QUIC session
type session struct {
	sessionRunner sessionRunner

	destConnID protocol.ConnectionID
	srcConnID  protocol.ConnectionID

	perspective protocol.Perspective
version protocol.VersionNumber config *Config conn connection streamsMap streamManager cryptoStream cryptoStreamI rttStats *congestion.RTTStats sentPacketHandler ackhandler.SentPacketHandler receivedPacketHandler ackhandler.ReceivedPacketHandler streamFramer *streamFramer windowUpdateQueue *windowUpdateQueue connFlowController flowcontrol.ConnectionFlowController unpacker unpacker packer *packetPacker cryptoStreamHandler cryptoStreamHandler receivedPackets chan *receivedPacket sendingScheduled chan struct{} // closeChan is used to notify the run loop that it should terminate. closeChan chan closeError closeOnce sync.Once ctx context.Context ctxCancel context.CancelFunc // when we receive too many undecryptable packets during the handshake, we send a Public reset // but only after a time of protocol.PublicResetTimeout has passed undecryptablePackets []*receivedPacket receivedTooManyUndecrytablePacketsTime time.Time // this channel is passed to the CryptoSetup and receives the transport parameters, as soon as the peer sends them paramsChan <-chan handshake.TransportParameters // the handshakeEvent channel is passed to the CryptoSetup. // It receives when it makes sense to try decrypting undecryptable packets. 
handshakeEvent <-chan struct{} handshakeComplete bool receivedFirstPacket bool // since packet numbers start at 0, we can't use largestRcvdPacketNumber != 0 for this receivedFirstForwardSecurePacket bool lastRcvdPacketNumber protocol.PacketNumber // Used to calculate the next packet number from the truncated wire // representation, and sent back in public reset packets largestRcvdPacketNumber protocol.PacketNumber sessionCreationTime time.Time lastNetworkActivityTime time.Time // pacingDeadline is the time when the next packet should be sent pacingDeadline time.Time peerParams *handshake.TransportParameters timer *utils.Timer // keepAlivePingSent stores whether a Ping frame was sent to the peer or not // it is reset as soon as we receive a packet from the peer keepAlivePingSent bool logger utils.Logger } var _ Session = &session{} var _ streamSender = &session{} // newSession makes a new session func newSession( conn connection, sessionRunner sessionRunner, v protocol.VersionNumber, connectionID protocol.ConnectionID, scfg *handshake.ServerConfig, tlsConf *tls.Config, config *Config, logger utils.Logger, ) (quicSession, error) { paramsChan := make(chan handshake.TransportParameters) handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: sessionRunner, srcConnID: connectionID, destConnID: connectionID, perspective: protocol.PerspectiveServer, version: v, config: config, handshakeEvent: handshakeEvent, paramsChan: paramsChan, logger: logger, } s.preSetup() transportParams := &handshake.TransportParameters{ StreamFlowControlWindow: protocol.ReceiveStreamFlowControlWindow, ConnectionFlowControlWindow: protocol.ReceiveConnectionFlowControlWindow, MaxStreams: uint32(s.config.MaxIncomingStreams), IdleTimeout: s.config.IdleTimeout, } divNonce := make([]byte, 32) if _, err := rand.Read(divNonce); err != nil { return nil, err } cs, err := newCryptoSetup( s.cryptoStream, connectionID, s.conn.RemoteAddr(), s.version, divNonce, scfg, transportParams, 
s.config.Versions, s.config.AcceptCookie, paramsChan, handshakeEvent, s.logger, ) if err != nil { return nil, err } s.cryptoStreamHandler = cs s.unpacker = newPacketUnpackerGQUIC(cs, s.version) s.streamsMap = newStreamsMapLegacy(s.newStream, s.config.MaxIncomingStreams, s.perspective) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( connectionID, nil, // no src connection ID 1, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), divNonce, cs, s.streamFramer, s.perspective, s.version, ) return s, s.postSetup() } // declare this as a variable, so that we can it mock it in the tests var newClientSession = func( conn connection, sessionRunner sessionRunner, hostname string, v protocol.VersionNumber, connectionID protocol.ConnectionID, tlsConf *tls.Config, config *Config, initialVersion protocol.VersionNumber, negotiatedVersions []protocol.VersionNumber, // needed for validation of the GQUIC version negotiation logger utils.Logger, ) (quicSession, error) { paramsChan := make(chan handshake.TransportParameters) handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: sessionRunner, srcConnID: connectionID, destConnID: connectionID, perspective: protocol.PerspectiveClient, version: v, config: config, handshakeEvent: handshakeEvent, paramsChan: paramsChan, logger: logger, } s.preSetup() transportParams := &handshake.TransportParameters{ StreamFlowControlWindow: protocol.ReceiveStreamFlowControlWindow, ConnectionFlowControlWindow: protocol.ReceiveConnectionFlowControlWindow, MaxStreams: uint32(s.config.MaxIncomingStreams), IdleTimeout: s.config.IdleTimeout, OmitConnectionID: s.config.RequestConnectionIDOmission, } cs, err := newCryptoSetupClient( s.cryptoStream, hostname, connectionID, s.version, tlsConf, transportParams, paramsChan, handshakeEvent, initialVersion, negotiatedVersions, s.logger, ) if err != nil { return nil, err } s.cryptoStreamHandler = cs s.unpacker = 
newPacketUnpackerGQUIC(cs, s.version) s.streamsMap = newStreamsMapLegacy(s.newStream, s.config.MaxIncomingStreams, s.perspective) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( connectionID, nil, // no src connection ID 1, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), nil, // no diversification nonce cs, s.streamFramer, s.perspective, s.version, ) return s, s.postSetup() } func newTLSServerSession( conn connection, runner sessionRunner, destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, initialPacketNumber protocol.PacketNumber, config *Config, tls handshake.MintTLS, cryptoStreamConn *handshake.CryptoStreamConn, nullAEAD crypto.AEAD, peerParams *handshake.TransportParameters, v protocol.VersionNumber, logger utils.Logger, ) (quicSession, error) { handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: runner, config: config, srcConnID: srcConnID, destConnID: destConnID, perspective: protocol.PerspectiveServer, version: v, handshakeEvent: handshakeEvent, logger: logger, } s.preSetup() cs := handshake.NewCryptoSetupTLSServer( tls, cryptoStreamConn, nullAEAD, handshakeEvent, v, ) s.cryptoStreamHandler = cs s.streamsMap = newStreamsMap(s, s.newFlowController, s.config.MaxIncomingStreams, s.config.MaxIncomingUniStreams, s.perspective, s.version) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( s.destConnID, s.srcConnID, initialPacketNumber, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), nil, // no diversification nonce cs, s.streamFramer, s.perspective, s.version, ) if err := s.postSetup(); err != nil { return nil, err } s.peerParams = peerParams s.processTransportParameters(peerParams) s.unpacker = newPacketUnpacker(cs, s.version) return s, nil } // declare this as a variable, such that we can it mock it in the tests var newTLSClientSession = func( conn connection, runner sessionRunner, hostname 
string, v protocol.VersionNumber, destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, config *Config, tls handshake.MintTLS, paramsChan <-chan handshake.TransportParameters, initialPacketNumber protocol.PacketNumber, logger utils.Logger, ) (quicSession, error) { handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: runner, config: config, srcConnID: srcConnID, destConnID: destConnID, perspective: protocol.PerspectiveClient, version: v, handshakeEvent: handshakeEvent, paramsChan: paramsChan, logger: logger, } s.preSetup() tls.SetCryptoStream(s.cryptoStream) cs, err := handshake.NewCryptoSetupTLSClient( s.cryptoStream, s.destConnID, hostname, handshakeEvent, tls, v, ) if err != nil { return nil, err } s.cryptoStreamHandler = cs s.unpacker = newPacketUnpacker(cs, s.version) s.streamsMap = newStreamsMap(s, s.newFlowController, s.config.MaxIncomingStreams, s.config.MaxIncomingUniStreams, s.perspective, s.version) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( s.destConnID, s.srcConnID, initialPacketNumber, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), nil, // no diversification nonce cs, s.streamFramer, s.perspective, s.version, ) return s, s.postSetup() } func (s *session) preSetup() { s.rttStats = &congestion.RTTStats{} s.sentPacketHandler = ackhandler.NewSentPacketHandler(s.rttStats, s.logger, s.version) s.connFlowController = flowcontrol.NewConnectionFlowController( protocol.ReceiveConnectionFlowControlWindow, protocol.ByteCount(s.config.MaxReceiveConnectionFlowControlWindow), s.onHasConnectionWindowUpdate, s.rttStats, s.logger, ) s.cryptoStream = s.newCryptoStream() } func (s *session) postSetup() error { s.receivedPackets = make(chan *receivedPacket, protocol.MaxSessionUnprocessedPackets) s.closeChan = make(chan closeError, 1) s.sendingScheduled = make(chan struct{}, 1) s.undecryptablePackets = make([]*receivedPacket, 0, protocol.MaxUndecryptablePackets) 
	s.ctx, s.ctxCancel = context.WithCancel(context.Background())

	s.timer = utils.NewTimer()
	now := time.Now()
	s.lastNetworkActivityTime = now
	s.sessionCreationTime = now

	s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.rttStats, s.logger, s.version)
	s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.cryptoStream, s.connFlowController, s.packer.QueueControlFrame)
	return nil
}

// run the session main loop
func (s *session) run() error {
	defer s.ctxCancel()

	go func() {
		if err := s.cryptoStreamHandler.HandleCryptoStream(); err != nil {
			if err == handshake.ErrCloseSessionForRetry {
				// close without sending a CONNECTION_CLOSE (see destroy)
				s.destroy(err)
			} else {
				s.closeLocal(err)
			}
		}
	}()

	var closeErr closeError

runLoop:
	for {
		// Close immediately if requested
		select {
		case closeErr = <-s.closeChan:
			break runLoop
		case _, ok := <-s.handshakeEvent:
			// when the handshake is completed, the channel will be closed
			s.handleHandshakeEvent(!ok)
		default:
		}

		s.maybeResetTimer()

		select {
		case closeErr = <-s.closeChan:
			break runLoop
		case <-s.timer.Chan():
			s.timer.SetRead()
			// We do all the interesting stuff after the switch statement, so
			// nothing to see here.
		case <-s.sendingScheduled:
			// We do all the interesting stuff after the switch statement, so
			// nothing to see here.
		case p := <-s.receivedPackets:
			err := s.handlePacketImpl(p)
			if err != nil {
				if qErr, ok := err.(*qerr.QuicError); ok && qErr.ErrorCode == qerr.DecryptionFailure {
					// the packet might just have arrived before the right key was available
					s.tryQueueingUndecryptablePacket(p)
					continue
				}
				s.closeLocal(err)
				continue
			}
			// This is a bit unclean, but works properly, since the packet always
			// begins with the public header and we never copy it.
			putPacketBuffer(&p.header.Raw)
		case p := <-s.paramsChan:
			s.processTransportParameters(&p)
		case _, ok := <-s.handshakeEvent:
			// when the handshake is completed, the channel will be closed
			s.handleHandshakeEvent(!ok)
		}

		now := time.Now()
		if timeout := s.sentPacketHandler.GetAlarmTimeout(); !timeout.IsZero() && timeout.Before(now) {
			// This could cause packets to be retransmitted.
			// Check it before trying to send packets.
			if err := s.sentPacketHandler.OnAlarm(); err != nil {
				s.closeLocal(err)
			}
		}

		var pacingDeadline time.Time
		if s.pacingDeadline.IsZero() { // the timer didn't have a pacing deadline set
			pacingDeadline = s.sentPacketHandler.TimeUntilSend()
		}
		if s.config.KeepAlive && !s.keepAlivePingSent && s.handshakeComplete && time.Since(s.lastNetworkActivityTime) >= s.peerParams.IdleTimeout/2 {
			// send the PING frame since there is no activity in the session
			s.packer.QueueControlFrame(&wire.PingFrame{})
			s.keepAlivePingSent = true
		} else if !pacingDeadline.IsZero() && now.Before(pacingDeadline) {
			// If we get to this point before the pacing deadline, we should wait until that deadline.
			// This can happen when scheduleSending is called, or a packet is received.
			// Set the timer and restart the run loop.
			s.pacingDeadline = pacingDeadline
			continue
		}

		if err := s.sendPackets(); err != nil {
			s.closeLocal(err)
		}

		if !s.receivedTooManyUndecrytablePacketsTime.IsZero() && s.receivedTooManyUndecrytablePacketsTime.Add(protocol.PublicResetTimeout).Before(now) && len(s.undecryptablePackets) != 0 {
			s.closeLocal(qerr.Error(qerr.DecryptionFailure, "too many undecryptable packets received"))
		}
		if !s.handshakeComplete && now.Sub(s.sessionCreationTime) >= s.config.HandshakeTimeout {
			s.closeLocal(qerr.Error(qerr.HandshakeTimeout, "Crypto handshake did not complete in time."))
		}
		if s.handshakeComplete && now.Sub(s.lastNetworkActivityTime) >= s.config.IdleTimeout {
			s.closeLocal(qerr.Error(qerr.NetworkIdleTimeout, "No recent network activity."))
		}
	}

	if err := s.handleCloseError(closeErr); err != nil {
		s.logger.Infof("Handling close error failed: %s", err)
	}
	s.logger.Infof("Connection %s closed.", s.srcConnID)
	if closeErr.err != handshake.ErrCloseSessionForRetry {
		s.sessionRunner.removeConnectionID(s.srcConnID)
	}
	return closeErr.err
}

// Context returns a context that is cancelled when the run loop terminates.
func (s *session) Context() context.Context {
	return s.ctx
}

func (s *session) ConnectionState() ConnectionState { return
s.cryptoStreamHandler.ConnectionState() }

// maybeResetTimer resets the run-loop timer to the earliest pending deadline:
// idle / keep-alive, ACK alarm, loss-detection alarm, handshake timeout,
// public-reset timeout, and the pacing deadline.
func (s *session) maybeResetTimer() {
	var deadline time.Time
	if s.config.KeepAlive && s.handshakeComplete && !s.keepAlivePingSent {
		// wake up half-way through the peer's idle timeout, so a keep-alive PING can be sent
		deadline = s.lastNetworkActivityTime.Add(s.peerParams.IdleTimeout / 2)
	} else {
		deadline = s.lastNetworkActivityTime.Add(s.config.IdleTimeout)
	}

	if ackAlarm := s.receivedPacketHandler.GetAlarmTimeout(); !ackAlarm.IsZero() {
		deadline = utils.MinTime(deadline, ackAlarm)
	}
	if lossTime := s.sentPacketHandler.GetAlarmTimeout(); !lossTime.IsZero() {
		deadline = utils.MinTime(deadline, lossTime)
	}
	if !s.handshakeComplete {
		handshakeDeadline := s.sessionCreationTime.Add(s.config.HandshakeTimeout)
		deadline = utils.MinTime(deadline, handshakeDeadline)
	}
	if !s.receivedTooManyUndecrytablePacketsTime.IsZero() {
		deadline = utils.MinTime(deadline, s.receivedTooManyUndecrytablePacketsTime.Add(protocol.PublicResetTimeout))
	}
	if !s.pacingDeadline.IsZero() {
		deadline = utils.MinTime(deadline, s.pacingDeadline)
	}

	s.timer.Reset(deadline)
}

// handleHandshakeEvent reacts to an event signaled by the crypto setup.
// completed is true once the handshake finished (the event channel was closed).
func (s *session) handleHandshakeEvent(completed bool) {
	if !completed {
		// new keys might be available now
		s.tryDecryptingQueuedPackets()
		return
	}
	s.handshakeComplete = true
	s.handshakeEvent = nil // prevent this case from ever being selected again
	s.sessionRunner.onHandshakeComplete(s)

	// In gQUIC, the server completes the handshake first (after sending the SHLO).
	// In TLS 1.3, the client completes the handshake first (after sending the CFIN).
	// We need to make sure they learn about the peer completing the handshake,
	// in order to stop retransmitting handshake packets.
	// They will stop retransmitting handshake packets when receiving the first forward-secure packet.
	// We need to make sure that a retransmittable forward-secure packet is sent,
	// independent from the application protocol.
	if (!s.version.UsesTLS() && s.perspective == protocol.PerspectiveClient) ||
		(s.version.UsesTLS() && s.perspective == protocol.PerspectiveServer) {
		s.queueControlFrame(&wire.PingFrame{})
		s.sentPacketHandler.SetHandshakeComplete()
	}
}

// handlePacketImpl unpacks and processes a single received packet.
// It returns an error for packets that must close the session; decryption
// failures are surfaced as qerr.DecryptionFailure so the caller can queue
// the packet for a later retry.
func (s *session) handlePacketImpl(p *receivedPacket) error {
	hdr := p.header

	// The server can change the source connection ID with the first Handshake packet.
	// After this, all packets with a different source connection have to be ignored.
	if s.receivedFirstPacket && hdr.IsLongHeader && !hdr.SrcConnectionID.Equal(s.destConnID) {
		s.logger.Debugf("Dropping packet with unexpected source connection ID: %s (expected %s)", p.header.SrcConnectionID, s.destConnID)
		return nil
	}

	if s.perspective == protocol.PerspectiveClient {
		if divNonce := p.header.DiversificationNonce; len(divNonce) > 0 {
			if err := s.cryptoStreamHandler.(divNonceSetter).SetDiversificationNonce(divNonce); err != nil {
				return err
			}
		}
	}

	if p.rcvTime.IsZero() {
		// To simplify testing
		p.rcvTime = time.Now()
	}

	// Calculate packet number
	hdr.PacketNumber = protocol.InferPacketNumber(
		hdr.PacketNumberLen,
		s.largestRcvdPacketNumber,
		hdr.PacketNumber,
		s.version,
	)

	packet, err := s.unpacker.Unpack(hdr.Raw, hdr, p.data)
	if s.logger.Debug() {
		if err != nil {
			s.logger.Debugf("<- Reading packet 0x%x (%d bytes) for connection %s", hdr.PacketNumber, len(p.data)+len(hdr.Raw), hdr.DestConnectionID)
		} else {
			s.logger.Debugf("<- Reading packet 0x%x (%d bytes) for connection %s, %s", hdr.PacketNumber, len(p.data)+len(hdr.Raw), hdr.DestConnectionID, packet.encryptionLevel)
		}
		hdr.Log(s.logger)
	}
	// if the decryption failed, this might be a packet sent by an attacker
	if err != nil {
		return err
	}

	// The server can change the source connection ID with the first Handshake packet.
	if s.perspective == protocol.PerspectiveClient && !s.receivedFirstPacket && hdr.IsLongHeader && !hdr.SrcConnectionID.Equal(s.destConnID) {
		s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", hdr.SrcConnectionID)
		s.destConnID = hdr.SrcConnectionID
		s.packer.ChangeDestConnectionID(s.destConnID)
	}

	s.receivedFirstPacket = true
	s.lastNetworkActivityTime = p.rcvTime
	s.keepAlivePingSent = false

	// In gQUIC, the server completes the handshake first (after sending the SHLO).
	// In TLS 1.3, the client completes the handshake first (after sending the CFIN).
	// We know that the peer completed the handshake as soon as we receive a forward-secure packet.
	if (!s.version.UsesTLS() && s.perspective == protocol.PerspectiveServer) ||
		(s.version.UsesTLS() && s.perspective == protocol.PerspectiveClient) {
		if !s.receivedFirstForwardSecurePacket && packet.encryptionLevel == protocol.EncryptionForwardSecure {
			s.receivedFirstForwardSecurePacket = true
			s.sentPacketHandler.SetHandshakeComplete()
		}
	}

	s.lastRcvdPacketNumber = hdr.PacketNumber
	// Only do this after decrypting, so we are sure the packet is not attacker-controlled
	s.largestRcvdPacketNumber = utils.MaxPacketNumber(s.largestRcvdPacketNumber, hdr.PacketNumber)

	// If this is a Retry packet, there's no need to send an ACK.
	// The session will be closed and recreated as soon as the crypto setup processed the HRR.
	if hdr.Type != protocol.PacketTypeRetry {
		isRetransmittable := ackhandler.HasRetransmittableFrames(packet.frames)
		if err := s.receivedPacketHandler.ReceivedPacket(hdr.PacketNumber, p.rcvTime, isRetransmittable); err != nil {
			return err
		}
	}

	return s.handleFrames(packet.frames, packet.encryptionLevel)
}

// handleFrames dispatches every frame of a packet to its handler.
// Processing stops at the first frame that returns an error.
func (s *session) handleFrames(fs []wire.Frame, encLevel protocol.EncryptionLevel) error {
	for _, ff := range fs {
		var err error
		wire.LogFrame(s.logger, ff, false)
		switch frame := ff.(type) {
		case *wire.StreamFrame:
			err = s.handleStreamFrame(frame, encLevel)
		case *wire.AckFrame:
			err = s.handleAckFrame(frame, encLevel)
		case *wire.ConnectionCloseFrame:
			s.closeRemote(qerr.Error(frame.ErrorCode, frame.ReasonPhrase))
		case *wire.GoawayFrame:
			err = errors.New("unimplemented: handling GOAWAY frames")
		case *wire.StopWaitingFrame: // ignore STOP_WAITINGs
		case *wire.RstStreamFrame:
			err = s.handleRstStreamFrame(frame)
		case *wire.MaxDataFrame:
			s.handleMaxDataFrame(frame)
		case *wire.MaxStreamDataFrame:
			err = s.handleMaxStreamDataFrame(frame)
		case *wire.MaxStreamIDFrame:
			err = s.handleMaxStreamIDFrame(frame)
		case *wire.BlockedFrame:
		case *wire.StreamBlockedFrame:
		case *wire.StreamIDBlockedFrame:
		case *wire.StopSendingFrame:
			err = s.handleStopSendingFrame(frame)
		case *wire.PingFrame:
		case *wire.PathChallengeFrame:
			s.handlePathChallengeFrame(frame)
		case *wire.PathResponseFrame:
			// since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs
			err = errors.New("unexpected PATH_RESPONSE frame")
		default:
			return errors.New("Session BUG: unexpected frame type")
		}

		if err != nil {
			return err
		}
	}
	return nil
}

// handlePacket is called by the server with a new packet
func (s *session) handlePacket(p *receivedPacket) {
	// Discard packets once the amount of queued packets is larger than
	// the channel size, protocol.MaxSessionUnprocessedPackets
	select {
	case s.receivedPackets <- p:
	default:
	}
}

// handleStreamFrame routes a STREAM frame to the crypto stream or to the
// matching receive stream; unencrypted data on a regular stream is an error.
func (s *session) handleStreamFrame(frame *wire.StreamFrame, encLevel protocol.EncryptionLevel) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		if frame.FinBit {
			return errors.New("Received STREAM frame with FIN bit for the crypto stream")
		}
		return s.cryptoStream.handleStreamFrame(frame)
	} else if encLevel <= protocol.EncryptionUnencrypted {
		return qerr.Error(qerr.UnencryptedStreamData, fmt.Sprintf("received unencrypted stream data on stream %d", frame.StreamID))
	}
	str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// Stream is closed and already garbage collected
		// ignore this StreamFrame
		return nil
	}
	return str.handleStreamFrame(frame)
}

func (s *session) handleMaxDataFrame(frame *wire.MaxDataFrame) {
	s.connFlowController.UpdateSendWindow(frame.ByteOffset)
}

func (s *session) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		s.cryptoStream.handleMaxStreamDataFrame(frame)
		return nil
	}
	str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// stream is closed and already garbage collected
		return nil
	}
	str.handleMaxStreamDataFrame(frame)
	return nil
}

func (s *session) handleMaxStreamIDFrame(frame *wire.MaxStreamIDFrame) error {
	return s.streamsMap.HandleMaxStreamIDFrame(frame)
}

func (s *session) handleRstStreamFrame(frame *wire.RstStreamFrame) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		return errors.New("Received RST_STREAM frame for the crypto stream")
	}
	str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// stream is closed and already garbage collected
		return nil
	}
	return str.handleRstStreamFrame(frame)
}

func (s *session) handleStopSendingFrame(frame *wire.StopSendingFrame) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		return errors.New("Received a STOP_SENDING frame for the crypto stream")
	}
	str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str ==
	nil {
		// stream is closed and already garbage collected
		return nil
	}
	str.handleStopSendingFrame(frame)
	return nil
}

func (s *session) handlePathChallengeFrame(frame *wire.PathChallengeFrame) {
	// echo the challenge data back in a PATH_RESPONSE
	s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data})
}

func (s *session) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error {
	if err := s.sentPacketHandler.ReceivedAck(frame, s.lastRcvdPacketNumber, encLevel, s.lastNetworkActivityTime); err != nil {
		return err
	}
	s.receivedPacketHandler.IgnoreBelow(s.sentPacketHandler.GetLowestPacketNotConfirmedAcked())
	return nil
}

// closeLocal closes the session and send a CONNECTION_CLOSE containing the error
func (s *session) closeLocal(e error) {
	s.closeOnce.Do(func() {
		s.closeChan <- closeError{err: e, sendClose: true, remote: false}
	})
}

// destroy closes the session without sending the error on the wire
func (s *session) destroy(e error) {
	s.closeOnce.Do(func() {
		s.closeChan <- closeError{err: e, sendClose: false, remote: false}
	})
}

func (s *session) closeRemote(e error) {
	s.closeOnce.Do(func() {
		s.closeChan <- closeError{err: e, remote: true}
	})
}

// Close the connection. It sends a qerr.PeerGoingAway.
// It waits until the run loop has stopped before returning
func (s *session) Close() error {
	s.closeLocal(nil)
	<-s.ctx.Done()
	return nil
}

func (s *session) CloseWithError(code protocol.ApplicationErrorCode, e error) error {
	s.closeLocal(qerr.Error(qerr.ErrorCode(code), e.Error()))
	<-s.ctx.Done()
	return nil
}

// handleCloseError tears down the streams and, for local closes, sends either
// a Public Reset or a CONNECTION_CLOSE, depending on the error code.
func (s *session) handleCloseError(closeErr closeError) error {
	if closeErr.err == nil {
		closeErr.err = qerr.PeerGoingAway
	}

	var quicErr *qerr.QuicError
	var ok bool
	if quicErr, ok = closeErr.err.(*qerr.QuicError); !ok {
		quicErr = qerr.ToQuicError(closeErr.err)
	}
	// Don't log 'normal' reasons
	if quicErr.ErrorCode == qerr.PeerGoingAway || quicErr.ErrorCode == qerr.NetworkIdleTimeout {
		s.logger.Infof("Closing connection %s.", s.srcConnID)
	} else {
		s.logger.Errorf("Closing session with error: %s", closeErr.err.Error())
	}

	s.cryptoStream.closeForShutdown(quicErr)
	s.streamsMap.CloseWithError(quicErr)

	if !closeErr.sendClose {
		return nil
	}

	// If this is a remote close we're done here
	if closeErr.remote {
		return nil
	}

	if quicErr.ErrorCode == qerr.DecryptionFailure || quicErr == handshake.ErrNSTPExperiment {
		return s.sendPublicReset(s.lastRcvdPacketNumber)
	}
	return s.sendConnectionClose(quicErr)
}

// processTransportParameters applies the peer's transport parameters to the
// streams map, the packer, and connection-level flow control.
func (s *session) processTransportParameters(params *handshake.TransportParameters) {
	s.peerParams = params
	s.streamsMap.UpdateLimits(params)
	if params.OmitConnectionID {
		s.packer.SetOmitConnectionID()
	}
	if params.MaxPacketSize != 0 {
		s.packer.SetMaxPacketSize(params.MaxPacketSize)
	}
	s.connFlowController.UpdateSendWindow(params.ConnectionFlowControlWindow)
	// the crypto stream is the only open stream at this moment
	// so we don't need to update stream flow control windows
}

// sendPackets sends packets according to the current send mode; the send loop
// itself continues on the next line of this file.
func (s *session) sendPackets() error {
	s.pacingDeadline = time.Time{}

	sendMode := s.sentPacketHandler.SendMode()
	if sendMode == ackhandler.SendNone { // shortcut: return immediately if there's nothing to send
		return nil
	}

	numPackets := s.sentPacketHandler.ShouldSendNumPackets()
	var numPacketsSent
sendLoop:
	for {
		switch sendMode {
		case ackhandler.SendNone:
			break sendLoop
		case ackhandler.SendAck:
			// We can at most send a single ACK only packet.
			// There will only be a new ACK after receiving new packets.
			// SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
			return s.maybeSendAckOnlyPacket()
		case ackhandler.SendRTO:
			// try to send a retransmission first
			sentPacket, err := s.maybeSendRetransmission()
			if err != nil {
				return err
			}
			if !sentPacket {
				// In RTO mode, a probe packet has to be sent.
				// Add a PING frame to make sure a (retransmittable) packet will be sent.
				s.queueControlFrame(&wire.PingFrame{})
				sentPacket, err := s.sendPacket()
				if err != nil {
					return err
				}
				if !sentPacket {
					return errors.New("session BUG: expected a packet to be sent in RTO mode")
				}
			}
			numPacketsSent++
		case ackhandler.SendTLP:
			// In TLP mode, a probe packet has to be sent.
			// Add a PING frame to make sure a (retransmittable) packet will be sent.
			s.queueControlFrame(&wire.PingFrame{})
			sentPacket, err := s.sendPacket()
			if err != nil {
				return err
			}
			if !sentPacket {
				return errors.New("session BUG: expected a packet to be sent in TLP mode")
			}
			return nil
		case ackhandler.SendRetransmission:
			sentPacket, err := s.maybeSendRetransmission()
			if err != nil {
				return err
			}
			if sentPacket {
				numPacketsSent++
				// This can happen if a retransmission queued, but it wasn't necessary to send it.
				// e.g. when an Initial is queued, but we already received a packet from the server.
			}
		case ackhandler.SendAny:
			sentPacket, err := s.sendPacket()
			if err != nil {
				return err
			}
			if !sentPacket {
				break sendLoop
			}
			numPacketsSent++
		default:
			return fmt.Errorf("BUG: invalid send mode %d", sendMode)
		}
		if numPacketsSent >= numPackets {
			break
		}
		sendMode = s.sentPacketHandler.SendMode()
	}
	// Only start the pacing timer if we sent as many packets as we were allowed.
	// There will probably be more to send when calling sendPacket again.
	if numPacketsSent == numPackets {
		s.pacingDeadline = s.sentPacketHandler.TimeUntilSend()
	}
	return nil
}

// maybeSendAckOnlyPacket sends a packet containing only an ACK (plus, for
// gQUIC, a STOP_WAITING), if an ACK is currently pending.
func (s *session) maybeSendAckOnlyPacket() error {
	ack := s.receivedPacketHandler.GetAckFrame()
	if ack == nil {
		return nil
	}
	s.packer.QueueControlFrame(ack)

	if s.version.UsesStopWaitingFrames() { // for gQUIC, maybe add a STOP_WAITING
		if swf := s.sentPacketHandler.GetStopWaitingFrame(false); swf != nil {
			s.packer.QueueControlFrame(swf)
		}
	}
	packet, err := s.packer.PackAckPacket()
	if err != nil {
		return err
	}
	s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket())
	return s.sendPackedPacket(packet)
}

// maybeSendRetransmission sends retransmissions for at most one packet.
// It takes care that Initials aren't retransmitted, if a packet from the server was already received.
func (s *session) maybeSendRetransmission() (bool, error) {
	var retransmitPacket *ackhandler.Packet
	for {
		retransmitPacket = s.sentPacketHandler.DequeuePacketForRetransmission()
		if retransmitPacket == nil {
			return false, nil
		}

		// Don't retransmit Initial packets if we already received a response.
		// An Initial might have been retransmitted multiple times before we receive a response.
		// As soon as we receive one response, we don't need to send any more Initials.
		if s.receivedFirstPacket && retransmitPacket.PacketType == protocol.PacketTypeInitial {
			s.logger.Debugf("Skipping retransmission of packet %d. 
Already received a response to an Initial.", retransmitPacket.PacketNumber)
			continue
		}
		break
	}

	if retransmitPacket.EncryptionLevel != protocol.EncryptionForwardSecure {
		s.logger.Debugf("Dequeueing handshake retransmission for packet 0x%x", retransmitPacket.PacketNumber)
	} else {
		s.logger.Debugf("Dequeueing retransmission for packet 0x%x", retransmitPacket.PacketNumber)
	}

	if s.version.UsesStopWaitingFrames() {
		s.packer.QueueControlFrame(s.sentPacketHandler.GetStopWaitingFrame(true))
	}
	packets, err := s.packer.PackRetransmission(retransmitPacket)
	if err != nil {
		return false, err
	}
	ackhandlerPackets := make([]*ackhandler.Packet, len(packets))
	for i, packet := range packets {
		ackhandlerPackets[i] = packet.ToAckHandlerPacket()
	}
	s.sentPacketHandler.SentPacketsAsRetransmission(ackhandlerPackets, retransmitPacket.PacketNumber)
	for _, packet := range packets {
		if err := s.sendPackedPacket(packet); err != nil {
			return false, err
		}
	}
	return true, nil
}

// sendPacket packs and sends a single packet, queueing any pending BLOCKED,
// window-update and ACK frames first. It reports whether a packet was sent.
func (s *session) sendPacket() (bool, error) {
	if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked {
		s.packer.QueueControlFrame(&wire.BlockedFrame{Offset: offset})
	}
	s.windowUpdateQueue.QueueAll()

	if ack := s.receivedPacketHandler.GetAckFrame(); ack != nil {
		s.packer.QueueControlFrame(ack)
		if s.version.UsesStopWaitingFrames() {
			if swf := s.sentPacketHandler.GetStopWaitingFrame(false); swf != nil {
				s.packer.QueueControlFrame(swf)
			}
		}
	}

	packet, err := s.packer.PackPacket()
	if err != nil || packet == nil {
		return false, err
	}
	s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket())
	if err := s.sendPackedPacket(packet); err != nil {
		return false, err
	}
	return true, nil
}

func (s *session) sendPackedPacket(packet *packedPacket) error {
	// return the packet buffer to the pool once it has been written out
	defer putPacketBuffer(&packet.raw)
	s.logPacket(packet)
	return s.conn.Write(packet.raw)
}

func (s *session) sendConnectionClose(quicErr *qerr.QuicError) error {
	packet, err := s.packer.PackConnectionClose(&wire.ConnectionCloseFrame{
		ErrorCode:    quicErr.ErrorCode,
		ReasonPhrase:
	quicErr.ErrorMessage,
	})
	if err != nil {
		return err
	}
	s.logPacket(packet)
	return s.conn.Write(packet.raw)
}

// logPacket logs an outgoing packet, including all of its frames, at debug level.
func (s *session) logPacket(packet *packedPacket) {
	if !s.logger.Debug() {
		// We don't need to allocate the slices for calling the format functions
		return
	}
	s.logger.Debugf("-> Sending packet 0x%x (%d bytes) for connection %s, %s", packet.header.PacketNumber, len(packet.raw), s.srcConnID, packet.encryptionLevel)
	packet.header.Log(s.logger)
	for _, frame := range packet.frames {
		wire.LogFrame(s.logger, frame, true)
	}
}

// GetOrOpenStream either returns an existing stream, a newly opened stream, or nil if a stream with the provided ID is already closed.
// It is *only* needed for gQUIC's H2.
// It will be removed as soon as gQUIC moves towards the IETF H2/QUIC stream mapping.
func (s *session) GetOrOpenStream(id protocol.StreamID) (Stream, error) {
	str, err := s.streamsMap.GetOrOpenSendStream(id)
	if str != nil {
		if bstr, ok := str.(Stream); ok {
			return bstr, err
		}
		return nil, fmt.Errorf("Stream %d is not a bidirectional stream", id)
	}
	// make sure to return an actual nil value here, not a Stream with value nil
	return nil, err
}

// AcceptStream returns the next stream opened by the peer
func (s *session) AcceptStream() (Stream, error) {
	return s.streamsMap.AcceptStream()
}

func (s *session) AcceptUniStream() (ReceiveStream, error) {
	return s.streamsMap.AcceptUniStream()
}

// OpenStream opens a stream
func (s *session) OpenStream() (Stream, error) {
	return s.streamsMap.OpenStream()
}

func (s *session) OpenStreamSync() (Stream, error) {
	return s.streamsMap.OpenStreamSync()
}

func (s *session) OpenUniStream() (SendStream, error) {
	return s.streamsMap.OpenUniStream()
}

func (s *session) OpenUniStreamSync() (SendStream, error) {
	return s.streamsMap.OpenUniStreamSync()
}

func (s *session) newStream(id protocol.StreamID) streamI {
	flowController := s.newFlowController(id)
	return newStream(id, s, flowController, s.version)
}

// newFlowController creates the stream-level flow controller for a stream.
// The peer's initial send window is only known once transport parameters arrived.
func (s *session) newFlowController(id protocol.StreamID) flowcontrol.StreamFlowController {
	var initialSendWindow protocol.ByteCount
	if s.peerParams != nil {
		initialSendWindow = s.peerParams.StreamFlowControlWindow
	}
	return flowcontrol.NewStreamFlowController(
		id,
		s.version.StreamContributesToConnectionFlowControl(id),
		s.connFlowController,
		protocol.ReceiveStreamFlowControlWindow,
		protocol.ByteCount(s.config.MaxReceiveStreamFlowControlWindow),
		initialSendWindow,
		s.onHasStreamWindowUpdate,
		s.rttStats,
		s.logger,
	)
}

func (s *session) newCryptoStream() cryptoStreamI {
	id := s.version.CryptoStreamID()
	flowController := flowcontrol.NewStreamFlowController(
		id,
		s.version.StreamContributesToConnectionFlowControl(id),
		s.connFlowController,
		protocol.ReceiveStreamFlowControlWindow,
		protocol.ByteCount(s.config.MaxReceiveStreamFlowControlWindow),
		0,
		s.onHasStreamWindowUpdate,
		s.rttStats,
		s.logger,
	)
	return newCryptoStream(s, flowController, s.version)
}

func (s *session) sendPublicReset(rejectedPacketNumber protocol.PacketNumber) error {
	s.logger.Infof("Sending public reset for connection %x, packet number %d", s.destConnID, rejectedPacketNumber)
	return s.conn.Write(wire.WritePublicReset(s.destConnID, rejectedPacketNumber, 0))
}

// scheduleSending signals that we have data for sending
func (s *session) scheduleSending() {
	select {
	case s.sendingScheduled <- struct{}{}:
	default:
	}
}

// tryQueueingUndecryptablePacket stores a packet that failed decryption during
// the handshake, so it can be retried once new keys are available.
func (s *session) tryQueueingUndecryptablePacket(p *receivedPacket) {
	if s.handshakeComplete {
		s.logger.Debugf("Received undecryptable packet from %s after the handshake: %#v, %d bytes data", p.remoteAddr.String(), p.header, len(p.data))
		return
	}
	if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
		// if this is the first time the undecryptablePackets runs full, start the timer to send a Public Reset
		if s.receivedTooManyUndecrytablePacketsTime.IsZero() {
			s.receivedTooManyUndecrytablePacketsTime = time.Now()
			s.maybeResetTimer()
		}
		s.logger.Infof("Dropping undecrytable packet 0x%x (undecryptable packet queue full)",
			p.header.PacketNumber)
		return
	}
	s.logger.Infof("Queueing packet 0x%x for later decryption", p.header.PacketNumber)
	s.undecryptablePackets = append(s.undecryptablePackets, p)
}

func (s *session) tryDecryptingQueuedPackets() {
	for _, p := range s.undecryptablePackets {
		s.handlePacket(p)
	}
	s.undecryptablePackets = s.undecryptablePackets[:0]
}

// queueControlFrame queues a control frame and wakes up the run loop.
func (s *session) queueControlFrame(f wire.Frame) {
	s.packer.QueueControlFrame(f)
	s.scheduleSending()
}

func (s *session) onHasStreamWindowUpdate(id protocol.StreamID) {
	s.windowUpdateQueue.AddStream(id)
	s.scheduleSending()
}

func (s *session) onHasConnectionWindowUpdate() {
	s.windowUpdateQueue.AddConnection()
	s.scheduleSending()
}

func (s *session) onHasStreamData(id protocol.StreamID) {
	s.streamFramer.AddActiveStream(id)
	s.scheduleSending()
}

func (s *session) onStreamCompleted(id protocol.StreamID) {
	if err := s.streamsMap.DeleteStream(id); err != nil {
		s.closeLocal(err)
	}
}

func (s *session) LocalAddr() net.Addr {
	return s.conn.LocalAddr()
}

func (s *session) RemoteAddr() net.Addr {
	return s.conn.RemoteAddr()
}

func (s *session) getCryptoStream() cryptoStreamI {
	return s.cryptoStream
}

func (s *session) GetVersion() protocol.VersionNumber {
	return s.version
}

// NOTE(review): the text below ("log keep alive packets") looks like a stray
// commit-message line, followed by a second, older copy of this file's header
// and declarations — presumably a concatenation artifact in this chunk; confirm
// against the repository history before cleaning it up.
log keep alive packets

package quic

import (
	"context"
	"crypto/rand"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/lucas-clemente/quic-go/internal/ackhandler"
	"github.com/lucas-clemente/quic-go/internal/congestion"
	"github.com/lucas-clemente/quic-go/internal/crypto"
	"github.com/lucas-clemente/quic-go/internal/flowcontrol"
	"github.com/lucas-clemente/quic-go/internal/handshake"
	"github.com/lucas-clemente/quic-go/internal/protocol"
	"github.com/lucas-clemente/quic-go/internal/utils"
	"github.com/lucas-clemente/quic-go/internal/wire"
	"github.com/lucas-clemente/quic-go/qerr"
)

// unpacker decrypts a packet and parses its frames.
type unpacker interface {
	Unpack(headerBinary []byte, hdr *wire.Header, data []byte) (*unpackedPacket, error)
}

type streamGetter interface {
	GetOrOpenReceiveStream(protocol.StreamID)
(receiveStreamI, error)
	GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
}

// streamManager is the interface the session uses to manage all of its streams.
type streamManager interface {
	GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
	GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
	OpenStream() (Stream, error)
	OpenUniStream() (SendStream, error)
	OpenStreamSync() (Stream, error)
	OpenUniStreamSync() (SendStream, error)
	AcceptStream() (Stream, error)
	AcceptUniStream() (ReceiveStream, error)
	DeleteStream(protocol.StreamID) error
	UpdateLimits(*handshake.TransportParameters)
	HandleMaxStreamIDFrame(*wire.MaxStreamIDFrame) error
	CloseWithError(error)
}

type cryptoStreamHandler interface {
	HandleCryptoStream() error
	ConnectionState() handshake.ConnectionState
}

type divNonceSetter interface {
	SetDiversificationNonce([]byte) error
}

// receivedPacket bundles a packet with its sender address and receive time.
type receivedPacket struct {
	remoteAddr net.Addr
	header     *wire.Header
	data       []byte
	rcvTime    time.Time
}

var (
	newCryptoSetup       = handshake.NewCryptoSetup
	newCryptoSetupClient = handshake.NewCryptoSetupClient
)

type closeError struct {
	err       error
	remote    bool
	sendClose bool
}

// A Session is a QUIC session
type session struct {
	sessionRunner sessionRunner

	destConnID protocol.ConnectionID
	srcConnID  protocol.ConnectionID

	perspective protocol.Perspective
	version     protocol.VersionNumber
	config      *Config

	conn connection

	streamsMap   streamManager
	cryptoStream cryptoStreamI

	rttStats *congestion.RTTStats

	sentPacketHandler     ackhandler.SentPacketHandler
	receivedPacketHandler ackhandler.ReceivedPacketHandler
	streamFramer          *streamFramer
	windowUpdateQueue     *windowUpdateQueue
	connFlowController    flowcontrol.ConnectionFlowController

	unpacker unpacker
	packer   *packetPacker

	cryptoStreamHandler cryptoStreamHandler

	receivedPackets  chan *receivedPacket
	sendingScheduled chan struct{}
	// closeChan is used to notify the run loop that it should terminate.
	closeChan chan closeError
	closeOnce sync.Once

	ctx       context.Context
	ctxCancel context.CancelFunc

	// when we receive too many undecryptable packets during the handshake, we send a Public reset
	// but only after a time of protocol.PublicResetTimeout has passed
	undecryptablePackets                   []*receivedPacket
	receivedTooManyUndecrytablePacketsTime time.Time

	// this channel is passed to the CryptoSetup and receives the transport parameters, as soon as the peer sends them
	paramsChan <-chan handshake.TransportParameters
	// the handshakeEvent channel is passed to the CryptoSetup.
	// It receives when it makes sense to try decrypting undecryptable packets.
	handshakeEvent    <-chan struct{}
	handshakeComplete bool

	receivedFirstPacket bool // since packet numbers start at 0, we can't use largestRcvdPacketNumber != 0 for this

	receivedFirstForwardSecurePacket bool
	lastRcvdPacketNumber             protocol.PacketNumber
	// Used to calculate the next packet number from the truncated wire
	// representation, and sent back in public reset packets
	largestRcvdPacketNumber protocol.PacketNumber

	sessionCreationTime     time.Time
	lastNetworkActivityTime time.Time
	// pacingDeadline is the time when the next packet should be sent
	pacingDeadline time.Time

	peerParams *handshake.TransportParameters

	timer *utils.Timer
	// keepAlivePingSent stores whether a Ping frame was sent to the peer or not
	// it is reset as soon as we receive a packet from the peer
	keepAlivePingSent bool

	logger utils.Logger
}

var _ Session = &session{}
var _ streamSender = &session{}

// newSession makes a new session
func newSession(
	conn connection,
	sessionRunner sessionRunner,
	v protocol.VersionNumber,
	connectionID protocol.ConnectionID,
	scfg *handshake.ServerConfig,
	tlsConf *tls.Config,
	config *Config,
	logger utils.Logger,
) (quicSession, error) {
	paramsChan := make(chan handshake.TransportParameters)
	handshakeEvent := make(chan struct{}, 1)
	s := &session{
		conn:          conn,
		sessionRunner: sessionRunner,
		srcConnID:     connectionID,
		destConnID:    connectionID,
perspective: protocol.PerspectiveServer, version: v, config: config, handshakeEvent: handshakeEvent, paramsChan: paramsChan, logger: logger, } s.preSetup() transportParams := &handshake.TransportParameters{ StreamFlowControlWindow: protocol.ReceiveStreamFlowControlWindow, ConnectionFlowControlWindow: protocol.ReceiveConnectionFlowControlWindow, MaxStreams: uint32(s.config.MaxIncomingStreams), IdleTimeout: s.config.IdleTimeout, } divNonce := make([]byte, 32) if _, err := rand.Read(divNonce); err != nil { return nil, err } cs, err := newCryptoSetup( s.cryptoStream, connectionID, s.conn.RemoteAddr(), s.version, divNonce, scfg, transportParams, s.config.Versions, s.config.AcceptCookie, paramsChan, handshakeEvent, s.logger, ) if err != nil { return nil, err } s.cryptoStreamHandler = cs s.unpacker = newPacketUnpackerGQUIC(cs, s.version) s.streamsMap = newStreamsMapLegacy(s.newStream, s.config.MaxIncomingStreams, s.perspective) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( connectionID, nil, // no src connection ID 1, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), divNonce, cs, s.streamFramer, s.perspective, s.version, ) return s, s.postSetup() } // declare this as a variable, so that we can it mock it in the tests var newClientSession = func( conn connection, sessionRunner sessionRunner, hostname string, v protocol.VersionNumber, connectionID protocol.ConnectionID, tlsConf *tls.Config, config *Config, initialVersion protocol.VersionNumber, negotiatedVersions []protocol.VersionNumber, // needed for validation of the GQUIC version negotiation logger utils.Logger, ) (quicSession, error) { paramsChan := make(chan handshake.TransportParameters) handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: sessionRunner, srcConnID: connectionID, destConnID: connectionID, perspective: protocol.PerspectiveClient, version: v, config: config, handshakeEvent: handshakeEvent, paramsChan: 
paramsChan, logger: logger, } s.preSetup() transportParams := &handshake.TransportParameters{ StreamFlowControlWindow: protocol.ReceiveStreamFlowControlWindow, ConnectionFlowControlWindow: protocol.ReceiveConnectionFlowControlWindow, MaxStreams: uint32(s.config.MaxIncomingStreams), IdleTimeout: s.config.IdleTimeout, OmitConnectionID: s.config.RequestConnectionIDOmission, } cs, err := newCryptoSetupClient( s.cryptoStream, hostname, connectionID, s.version, tlsConf, transportParams, paramsChan, handshakeEvent, initialVersion, negotiatedVersions, s.logger, ) if err != nil { return nil, err } s.cryptoStreamHandler = cs s.unpacker = newPacketUnpackerGQUIC(cs, s.version) s.streamsMap = newStreamsMapLegacy(s.newStream, s.config.MaxIncomingStreams, s.perspective) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( connectionID, nil, // no src connection ID 1, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), nil, // no diversification nonce cs, s.streamFramer, s.perspective, s.version, ) return s, s.postSetup() } func newTLSServerSession( conn connection, runner sessionRunner, destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, initialPacketNumber protocol.PacketNumber, config *Config, tls handshake.MintTLS, cryptoStreamConn *handshake.CryptoStreamConn, nullAEAD crypto.AEAD, peerParams *handshake.TransportParameters, v protocol.VersionNumber, logger utils.Logger, ) (quicSession, error) { handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: runner, config: config, srcConnID: srcConnID, destConnID: destConnID, perspective: protocol.PerspectiveServer, version: v, handshakeEvent: handshakeEvent, logger: logger, } s.preSetup() cs := handshake.NewCryptoSetupTLSServer( tls, cryptoStreamConn, nullAEAD, handshakeEvent, v, ) s.cryptoStreamHandler = cs s.streamsMap = newStreamsMap(s, s.newFlowController, s.config.MaxIncomingStreams, s.config.MaxIncomingUniStreams, s.perspective, 
s.version) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( s.destConnID, s.srcConnID, initialPacketNumber, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), nil, // no diversification nonce cs, s.streamFramer, s.perspective, s.version, ) if err := s.postSetup(); err != nil { return nil, err } s.peerParams = peerParams s.processTransportParameters(peerParams) s.unpacker = newPacketUnpacker(cs, s.version) return s, nil } // declare this as a variable, such that we can it mock it in the tests var newTLSClientSession = func( conn connection, runner sessionRunner, hostname string, v protocol.VersionNumber, destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, config *Config, tls handshake.MintTLS, paramsChan <-chan handshake.TransportParameters, initialPacketNumber protocol.PacketNumber, logger utils.Logger, ) (quicSession, error) { handshakeEvent := make(chan struct{}, 1) s := &session{ conn: conn, sessionRunner: runner, config: config, srcConnID: srcConnID, destConnID: destConnID, perspective: protocol.PerspectiveClient, version: v, handshakeEvent: handshakeEvent, paramsChan: paramsChan, logger: logger, } s.preSetup() tls.SetCryptoStream(s.cryptoStream) cs, err := handshake.NewCryptoSetupTLSClient( s.cryptoStream, s.destConnID, hostname, handshakeEvent, tls, v, ) if err != nil { return nil, err } s.cryptoStreamHandler = cs s.unpacker = newPacketUnpacker(cs, s.version) s.streamsMap = newStreamsMap(s, s.newFlowController, s.config.MaxIncomingStreams, s.config.MaxIncomingUniStreams, s.perspective, s.version) s.streamFramer = newStreamFramer(s.cryptoStream, s.streamsMap, s.version) s.packer = newPacketPacker( s.destConnID, s.srcConnID, initialPacketNumber, s.sentPacketHandler.GetPacketNumberLen, s.RemoteAddr(), nil, // no diversification nonce cs, s.streamFramer, s.perspective, s.version, ) return s, s.postSetup() } func (s *session) preSetup() { s.rttStats = &congestion.RTTStats{} 
s.sentPacketHandler = ackhandler.NewSentPacketHandler(s.rttStats, s.logger, s.version) s.connFlowController = flowcontrol.NewConnectionFlowController( protocol.ReceiveConnectionFlowControlWindow, protocol.ByteCount(s.config.MaxReceiveConnectionFlowControlWindow), s.onHasConnectionWindowUpdate, s.rttStats, s.logger, ) s.cryptoStream = s.newCryptoStream() } func (s *session) postSetup() error { s.receivedPackets = make(chan *receivedPacket, protocol.MaxSessionUnprocessedPackets) s.closeChan = make(chan closeError, 1) s.sendingScheduled = make(chan struct{}, 1) s.undecryptablePackets = make([]*receivedPacket, 0, protocol.MaxUndecryptablePackets) s.ctx, s.ctxCancel = context.WithCancel(context.Background()) s.timer = utils.NewTimer() now := time.Now() s.lastNetworkActivityTime = now s.sessionCreationTime = now s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.rttStats, s.logger, s.version) s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.cryptoStream, s.connFlowController, s.packer.QueueControlFrame) return nil } // run the session main loop func (s *session) run() error { defer s.ctxCancel() go func() { if err := s.cryptoStreamHandler.HandleCryptoStream(); err != nil { if err == handshake.ErrCloseSessionForRetry { s.destroy(err) } else { s.closeLocal(err) } } }() var closeErr closeError runLoop: for { // Close immediately if requested select { case closeErr = <-s.closeChan: break runLoop case _, ok := <-s.handshakeEvent: // when the handshake is completed, the channel will be closed s.handleHandshakeEvent(!ok) default: } s.maybeResetTimer() select { case closeErr = <-s.closeChan: break runLoop case <-s.timer.Chan(): s.timer.SetRead() // We do all the interesting stuff after the switch statement, so // nothing to see here. case <-s.sendingScheduled: // We do all the interesting stuff after the switch statement, so // nothing to see here. 
case p := <-s.receivedPackets: err := s.handlePacketImpl(p) if err != nil { if qErr, ok := err.(*qerr.QuicError); ok && qErr.ErrorCode == qerr.DecryptionFailure { s.tryQueueingUndecryptablePacket(p) continue } s.closeLocal(err) continue } // This is a bit unclean, but works properly, since the packet always // begins with the public header and we never copy it. putPacketBuffer(&p.header.Raw) case p := <-s.paramsChan: s.processTransportParameters(&p) case _, ok := <-s.handshakeEvent: // when the handshake is completed, the channel will be closed s.handleHandshakeEvent(!ok) } now := time.Now() if timeout := s.sentPacketHandler.GetAlarmTimeout(); !timeout.IsZero() && timeout.Before(now) { // This could cause packets to be retransmitted. // Check it before trying to send packets. if err := s.sentPacketHandler.OnAlarm(); err != nil { s.closeLocal(err) } } var pacingDeadline time.Time if s.pacingDeadline.IsZero() { // the timer didn't have a pacing deadline set pacingDeadline = s.sentPacketHandler.TimeUntilSend() } if s.config.KeepAlive && !s.keepAlivePingSent && s.handshakeComplete && time.Since(s.lastNetworkActivityTime) >= s.peerParams.IdleTimeout/2 { // send a PING frame since there is no activity in the session s.logger.Debugf("Sending a keep-alive ping to keep the connection alive.") s.packer.QueueControlFrame(&wire.PingFrame{}) s.keepAlivePingSent = true } else if !pacingDeadline.IsZero() && now.Before(pacingDeadline) { // If we get to this point before the pacing deadline, we should wait until that deadline. // This can happen when scheduleSending is called, or a packet is received. // Set the timer and restart the run loop. 
s.pacingDeadline = pacingDeadline continue } if err := s.sendPackets(); err != nil { s.closeLocal(err) } if !s.receivedTooManyUndecrytablePacketsTime.IsZero() && s.receivedTooManyUndecrytablePacketsTime.Add(protocol.PublicResetTimeout).Before(now) && len(s.undecryptablePackets) != 0 { s.closeLocal(qerr.Error(qerr.DecryptionFailure, "too many undecryptable packets received")) } if !s.handshakeComplete && now.Sub(s.sessionCreationTime) >= s.config.HandshakeTimeout { s.closeLocal(qerr.Error(qerr.HandshakeTimeout, "Crypto handshake did not complete in time.")) } if s.handshakeComplete && now.Sub(s.lastNetworkActivityTime) >= s.config.IdleTimeout { s.closeLocal(qerr.Error(qerr.NetworkIdleTimeout, "No recent network activity.")) } } if err := s.handleCloseError(closeErr); err != nil { s.logger.Infof("Handling close error failed: %s", err) } s.logger.Infof("Connection %s closed.", s.srcConnID) if closeErr.err != handshake.ErrCloseSessionForRetry { s.sessionRunner.removeConnectionID(s.srcConnID) } return closeErr.err } func (s *session) Context() context.Context { return s.ctx } func (s *session) ConnectionState() ConnectionState { return s.cryptoStreamHandler.ConnectionState() } func (s *session) maybeResetTimer() { var deadline time.Time if s.config.KeepAlive && s.handshakeComplete && !s.keepAlivePingSent { deadline = s.lastNetworkActivityTime.Add(s.peerParams.IdleTimeout / 2) } else { deadline = s.lastNetworkActivityTime.Add(s.config.IdleTimeout) } if ackAlarm := s.receivedPacketHandler.GetAlarmTimeout(); !ackAlarm.IsZero() { deadline = utils.MinTime(deadline, ackAlarm) } if lossTime := s.sentPacketHandler.GetAlarmTimeout(); !lossTime.IsZero() { deadline = utils.MinTime(deadline, lossTime) } if !s.handshakeComplete { handshakeDeadline := s.sessionCreationTime.Add(s.config.HandshakeTimeout) deadline = utils.MinTime(deadline, handshakeDeadline) } if !s.receivedTooManyUndecrytablePacketsTime.IsZero() { deadline = utils.MinTime(deadline, 
s.receivedTooManyUndecrytablePacketsTime.Add(protocol.PublicResetTimeout))
	}
	if !s.pacingDeadline.IsZero() {
		deadline = utils.MinTime(deadline, s.pacingDeadline)
	}
	// Arm the timer with the earliest of all collected deadlines.
	s.timer.Reset(deadline)
}

// handleHandshakeEvent processes a signal on the handshakeEvent channel.
// completed is true once the channel has been closed, i.e. when the
// handshake has finished.
func (s *session) handleHandshakeEvent(completed bool) {
	if !completed {
		// An intermediate handshake event means new keys may be available:
		// retry the packets that previously failed to decrypt.
		s.tryDecryptingQueuedPackets()
		return
	}
	s.handshakeComplete = true
	s.handshakeEvent = nil // prevent this case from ever being selected again
	s.sessionRunner.onHandshakeComplete(s)

	// In gQUIC, the server completes the handshake first (after sending the SHLO).
	// In TLS 1.3, the client completes the handshake first (after sending the CFIN).
	// We need to make sure they learn about the peer completing the handshake,
	// in order to stop retransmitting handshake packets.
	// They will stop retransmitting handshake packets when receiving the first forward-secure packet.
	// We need to make sure that a retransmittable forward-secure packet is sent,
	// independent from the application protocol.
	if (!s.version.UsesTLS() && s.perspective == protocol.PerspectiveClient) ||
		(s.version.UsesTLS() && s.perspective == protocol.PerspectiveServer) {
		s.queueControlFrame(&wire.PingFrame{})
		s.sentPacketHandler.SetHandshakeComplete()
	}
}

// handlePacketImpl unpacks a single received packet and dispatches its frames.
// It returns an error if the packet can't be decrypted or a frame can't be handled.
func (s *session) handlePacketImpl(p *receivedPacket) error {
	hdr := p.header

	// The server can change the source connection ID with the first Handshake packet.
	// After this, all packets with a different source connection have to be ignored.
if s.receivedFirstPacket && hdr.IsLongHeader && !hdr.SrcConnectionID.Equal(s.destConnID) { s.logger.Debugf("Dropping packet with unexpected source connection ID: %s (expected %s)", p.header.SrcConnectionID, s.destConnID) return nil } if s.perspective == protocol.PerspectiveClient { if divNonce := p.header.DiversificationNonce; len(divNonce) > 0 { if err := s.cryptoStreamHandler.(divNonceSetter).SetDiversificationNonce(divNonce); err != nil { return err } } } if p.rcvTime.IsZero() { // To simplify testing p.rcvTime = time.Now() } // Calculate packet number hdr.PacketNumber = protocol.InferPacketNumber( hdr.PacketNumberLen, s.largestRcvdPacketNumber, hdr.PacketNumber, s.version, ) packet, err := s.unpacker.Unpack(hdr.Raw, hdr, p.data) if s.logger.Debug() { if err != nil { s.logger.Debugf("<- Reading packet 0x%x (%d bytes) for connection %s", hdr.PacketNumber, len(p.data)+len(hdr.Raw), hdr.DestConnectionID) } else { s.logger.Debugf("<- Reading packet 0x%x (%d bytes) for connection %s, %s", hdr.PacketNumber, len(p.data)+len(hdr.Raw), hdr.DestConnectionID, packet.encryptionLevel) } hdr.Log(s.logger) } // if the decryption failed, this might be a packet sent by an attacker if err != nil { return err } // The server can change the source connection ID with the first Handshake packet. if s.perspective == protocol.PerspectiveClient && !s.receivedFirstPacket && hdr.IsLongHeader && !hdr.SrcConnectionID.Equal(s.destConnID) { s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", hdr.SrcConnectionID) s.destConnID = hdr.SrcConnectionID s.packer.ChangeDestConnectionID(s.destConnID) } s.receivedFirstPacket = true s.lastNetworkActivityTime = p.rcvTime s.keepAlivePingSent = false // In gQUIC, the server completes the handshake first (after sending the SHLO). // In TLS 1.3, the client completes the handshake first (after sending the CFIN). // We know that the peer completed the handshake as soon as we receive a forward-secure packet. 
if (!s.version.UsesTLS() && s.perspective == protocol.PerspectiveServer) || (s.version.UsesTLS() && s.perspective == protocol.PerspectiveClient) { if !s.receivedFirstForwardSecurePacket && packet.encryptionLevel == protocol.EncryptionForwardSecure { s.receivedFirstForwardSecurePacket = true s.sentPacketHandler.SetHandshakeComplete() } } s.lastRcvdPacketNumber = hdr.PacketNumber // Only do this after decrypting, so we are sure the packet is not attacker-controlled s.largestRcvdPacketNumber = utils.MaxPacketNumber(s.largestRcvdPacketNumber, hdr.PacketNumber) // If this is a Retry packet, there's no need to send an ACK. // The session will be closed and recreated as soon as the crypto setup processed the HRR. if hdr.Type != protocol.PacketTypeRetry { isRetransmittable := ackhandler.HasRetransmittableFrames(packet.frames) if err := s.receivedPacketHandler.ReceivedPacket(hdr.PacketNumber, p.rcvTime, isRetransmittable); err != nil { return err } } return s.handleFrames(packet.frames, packet.encryptionLevel) } func (s *session) handleFrames(fs []wire.Frame, encLevel protocol.EncryptionLevel) error { for _, ff := range fs { var err error wire.LogFrame(s.logger, ff, false) switch frame := ff.(type) { case *wire.StreamFrame: err = s.handleStreamFrame(frame, encLevel) case *wire.AckFrame: err = s.handleAckFrame(frame, encLevel) case *wire.ConnectionCloseFrame: s.closeRemote(qerr.Error(frame.ErrorCode, frame.ReasonPhrase)) case *wire.GoawayFrame: err = errors.New("unimplemented: handling GOAWAY frames") case *wire.StopWaitingFrame: // ignore STOP_WAITINGs case *wire.RstStreamFrame: err = s.handleRstStreamFrame(frame) case *wire.MaxDataFrame: s.handleMaxDataFrame(frame) case *wire.MaxStreamDataFrame: err = s.handleMaxStreamDataFrame(frame) case *wire.MaxStreamIDFrame: err = s.handleMaxStreamIDFrame(frame) case *wire.BlockedFrame: case *wire.StreamBlockedFrame: case *wire.StreamIDBlockedFrame: case *wire.StopSendingFrame: err = s.handleStopSendingFrame(frame) case 
*wire.PingFrame:
			// nothing to do beyond the ACK bookkeeping performed for every packet
		case *wire.PathChallengeFrame:
			s.handlePathChallengeFrame(frame)
		case *wire.PathResponseFrame:
			// since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs
			err = errors.New("unexpected PATH_RESPONSE frame")
		default:
			return errors.New("Session BUG: unexpected frame type")
		}
		if err != nil {
			return err
		}
	}
	return nil
}

// handlePacket is called by the server with a new packet
func (s *session) handlePacket(p *receivedPacket) {
	// Discard packets once the amount of queued packets is larger than
	// the channel size, protocol.MaxSessionUnprocessedPackets
	select {
	case s.receivedPackets <- p:
	default:
	}
}

// handleStreamFrame dispatches STREAM frame data to the crypto stream or to
// the matching receive stream.
func (s *session) handleStreamFrame(frame *wire.StreamFrame, encLevel protocol.EncryptionLevel) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		if frame.FinBit {
			return errors.New("Received STREAM frame with FIN bit for the crypto stream")
		}
		return s.cryptoStream.handleStreamFrame(frame)
	} else if encLevel <= protocol.EncryptionUnencrypted {
		// application stream data must never arrive unencrypted
		return qerr.Error(qerr.UnencryptedStreamData, fmt.Sprintf("received unencrypted stream data on stream %d", frame.StreamID))
	}
	str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// Stream is closed and already garbage collected
		// ignore this StreamFrame
		return nil
	}
	return str.handleStreamFrame(frame)
}

// handleMaxDataFrame raises the connection-level flow control send window.
func (s *session) handleMaxDataFrame(frame *wire.MaxDataFrame) {
	s.connFlowController.UpdateSendWindow(frame.ByteOffset)
}

// handleMaxStreamDataFrame raises the flow control send window of a single stream.
func (s *session) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		s.cryptoStream.handleMaxStreamDataFrame(frame)
		return nil
	}
	str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// stream is closed and already garbage collected
		return nil
	}
	str.handleMaxStreamDataFrame(frame)
	return nil
}

// handleMaxStreamIDFrame forwards a MAX_STREAM_ID frame to the streams map.
func (s *session) handleMaxStreamIDFrame(frame *wire.MaxStreamIDFrame) error {
	return
s.streamsMap.HandleMaxStreamIDFrame(frame)
}

// handleRstStreamFrame forwards a RST_STREAM frame to the affected receive stream.
func (s *session) handleRstStreamFrame(frame *wire.RstStreamFrame) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		return errors.New("Received RST_STREAM frame for the crypto stream")
	}
	str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// stream is closed and already garbage collected
		return nil
	}
	return str.handleRstStreamFrame(frame)
}

// handleStopSendingFrame forwards a STOP_SENDING frame to the affected send stream.
func (s *session) handleStopSendingFrame(frame *wire.StopSendingFrame) error {
	if frame.StreamID == s.version.CryptoStreamID() {
		return errors.New("Received a STOP_SENDING frame for the crypto stream")
	}
	str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
	if err != nil {
		return err
	}
	if str == nil {
		// stream is closed and already garbage collected
		return nil
	}
	str.handleStopSendingFrame(frame)
	return nil
}

// handlePathChallengeFrame answers a PATH_CHALLENGE by queueing a
// PATH_RESPONSE that echoes the challenge data.
func (s *session) handlePathChallengeFrame(frame *wire.PathChallengeFrame) {
	s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data})
}

// handleAckFrame passes an ACK to the sent packet handler, then tells the
// received packet handler which packet numbers no longer need to be ACKed.
func (s *session) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error {
	if err := s.sentPacketHandler.ReceivedAck(frame, s.lastRcvdPacketNumber, encLevel, s.lastNetworkActivityTime); err != nil {
		return err
	}
	s.receivedPacketHandler.IgnoreBelow(s.sentPacketHandler.GetLowestPacketNotConfirmedAcked())
	return nil
}

// closeLocal closes the session and sends a CONNECTION_CLOSE containing the error
func (s *session) closeLocal(e error) {
	// closeOnce guarantees only the first close request wins.
	s.closeOnce.Do(func() {
		s.closeChan <- closeError{err: e, sendClose: true, remote: false}
	})
}

// destroy closes the session without sending the error on the wire
func (s *session) destroy(e error) {
	s.closeOnce.Do(func() {
		s.closeChan <- closeError{err: e, sendClose: false, remote: false}
	})
}

// closeRemote records a close that was initiated by the peer.
func (s *session) closeRemote(e error) {
	s.closeOnce.Do(func() {
		s.closeChan <- closeError{err: e, remote: true}
	})
}

// Close the connection. It sends a qerr.PeerGoingAway.
// It waits until the run loop has stopped before returning func (s *session) Close() error { s.closeLocal(nil) <-s.ctx.Done() return nil } func (s *session) CloseWithError(code protocol.ApplicationErrorCode, e error) error { s.closeLocal(qerr.Error(qerr.ErrorCode(code), e.Error())) <-s.ctx.Done() return nil } func (s *session) handleCloseError(closeErr closeError) error { if closeErr.err == nil { closeErr.err = qerr.PeerGoingAway } var quicErr *qerr.QuicError var ok bool if quicErr, ok = closeErr.err.(*qerr.QuicError); !ok { quicErr = qerr.ToQuicError(closeErr.err) } // Don't log 'normal' reasons if quicErr.ErrorCode == qerr.PeerGoingAway || quicErr.ErrorCode == qerr.NetworkIdleTimeout { s.logger.Infof("Closing connection %s.", s.srcConnID) } else { s.logger.Errorf("Closing session with error: %s", closeErr.err.Error()) } s.cryptoStream.closeForShutdown(quicErr) s.streamsMap.CloseWithError(quicErr) if !closeErr.sendClose { return nil } // If this is a remote close we're done here if closeErr.remote { return nil } if quicErr.ErrorCode == qerr.DecryptionFailure || quicErr == handshake.ErrNSTPExperiment { return s.sendPublicReset(s.lastRcvdPacketNumber) } return s.sendConnectionClose(quicErr) } func (s *session) processTransportParameters(params *handshake.TransportParameters) { s.peerParams = params s.streamsMap.UpdateLimits(params) if params.OmitConnectionID { s.packer.SetOmitConnectionID() } if params.MaxPacketSize != 0 { s.packer.SetMaxPacketSize(params.MaxPacketSize) } s.connFlowController.UpdateSendWindow(params.ConnectionFlowControlWindow) // the crypto stream is the only open stream at this moment // so we don't need to update stream flow control windows } func (s *session) sendPackets() error { s.pacingDeadline = time.Time{} sendMode := s.sentPacketHandler.SendMode() if sendMode == ackhandler.SendNone { // shortcut: return immediately if there's nothing to send return nil } numPackets := s.sentPacketHandler.ShouldSendNumPackets() var numPacketsSent int 
sendLoop: for { switch sendMode { case ackhandler.SendNone: break sendLoop case ackhandler.SendAck: // We can at most send a single ACK only packet. // There will only be a new ACK after receiving new packets. // SendAck is only returned when we're congestion limited, so we don't need to set the pacingt timer. return s.maybeSendAckOnlyPacket() case ackhandler.SendRTO: // try to send a retransmission first sentPacket, err := s.maybeSendRetransmission() if err != nil { return err } if !sentPacket { // In RTO mode, a probe packet has to be sent. // Add a PING frame to make sure a (retransmittable) packet will be sent. s.queueControlFrame(&wire.PingFrame{}) sentPacket, err := s.sendPacket() if err != nil { return err } if !sentPacket { return errors.New("session BUG: expected a packet to be sent in RTO mode") } } numPacketsSent++ case ackhandler.SendTLP: // In TLP mode, a probe packet has to be sent. // Add a PING frame to make sure a (retransmittable) packet will be sent. s.queueControlFrame(&wire.PingFrame{}) sentPacket, err := s.sendPacket() if err != nil { return err } if !sentPacket { return errors.New("session BUG: expected a packet to be sent in TLP mode") } return nil case ackhandler.SendRetransmission: sentPacket, err := s.maybeSendRetransmission() if err != nil { return err } if sentPacket { numPacketsSent++ // This can happen if a retransmission queued, but it wasn't necessary to send it. // e.g. when an Initial is queued, but we already received a packet from the server. } case ackhandler.SendAny: sentPacket, err := s.sendPacket() if err != nil { return err } if !sentPacket { break sendLoop } numPacketsSent++ default: return fmt.Errorf("BUG: invalid send mode %d", sendMode) } if numPacketsSent >= numPackets { break } sendMode = s.sentPacketHandler.SendMode() } // Only start the pacing timer if we sent as many packets as we were allowed. // There will probably be more to send when calling sendPacket again. 
if numPacketsSent == numPackets { s.pacingDeadline = s.sentPacketHandler.TimeUntilSend() } return nil } func (s *session) maybeSendAckOnlyPacket() error { ack := s.receivedPacketHandler.GetAckFrame() if ack == nil { return nil } s.packer.QueueControlFrame(ack) if s.version.UsesStopWaitingFrames() { // for gQUIC, maybe add a STOP_WAITING if swf := s.sentPacketHandler.GetStopWaitingFrame(false); swf != nil { s.packer.QueueControlFrame(swf) } } packet, err := s.packer.PackAckPacket() if err != nil { return err } s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket()) return s.sendPackedPacket(packet) } // maybeSendRetransmission sends retransmissions for at most one packet. // It takes care that Initials aren't retransmitted, if a packet from the server was already received. func (s *session) maybeSendRetransmission() (bool, error) { var retransmitPacket *ackhandler.Packet for { retransmitPacket = s.sentPacketHandler.DequeuePacketForRetransmission() if retransmitPacket == nil { return false, nil } // Don't retransmit Initial packets if we already received a response. // An Initial might have been retransmitted multiple times before we receive a response. // As soon as we receive one response, we don't need to send any more Initials. if s.receivedFirstPacket && retransmitPacket.PacketType == protocol.PacketTypeInitial { s.logger.Debugf("Skipping retransmission of packet %d. 
Already received a response to an Initial.", retransmitPacket.PacketNumber) continue } break } if retransmitPacket.EncryptionLevel != protocol.EncryptionForwardSecure { s.logger.Debugf("Dequeueing handshake retransmission for packet 0x%x", retransmitPacket.PacketNumber) } else { s.logger.Debugf("Dequeueing retransmission for packet 0x%x", retransmitPacket.PacketNumber) } if s.version.UsesStopWaitingFrames() { s.packer.QueueControlFrame(s.sentPacketHandler.GetStopWaitingFrame(true)) } packets, err := s.packer.PackRetransmission(retransmitPacket) if err != nil { return false, err } ackhandlerPackets := make([]*ackhandler.Packet, len(packets)) for i, packet := range packets { ackhandlerPackets[i] = packet.ToAckHandlerPacket() } s.sentPacketHandler.SentPacketsAsRetransmission(ackhandlerPackets, retransmitPacket.PacketNumber) for _, packet := range packets { if err := s.sendPackedPacket(packet); err != nil { return false, err } } return true, nil } func (s *session) sendPacket() (bool, error) { if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked { s.packer.QueueControlFrame(&wire.BlockedFrame{Offset: offset}) } s.windowUpdateQueue.QueueAll() if ack := s.receivedPacketHandler.GetAckFrame(); ack != nil { s.packer.QueueControlFrame(ack) if s.version.UsesStopWaitingFrames() { if swf := s.sentPacketHandler.GetStopWaitingFrame(false); swf != nil { s.packer.QueueControlFrame(swf) } } } packet, err := s.packer.PackPacket() if err != nil || packet == nil { return false, err } s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket()) if err := s.sendPackedPacket(packet); err != nil { return false, err } return true, nil } func (s *session) sendPackedPacket(packet *packedPacket) error { defer putPacketBuffer(&packet.raw) s.logPacket(packet) return s.conn.Write(packet.raw) } func (s *session) sendConnectionClose(quicErr *qerr.QuicError) error { packet, err := s.packer.PackConnectionClose(&wire.ConnectionCloseFrame{ ErrorCode: quicErr.ErrorCode, ReasonPhrase: 
quicErr.ErrorMessage, }) if err != nil { return err } s.logPacket(packet) return s.conn.Write(packet.raw) } func (s *session) logPacket(packet *packedPacket) { if !s.logger.Debug() { // We don't need to allocate the slices for calling the format functions return } s.logger.Debugf("-> Sending packet 0x%x (%d bytes) for connection %s, %s", packet.header.PacketNumber, len(packet.raw), s.srcConnID, packet.encryptionLevel) packet.header.Log(s.logger) for _, frame := range packet.frames { wire.LogFrame(s.logger, frame, true) } } // GetOrOpenStream either returns an existing stream, a newly opened stream, or nil if a stream with the provided ID is already closed. // It is *only* needed for gQUIC's H2. // It will be removed as soon as gQUIC moves towards the IETF H2/QUIC stream mapping. func (s *session) GetOrOpenStream(id protocol.StreamID) (Stream, error) { str, err := s.streamsMap.GetOrOpenSendStream(id) if str != nil { if bstr, ok := str.(Stream); ok { return bstr, err } return nil, fmt.Errorf("Stream %d is not a bidirectional stream", id) } // make sure to return an actual nil value here, not an Stream with value nil return nil, err } // AcceptStream returns the next stream openend by the peer func (s *session) AcceptStream() (Stream, error) { return s.streamsMap.AcceptStream() } func (s *session) AcceptUniStream() (ReceiveStream, error) { return s.streamsMap.AcceptUniStream() } // OpenStream opens a stream func (s *session) OpenStream() (Stream, error) { return s.streamsMap.OpenStream() } func (s *session) OpenStreamSync() (Stream, error) { return s.streamsMap.OpenStreamSync() } func (s *session) OpenUniStream() (SendStream, error) { return s.streamsMap.OpenUniStream() } func (s *session) OpenUniStreamSync() (SendStream, error) { return s.streamsMap.OpenUniStreamSync() } func (s *session) newStream(id protocol.StreamID) streamI { flowController := s.newFlowController(id) return newStream(id, s, flowController, s.version) } func (s *session) newFlowController(id 
protocol.StreamID) flowcontrol.StreamFlowController {
	// Use the peer's advertised stream window as the initial send window,
	// if transport parameters have already been received.
	var initialSendWindow protocol.ByteCount
	if s.peerParams != nil {
		initialSendWindow = s.peerParams.StreamFlowControlWindow
	}
	return flowcontrol.NewStreamFlowController(
		id,
		s.version.StreamContributesToConnectionFlowControl(id),
		s.connFlowController,
		protocol.ReceiveStreamFlowControlWindow,
		protocol.ByteCount(s.config.MaxReceiveStreamFlowControlWindow),
		initialSendWindow,
		s.onHasStreamWindowUpdate,
		s.rttStats,
		s.logger,
	)
}

// newCryptoStream creates the crypto stream together with its flow controller.
func (s *session) newCryptoStream() cryptoStreamI {
	id := s.version.CryptoStreamID()
	flowController := flowcontrol.NewStreamFlowController(
		id,
		s.version.StreamContributesToConnectionFlowControl(id),
		s.connFlowController,
		protocol.ReceiveStreamFlowControlWindow,
		protocol.ByteCount(s.config.MaxReceiveStreamFlowControlWindow),
		0, // initial send window; NOTE(review): presumably raised later by the peer — confirm
		s.onHasStreamWindowUpdate,
		s.rttStats,
		s.logger,
	)
	return newCryptoStream(s, flowController, s.version)
}

// sendPublicReset writes a gQUIC Public Reset packet for the given rejected
// packet number directly to the connection.
func (s *session) sendPublicReset(rejectedPacketNumber protocol.PacketNumber) error {
	s.logger.Infof("Sending public reset for connection %x, packet number %d", s.destConnID, rejectedPacketNumber)
	return s.conn.Write(wire.WritePublicReset(s.destConnID, rejectedPacketNumber, 0))
}

// scheduleSending signals that we have data for sending
func (s *session) scheduleSending() {
	select {
	case s.sendingScheduled <- struct{}{}:
	default:
		// a signal is already pending; no need to queue another
	}
}

// tryQueueingUndecryptablePacket buffers a packet that failed decryption so it
// can be retried once the next encryption level becomes available. Packets are
// dropped if the handshake is already complete or if the queue is full.
func (s *session) tryQueueingUndecryptablePacket(p *receivedPacket) {
	if s.handshakeComplete {
		s.logger.Debugf("Received undecryptable packet from %s after the handshake: %#v, %d bytes data", p.remoteAddr.String(), p.header, len(p.data))
		return
	}
	if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
		// if this is the first time the undecryptablePackets runs full, start the timer to send a Public Reset
		if s.receivedTooManyUndecrytablePacketsTime.IsZero() {
			s.receivedTooManyUndecrytablePacketsTime = time.Now()
			s.maybeResetTimer()
		}
		s.logger.Infof("Dropping undecrytable packet 0x%x (undecryptable packet queue full)",
p.header.PacketNumber)
		return
	}
	s.logger.Infof("Queueing packet 0x%x for later decryption", p.header.PacketNumber)
	s.undecryptablePackets = append(s.undecryptablePackets, p)
}

// tryDecryptingQueuedPackets re-feeds all buffered undecryptable packets into
// the packet handling queue and clears the buffer (keeping its capacity).
func (s *session) tryDecryptingQueuedPackets() {
	for _, p := range s.undecryptablePackets {
		s.handlePacket(p)
	}
	s.undecryptablePackets = s.undecryptablePackets[:0]
}

// queueControlFrame queues a control frame with the packer and wakes the run loop.
func (s *session) queueControlFrame(f wire.Frame) {
	s.packer.QueueControlFrame(f)
	s.scheduleSending()
}

// onHasStreamWindowUpdate queues a window update for the given stream and
// wakes the run loop.
func (s *session) onHasStreamWindowUpdate(id protocol.StreamID) {
	s.windowUpdateQueue.AddStream(id)
	s.scheduleSending()
}

// onHasConnectionWindowUpdate queues a connection-level window update and
// wakes the run loop.
func (s *session) onHasConnectionWindowUpdate() {
	s.windowUpdateQueue.AddConnection()
	s.scheduleSending()
}

// onHasStreamData is called when a stream has data to send; it marks the
// stream active in the framer and wakes the run loop.
func (s *session) onHasStreamData(id protocol.StreamID) {
	s.streamFramer.AddActiveStream(id)
	s.scheduleSending()
}

// onStreamCompleted removes a finished stream from the streams map; a failure
// to do so is treated as fatal for the session.
func (s *session) onStreamCompleted(id protocol.StreamID) {
	if err := s.streamsMap.DeleteStream(id); err != nil {
		s.closeLocal(err)
	}
}

// LocalAddr returns the local address of the underlying connection.
func (s *session) LocalAddr() net.Addr {
	return s.conn.LocalAddr()
}

// RemoteAddr returns the peer's address of the underlying connection.
func (s *session) RemoteAddr() net.Addr {
	return s.conn.RemoteAddr()
}

// getCryptoStream exposes the crypto stream to other parts of the package.
func (s *session) getCryptoStream() cryptoStreamI {
	return s.cryptoStream
}

// GetVersion returns the QUIC version used by this session.
func (s *session) GetVersion() protocol.VersionNumber {
	return s.version
}
package main import ( "context" "crypto/rand" "encoding/base64" "encoding/hex" "fmt" "io" "strconv" "strings" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/signal" "github.com/urfave/cli" ) const ( defaultUtxoMinConf = 1 userMsgFund = `PSBT funding initiated with peer %x. Please create a PSBT that sends %v (%d satoshi) to the funding address %s. Note: The whole process should be completed within 10 minutes, otherwise there is a risk of the remote node timing out and canceling the funding process. Example with bitcoind: bitcoin-cli walletcreatefundedpsbt [] '[{"%s":%.8f}]' If you are using a wallet that can fund a PSBT directly (currently not possible with bitcoind), you can use this PSBT that contains the same address and amount: %s !!! WARNING !!! DO NOT PUBLISH the finished transaction by yourself or with another tool. lnd MUST publish it in the proper funding flow order OR THE FUNDS CAN BE LOST! Paste the funded PSBT here to continue the funding flow. Base64 encoded PSBT: ` userMsgSign = ` PSBT verified by lnd, please continue the funding flow by signing the PSBT by all required parties/devices. Once the transaction is fully signed, paste it again here. Base64 encoded signed PSBT: ` ) // TODO(roasbeef): change default number of confirmations var openChannelCommand = cli.Command{ Name: "openchannel", Category: "Channels", Usage: "Open a channel to a node or an existing peer.", Description: ` Attempt to open a new channel to an existing peer with the key node-key optionally blocking until the channel is 'open'. One can also connect to a node before opening a new channel to it by setting its host:port via the --connect argument. For this to work, the node_key must be provided, rather than the peer_id. This is optional. 
The channel will be initialized with local-amt satoshis local and push-amt satoshis for the remote node. Note that specifying push-amt means you give that amount to the remote node as part of the channel opening. Once the channel is open, a channelPoint (txid:vout) of the funding output is returned. If the remote peer supports the option upfront shutdown feature bit (query listpeers to see their supported feature bits), an address to enforce payout of funds on cooperative close can optionally be provided. Note that if you set this value, you will not be able to cooperatively close out to another address. One can manually set the fee to be used for the funding transaction via either the --conf_target or --sat_per_byte arguments. This is optional.`, ArgsUsage: "node-key local-amt push-amt", Flags: []cli.Flag{ cli.StringFlag{ Name: "node_key", Usage: "the identity public key of the target node/peer " + "serialized in compressed format", }, cli.StringFlag{ Name: "connect", Usage: "(optional) the host:port of the target node", }, cli.IntFlag{ Name: "local_amt", Usage: "the number of satoshis the wallet should commit to the channel", }, cli.IntFlag{ Name: "push_amt", Usage: "the number of satoshis to give the remote side " + "as part of the initial commitment state, " + "this is equivalent to first opening a " + "channel and sending the remote party funds, " + "but done all in one step", }, cli.BoolFlag{ Name: "block", Usage: "block and wait until the channel is fully open", }, cli.Int64Flag{ Name: "conf_target", Usage: "(optional) the number of blocks that the " + "transaction *should* confirm in, will be " + "used for fee estimation", }, cli.Int64Flag{ Name: "sat_per_byte", Usage: "(optional) a manual fee expressed in " + "sat/byte that should be used when crafting " + "the transaction", }, cli.BoolFlag{ Name: "private", Usage: "make the channel private, such that it won't " + "be announced to the greater network, and " + "nodes other than the two channel endpoints " + 
"must be explicitly told about it to be able " + "to route through it", }, cli.Int64Flag{ Name: "min_htlc_msat", Usage: "(optional) the minimum value we will require " + "for incoming HTLCs on the channel", }, cli.Uint64Flag{ Name: "remote_csv_delay", Usage: "(optional) the number of blocks we will require " + "our channel counterparty to wait before accessing " + "its funds in case of unilateral close. If this is " + "not set, we will scale the value according to the " + "channel size", }, cli.Uint64Flag{ Name: "min_confs", Usage: "(optional) the minimum number of confirmations " + "each one of your outputs used for the funding " + "transaction must satisfy", Value: defaultUtxoMinConf, }, cli.StringFlag{ Name: "close_address", Usage: "(optional) an address to enforce payout of our " + "funds to on cooperative close. Note that if this " + "value is set on channel open, you will *not* be " + "able to cooperatively close to a different address.", }, cli.BoolFlag{ Name: "psbt", Usage: "start an interactive mode that initiates " + "funding through a partially signed bitcoin " + "transaction (PSBT), allowing the channel " + "funds to be added and signed from a hardware " + "or other offline device.", }, cli.StringFlag{ Name: "base_psbt", Usage: "when using the interactive PSBT mode to open " + "a new channel, use this base64 encoded PSBT " + "as a base and add the new channel output to " + "it instead of creating a new, empty one.", }, cli.BoolFlag{ Name: "no_publish", Usage: "when using the interactive PSBT mode to open " + "multiple channels in a batch, this flag " + "instructs lnd to not publish the full batch " + "transaction just yet. 
For safety reasons " + "this flag should be set for each of the " + "batch's transactions except the very last", }, cli.Uint64Flag{ Name: "remote_max_value_in_flight_msat", Usage: "(optional) the maximum value in msat that " + "can be pending within the channel at any given time", }, }, Action: actionDecorator(openChannel), } func openChannel(ctx *cli.Context) error { // TODO(roasbeef): add deadline to context ctxb := context.Background() client, cleanUp := getClient(ctx) defer cleanUp() args := ctx.Args() var err error // Show command help if no arguments provided if ctx.NArg() == 0 && ctx.NumFlags() == 0 { _ = cli.ShowCommandHelp(ctx, "openchannel") return nil } minConfs := int32(ctx.Uint64("min_confs")) req := &lnrpc.OpenChannelRequest{ TargetConf: int32(ctx.Int64("conf_target")), SatPerByte: ctx.Int64("sat_per_byte"), MinHtlcMsat: ctx.Int64("min_htlc_msat"), RemoteCsvDelay: uint32(ctx.Uint64("remote_csv_delay")), MinConfs: minConfs, SpendUnconfirmed: minConfs == 0, CloseAddress: ctx.String("close_address"), RemoteMaxValueInFlightMsat: ctx.Uint64("remote_max_value_in_flight_msat"), } switch { case ctx.IsSet("node_key"): nodePubHex, err := hex.DecodeString(ctx.String("node_key")) if err != nil { return fmt.Errorf("unable to decode node public key: %v", err) } req.NodePubkey = nodePubHex case args.Present(): nodePubHex, err := hex.DecodeString(args.First()) if err != nil { return fmt.Errorf("unable to decode node public key: %v", err) } args = args.Tail() req.NodePubkey = nodePubHex default: return fmt.Errorf("node id argument missing") } // As soon as we can confirm that the node's node_key was set, rather // than the peer_id, we can check if the host:port was also set to // connect to it before opening the channel. 
if req.NodePubkey != nil && ctx.IsSet("connect") { addr := &lnrpc.LightningAddress{ Pubkey: hex.EncodeToString(req.NodePubkey), Host: ctx.String("connect"), } req := &lnrpc.ConnectPeerRequest{ Addr: addr, Perm: false, } // Check if connecting to the node was successful. // We discard the peer id returned as it is not needed. _, err := client.ConnectPeer(ctxb, req) if err != nil && !strings.Contains(err.Error(), "already connected") { return err } } switch { case ctx.IsSet("local_amt"): req.LocalFundingAmount = int64(ctx.Int("local_amt")) case args.Present(): req.LocalFundingAmount, err = strconv.ParseInt(args.First(), 10, 64) if err != nil { return fmt.Errorf("unable to decode local amt: %v", err) } args = args.Tail() default: return fmt.Errorf("local amt argument missing") } if ctx.IsSet("push_amt") { req.PushSat = int64(ctx.Int("push_amt")) } else if args.Present() { req.PushSat, err = strconv.ParseInt(args.First(), 10, 64) if err != nil { return fmt.Errorf("unable to decode push amt: %v", err) } } req.Private = ctx.Bool("private") // PSBT funding is a more involved, interactive process that is too // large to also fit into this already long function. if ctx.Bool("psbt") { return openChannelPsbt(ctx, client, req) } if !ctx.Bool("psbt") && ctx.Bool("no_publish") { return fmt.Errorf("the --no_publish flag can only be used in " + "combination with the --psbt flag") } stream, err := client.OpenChannel(ctxb, req) if err != nil { return err } for { resp, err := stream.Recv() if err == io.EOF { return nil } else if err != nil { return err } switch update := resp.Update.(type) { case *lnrpc.OpenStatusUpdate_ChanPending: err := printChanPending(update) if err != nil { return err } if !ctx.Bool("block") { return nil } case *lnrpc.OpenStatusUpdate_ChanOpen: return printChanOpen(update) } } } // openChannelPsbt starts an interactive channel open protocol that uses a // partially signed bitcoin transaction (PSBT) to fund the channel output. 
The // protocol involves several steps between the RPC server and the CLI client: // // RPC server CLI client // | | // | |<------open channel (stream)-----| // | |-------ready for funding----->| | // | |<------PSBT verify------------| | // | |-------ready for signing----->| | // | |<------PSBT finalize----------| | // | |-------channel pending------->| | // | |-------channel open------------->| // | | func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, req *lnrpc.OpenChannelRequest) error { var ( pendingChanID [32]byte shimPending = true basePsbtBytes []byte quit = make(chan struct{}) srvMsg = make(chan *lnrpc.OpenStatusUpdate, 1) srvErr = make(chan error, 1) ctxc, cancel = context.WithCancel(context.Background()) ) defer cancel() // Make sure the user didn't supply any command line flags that are // incompatible with PSBT funding. err := checkPsbtFlags(req) if err != nil { return err } // If the user supplied a base PSBT, only make sure it's valid base64. // The RPC server will make sure it's also a valid PSBT. basePsbt := ctx.String("base_psbt") if basePsbt != "" { basePsbtBytes, err = base64.StdEncoding.DecodeString(basePsbt) if err != nil { return fmt.Errorf("error parsing base PSBT: %v", err) } } // Generate a new, random pending channel ID that we'll use as the main // identifier when sending update messages to the RPC server. if _, err := rand.Read(pendingChanID[:]); err != nil { return fmt.Errorf("unable to generate random chan ID: %v", err) } fmt.Printf("Starting PSBT funding flow with pending channel ID %x.\n", pendingChanID) // maybeCancelShim is a helper function that cancels the funding shim // with the RPC server in case we end up aborting early. maybeCancelShim := func() { // If the user canceled while there was still a shim registered // with the wallet, release the resources now. 
if shimPending { fmt.Printf("Canceling PSBT funding flow for pending "+ "channel ID %x.\n", pendingChanID) cancelMsg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_ShimCancel{ ShimCancel: &lnrpc.FundingShimCancel{ PendingChanId: pendingChanID[:], }, }, } err := sendFundingState(ctxc, ctx, cancelMsg) if err != nil { fmt.Printf("Error canceling shim: %v\n", err) } shimPending = false } // Abort the stream connection to the server. cancel() } defer maybeCancelShim() // Create the PSBT funding shim that will tell the funding manager we // want to use a PSBT. req.FundingShim = &lnrpc.FundingShim{ Shim: &lnrpc.FundingShim_PsbtShim{ PsbtShim: &lnrpc.PsbtShim{ PendingChanId: pendingChanID[:], BasePsbt: basePsbtBytes, NoPublish: ctx.Bool("no_publish"), }, }, } // Start the interactive process by opening the stream connection to the // daemon. If the user cancels by pressing <Ctrl+C> we need to cancel // the shim. To not just kill the process on interrupt, we need to // explicitly capture the signal. stream, err := client.OpenChannel(ctxc, req) if err != nil { return fmt.Errorf("opening stream to server failed: %v", err) } if err := signal.Intercept(); err != nil { return err } // We also need to spawn a goroutine that reads from the server. This // will copy the messages to the channel as long as they come in or add // exactly one error to the error stream and then bail out. go func() { for { // Recv blocks until a message or error arrives. resp, err := stream.Recv() if err == io.EOF { srvErr <- fmt.Errorf("lnd shutting down: %v", err) return } else if err != nil { srvErr <- fmt.Errorf("got error from server: "+ "%v", err) return } // Don't block on sending in case of shutting down. select { case srvMsg <- resp: case <-quit: return } } }() // Spawn another goroutine that only handles abort from user or errors // from the server. Both will trigger an attempt to cancel the shim with // the server. 
go func() { select { case <-signal.ShutdownChannel(): fmt.Printf("\nInterrupt signal received.\n") close(quit) case err := <-srvErr: fmt.Printf("\nError received: %v\n", err) // If the remote peer canceled on us, the reservation // has already been deleted. We don't need to try to // remove it again, this would just produce another // error. cancelErr := chanfunding.ErrRemoteCanceled.Error() if err != nil && strings.Contains(err.Error(), cancelErr) { shimPending = false } close(quit) case <-quit: } }() // Our main event loop where we wait for triggers for { var srvResponse *lnrpc.OpenStatusUpdate select { case srvResponse = <-srvMsg: case <-quit: return nil } switch update := srvResponse.Update.(type) { case *lnrpc.OpenStatusUpdate_PsbtFund: // First tell the user how to create the PSBT with the // address and amount we now know. amt := btcutil.Amount(update.PsbtFund.FundingAmount) addr := update.PsbtFund.FundingAddress fmt.Printf( userMsgFund, req.NodePubkey, amt, amt, addr, addr, amt.ToBTC(), base64.StdEncoding.EncodeToString( update.PsbtFund.Psbt, ), ) // Read the user's response and send it to the server to // verify everything's correct before anything is // signed. psbtBase64, err := readLine(quit) if err == io.EOF { return nil } if err != nil { return fmt.Errorf("reading from console "+ "failed: %v", err) } psbt, err := base64.StdEncoding.DecodeString( strings.TrimSpace(psbtBase64), ) if err != nil { return fmt.Errorf("base64 decode failed: %v", err) } verifyMsg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ PsbtVerify: &lnrpc.FundingPsbtVerify{ FundedPsbt: psbt, PendingChanId: pendingChanID[:], }, }, } err = sendFundingState(ctxc, ctx, verifyMsg) if err != nil { return fmt.Errorf("verifying PSBT by lnd "+ "failed: %v", err) } // Now that we know the PSBT looks good, we can let it // be signed by the user. fmt.Print(userMsgSign) // Read the signed PSBT and send it to lnd. 
psbtBase64, err = readLine(quit) if err == io.EOF { return nil } if err != nil { return fmt.Errorf("reading from console "+ "failed: %v", err) } psbt, err = base64.StdEncoding.DecodeString( strings.TrimSpace(psbtBase64), ) if err != nil { return fmt.Errorf("base64 decode failed: %v", err) } finalizeMsg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ PsbtFinalize: &lnrpc.FundingPsbtFinalize{ SignedPsbt: psbt, PendingChanId: pendingChanID[:], }, }, } err = sendFundingState(ctxc, ctx, finalizeMsg) if err != nil { return fmt.Errorf("finalizing PSBT funding "+ "flow failed: %v", err) } case *lnrpc.OpenStatusUpdate_ChanPending: // As soon as the channel is pending, there is no more // shim that needs to be canceled. If the user // interrupts now, we don't need to clean up anything. shimPending = false err := printChanPending(update) if err != nil { return err } if !ctx.Bool("block") { return nil } case *lnrpc.OpenStatusUpdate_ChanOpen: return printChanOpen(update) } } } // printChanOpen prints the channel point of the channel open message. func printChanOpen(update *lnrpc.OpenStatusUpdate_ChanOpen) error { channelPoint := update.ChanOpen.ChannelPoint // A channel point's funding txid can be get/set as a // byte slice or a string. In the case it is a string, // decode it. var txidHash []byte switch channelPoint.GetFundingTxid().(type) { case *lnrpc.ChannelPoint_FundingTxidBytes: txidHash = channelPoint.GetFundingTxidBytes() case *lnrpc.ChannelPoint_FundingTxidStr: s := channelPoint.GetFundingTxidStr() h, err := chainhash.NewHashFromStr(s) if err != nil { return err } txidHash = h[:] } txid, err := chainhash.NewHash(txidHash) if err != nil { return err } index := channelPoint.OutputIndex printJSON(struct { ChannelPoint string `json:"channel_point"` }{ ChannelPoint: fmt.Sprintf("%v:%v", txid, index), }) return nil } // printChanPending prints the funding transaction ID of the channel pending // message. 
// printChanPending prints the funding transaction ID of the channel pending
// message.
func printChanPending(update *lnrpc.OpenStatusUpdate_ChanPending) error {
	txid, err := chainhash.NewHash(update.ChanPending.Txid)
	if err != nil {
		return err
	}

	printJSON(struct {
		FundingTxid string `json:"funding_txid"`
	}{
		FundingTxid: txid.String(),
	})

	return nil
}

// readLine reads a line from standard in but does not block in case of a
// system interrupt like syscall.SIGINT (Ctrl+C).
//
// NOTE(review): fmt.Scan splits on whitespace, so this actually reads a
// single whitespace-delimited token, not a full line. That works for
// base64/hex blobs (no internal spaces) but would truncate multi-word
// input — confirm before reusing elsewhere.
func readLine(quit chan struct{}) (string, error) {
	msg := make(chan string, 1)

	// In a normal console, reading from stdin won't signal EOF when the
	// user presses Ctrl+C. That's why we need to put this in a separate
	// goroutine so it doesn't block.
	go func() {
		for {
			var str string
			// Error deliberately ignored: an empty/failed read just
			// produces the empty string.
			_, _ = fmt.Scan(&str)
			msg <- str
			return
		}
	}()
	for {
		select {
		// The quit channel closing aborts the read and is reported as
		// io.EOF to the caller.
		case <-quit:
			return "", io.EOF

		case str := <-msg:
			return str, nil
		}
	}
}

// checkPsbtFlags make sure a request to open a channel doesn't set any
// parameters that are incompatible with the PSBT funding flow.
func checkPsbtFlags(req *lnrpc.OpenChannelRequest) error {
	// With PSBT funding the wallet doesn't select coins, so confirmation
	// constraints on inputs cannot be honored.
	if req.MinConfs != defaultUtxoMinConf || req.SpendUnconfirmed {
		return fmt.Errorf("specifying minimum confirmations for PSBT " +
			"funding is not supported")
	}
	// Fees are determined by whoever crafts the PSBT, not by lnd.
	if req.TargetConf != 0 || req.SatPerByte != 0 {
		return fmt.Errorf("setting fee estimation parameters not " +
			"supported for PSBT funding")
	}
	return nil
}

// sendFundingState sends a single funding state step message by using a new
// client connection. This is necessary if the whole funding flow takes longer
// than the default macaroon timeout, then we cannot use a single client
// connection.
func sendFundingState(cancelCtx context.Context, cliCtx *cli.Context, msg *lnrpc.FundingTransitionMsg) error { client, cleanUp := getClient(cliCtx) defer cleanUp() _, err := client.FundingStateStep(cancelCtx, msg) return err } lncli: allow final transaction as raw hex in PSBT funding flow package main import ( "bytes" "context" "crypto/rand" "encoding/base64" "encoding/hex" "fmt" "io" "strconv" "strings" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/signal" "github.com/urfave/cli" ) const ( defaultUtxoMinConf = 1 userMsgFund = `PSBT funding initiated with peer %x. Please create a PSBT that sends %v (%d satoshi) to the funding address %s. Note: The whole process should be completed within 10 minutes, otherwise there is a risk of the remote node timing out and canceling the funding process. Example with bitcoind: bitcoin-cli walletcreatefundedpsbt [] '[{"%s":%.8f}]' If you are using a wallet that can fund a PSBT directly (currently not possible with bitcoind), you can use this PSBT that contains the same address and amount: %s !!! WARNING !!! DO NOT PUBLISH the finished transaction by yourself or with another tool. lnd MUST publish it in the proper funding flow order OR THE FUNDS CAN BE LOST! Paste the funded PSBT here to continue the funding flow. Base64 encoded PSBT: ` userMsgSign = ` PSBT verified by lnd, please continue the funding flow by signing the PSBT by all required parties/devices. Once the transaction is fully signed, paste it again here either in base64 PSBT or hex encoded raw wire TX format. 
Signed base64 encoded PSBT or hex encoded raw wire TX: ` ) // TODO(roasbeef): change default number of confirmations var openChannelCommand = cli.Command{ Name: "openchannel", Category: "Channels", Usage: "Open a channel to a node or an existing peer.", Description: ` Attempt to open a new channel to an existing peer with the key node-key optionally blocking until the channel is 'open'. One can also connect to a node before opening a new channel to it by setting its host:port via the --connect argument. For this to work, the node_key must be provided, rather than the peer_id. This is optional. The channel will be initialized with local-amt satoshis local and push-amt satoshis for the remote node. Note that specifying push-amt means you give that amount to the remote node as part of the channel opening. Once the channel is open, a channelPoint (txid:vout) of the funding output is returned. If the remote peer supports the option upfront shutdown feature bit (query listpeers to see their supported feature bits), an address to enforce payout of funds on cooperative close can optionally be provided. Note that if you set this value, you will not be able to cooperatively close out to another address. One can manually set the fee to be used for the funding transaction via either the --conf_target or --sat_per_byte arguments. 
This is optional.`, ArgsUsage: "node-key local-amt push-amt", Flags: []cli.Flag{ cli.StringFlag{ Name: "node_key", Usage: "the identity public key of the target node/peer " + "serialized in compressed format", }, cli.StringFlag{ Name: "connect", Usage: "(optional) the host:port of the target node", }, cli.IntFlag{ Name: "local_amt", Usage: "the number of satoshis the wallet should commit to the channel", }, cli.IntFlag{ Name: "push_amt", Usage: "the number of satoshis to give the remote side " + "as part of the initial commitment state, " + "this is equivalent to first opening a " + "channel and sending the remote party funds, " + "but done all in one step", }, cli.BoolFlag{ Name: "block", Usage: "block and wait until the channel is fully open", }, cli.Int64Flag{ Name: "conf_target", Usage: "(optional) the number of blocks that the " + "transaction *should* confirm in, will be " + "used for fee estimation", }, cli.Int64Flag{ Name: "sat_per_byte", Usage: "(optional) a manual fee expressed in " + "sat/byte that should be used when crafting " + "the transaction", }, cli.BoolFlag{ Name: "private", Usage: "make the channel private, such that it won't " + "be announced to the greater network, and " + "nodes other than the two channel endpoints " + "must be explicitly told about it to be able " + "to route through it", }, cli.Int64Flag{ Name: "min_htlc_msat", Usage: "(optional) the minimum value we will require " + "for incoming HTLCs on the channel", }, cli.Uint64Flag{ Name: "remote_csv_delay", Usage: "(optional) the number of blocks we will require " + "our channel counterparty to wait before accessing " + "its funds in case of unilateral close. 
If this is " + "not set, we will scale the value according to the " + "channel size", }, cli.Uint64Flag{ Name: "min_confs", Usage: "(optional) the minimum number of confirmations " + "each one of your outputs used for the funding " + "transaction must satisfy", Value: defaultUtxoMinConf, }, cli.StringFlag{ Name: "close_address", Usage: "(optional) an address to enforce payout of our " + "funds to on cooperative close. Note that if this " + "value is set on channel open, you will *not* be " + "able to cooperatively close to a different address.", }, cli.BoolFlag{ Name: "psbt", Usage: "start an interactive mode that initiates " + "funding through a partially signed bitcoin " + "transaction (PSBT), allowing the channel " + "funds to be added and signed from a hardware " + "or other offline device.", }, cli.StringFlag{ Name: "base_psbt", Usage: "when using the interactive PSBT mode to open " + "a new channel, use this base64 encoded PSBT " + "as a base and add the new channel output to " + "it instead of creating a new, empty one.", }, cli.BoolFlag{ Name: "no_publish", Usage: "when using the interactive PSBT mode to open " + "multiple channels in a batch, this flag " + "instructs lnd to not publish the full batch " + "transaction just yet. 
For safety reasons " + "this flag should be set for each of the " + "batch's transactions except the very last", }, cli.Uint64Flag{ Name: "remote_max_value_in_flight_msat", Usage: "(optional) the maximum value in msat that " + "can be pending within the channel at any given time", }, }, Action: actionDecorator(openChannel), } func openChannel(ctx *cli.Context) error { // TODO(roasbeef): add deadline to context ctxb := context.Background() client, cleanUp := getClient(ctx) defer cleanUp() args := ctx.Args() var err error // Show command help if no arguments provided if ctx.NArg() == 0 && ctx.NumFlags() == 0 { _ = cli.ShowCommandHelp(ctx, "openchannel") return nil } minConfs := int32(ctx.Uint64("min_confs")) req := &lnrpc.OpenChannelRequest{ TargetConf: int32(ctx.Int64("conf_target")), SatPerByte: ctx.Int64("sat_per_byte"), MinHtlcMsat: ctx.Int64("min_htlc_msat"), RemoteCsvDelay: uint32(ctx.Uint64("remote_csv_delay")), MinConfs: minConfs, SpendUnconfirmed: minConfs == 0, CloseAddress: ctx.String("close_address"), RemoteMaxValueInFlightMsat: ctx.Uint64("remote_max_value_in_flight_msat"), } switch { case ctx.IsSet("node_key"): nodePubHex, err := hex.DecodeString(ctx.String("node_key")) if err != nil { return fmt.Errorf("unable to decode node public key: %v", err) } req.NodePubkey = nodePubHex case args.Present(): nodePubHex, err := hex.DecodeString(args.First()) if err != nil { return fmt.Errorf("unable to decode node public key: %v", err) } args = args.Tail() req.NodePubkey = nodePubHex default: return fmt.Errorf("node id argument missing") } // As soon as we can confirm that the node's node_key was set, rather // than the peer_id, we can check if the host:port was also set to // connect to it before opening the channel. 
if req.NodePubkey != nil && ctx.IsSet("connect") { addr := &lnrpc.LightningAddress{ Pubkey: hex.EncodeToString(req.NodePubkey), Host: ctx.String("connect"), } req := &lnrpc.ConnectPeerRequest{ Addr: addr, Perm: false, } // Check if connecting to the node was successful. // We discard the peer id returned as it is not needed. _, err := client.ConnectPeer(ctxb, req) if err != nil && !strings.Contains(err.Error(), "already connected") { return err } } switch { case ctx.IsSet("local_amt"): req.LocalFundingAmount = int64(ctx.Int("local_amt")) case args.Present(): req.LocalFundingAmount, err = strconv.ParseInt(args.First(), 10, 64) if err != nil { return fmt.Errorf("unable to decode local amt: %v", err) } args = args.Tail() default: return fmt.Errorf("local amt argument missing") } if ctx.IsSet("push_amt") { req.PushSat = int64(ctx.Int("push_amt")) } else if args.Present() { req.PushSat, err = strconv.ParseInt(args.First(), 10, 64) if err != nil { return fmt.Errorf("unable to decode push amt: %v", err) } } req.Private = ctx.Bool("private") // PSBT funding is a more involved, interactive process that is too // large to also fit into this already long function. if ctx.Bool("psbt") { return openChannelPsbt(ctx, client, req) } if !ctx.Bool("psbt") && ctx.Bool("no_publish") { return fmt.Errorf("the --no_publish flag can only be used in " + "combination with the --psbt flag") } stream, err := client.OpenChannel(ctxb, req) if err != nil { return err } for { resp, err := stream.Recv() if err == io.EOF { return nil } else if err != nil { return err } switch update := resp.Update.(type) { case *lnrpc.OpenStatusUpdate_ChanPending: err := printChanPending(update) if err != nil { return err } if !ctx.Bool("block") { return nil } case *lnrpc.OpenStatusUpdate_ChanOpen: return printChanOpen(update) } } } // openChannelPsbt starts an interactive channel open protocol that uses a // partially signed bitcoin transaction (PSBT) to fund the channel output. 
The // protocol involves several steps between the RPC server and the CLI client: // // RPC server CLI client // | | // | |<------open channel (stream)-----| // | |-------ready for funding----->| | // | |<------PSBT verify------------| | // | |-------ready for signing----->| | // | |<------PSBT finalize----------| | // | |-------channel pending------->| | // | |-------channel open------------->| // | | func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, req *lnrpc.OpenChannelRequest) error { var ( pendingChanID [32]byte shimPending = true basePsbtBytes []byte quit = make(chan struct{}) srvMsg = make(chan *lnrpc.OpenStatusUpdate, 1) srvErr = make(chan error, 1) ctxc, cancel = context.WithCancel(context.Background()) ) defer cancel() // Make sure the user didn't supply any command line flags that are // incompatible with PSBT funding. err := checkPsbtFlags(req) if err != nil { return err } // If the user supplied a base PSBT, only make sure it's valid base64. // The RPC server will make sure it's also a valid PSBT. basePsbt := ctx.String("base_psbt") if basePsbt != "" { basePsbtBytes, err = base64.StdEncoding.DecodeString(basePsbt) if err != nil { return fmt.Errorf("error parsing base PSBT: %v", err) } } // Generate a new, random pending channel ID that we'll use as the main // identifier when sending update messages to the RPC server. if _, err := rand.Read(pendingChanID[:]); err != nil { return fmt.Errorf("unable to generate random chan ID: %v", err) } fmt.Printf("Starting PSBT funding flow with pending channel ID %x.\n", pendingChanID) // maybeCancelShim is a helper function that cancels the funding shim // with the RPC server in case we end up aborting early. maybeCancelShim := func() { // If the user canceled while there was still a shim registered // with the wallet, release the resources now. 
if shimPending { fmt.Printf("Canceling PSBT funding flow for pending "+ "channel ID %x.\n", pendingChanID) cancelMsg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_ShimCancel{ ShimCancel: &lnrpc.FundingShimCancel{ PendingChanId: pendingChanID[:], }, }, } err := sendFundingState(ctxc, ctx, cancelMsg) if err != nil { fmt.Printf("Error canceling shim: %v\n", err) } shimPending = false } // Abort the stream connection to the server. cancel() } defer maybeCancelShim() // Create the PSBT funding shim that will tell the funding manager we // want to use a PSBT. req.FundingShim = &lnrpc.FundingShim{ Shim: &lnrpc.FundingShim_PsbtShim{ PsbtShim: &lnrpc.PsbtShim{ PendingChanId: pendingChanID[:], BasePsbt: basePsbtBytes, NoPublish: ctx.Bool("no_publish"), }, }, } // Start the interactive process by opening the stream connection to the // daemon. If the user cancels by pressing <Ctrl+C> we need to cancel // the shim. To not just kill the process on interrupt, we need to // explicitly capture the signal. stream, err := client.OpenChannel(ctxc, req) if err != nil { return fmt.Errorf("opening stream to server failed: %v", err) } if err := signal.Intercept(); err != nil { return err } // We also need to spawn a goroutine that reads from the server. This // will copy the messages to the channel as long as they come in or add // exactly one error to the error stream and then bail out. go func() { for { // Recv blocks until a message or error arrives. resp, err := stream.Recv() if err == io.EOF { srvErr <- fmt.Errorf("lnd shutting down: %v", err) return } else if err != nil { srvErr <- fmt.Errorf("got error from server: "+ "%v", err) return } // Don't block on sending in case of shutting down. select { case srvMsg <- resp: case <-quit: return } } }() // Spawn another goroutine that only handles abort from user or errors // from the server. Both will trigger an attempt to cancel the shim with // the server. 
go func() { select { case <-signal.ShutdownChannel(): fmt.Printf("\nInterrupt signal received.\n") close(quit) case err := <-srvErr: fmt.Printf("\nError received: %v\n", err) // If the remote peer canceled on us, the reservation // has already been deleted. We don't need to try to // remove it again, this would just produce another // error. cancelErr := chanfunding.ErrRemoteCanceled.Error() if err != nil && strings.Contains(err.Error(), cancelErr) { shimPending = false } close(quit) case <-quit: } }() // Our main event loop where we wait for triggers for { var srvResponse *lnrpc.OpenStatusUpdate select { case srvResponse = <-srvMsg: case <-quit: return nil } switch update := srvResponse.Update.(type) { case *lnrpc.OpenStatusUpdate_PsbtFund: // First tell the user how to create the PSBT with the // address and amount we now know. amt := btcutil.Amount(update.PsbtFund.FundingAmount) addr := update.PsbtFund.FundingAddress fmt.Printf( userMsgFund, req.NodePubkey, amt, amt, addr, addr, amt.ToBTC(), base64.StdEncoding.EncodeToString( update.PsbtFund.Psbt, ), ) // Read the user's response and send it to the server to // verify everything's correct before anything is // signed. psbtBase64, err := readLine(quit) if err == io.EOF { return nil } if err != nil { return fmt.Errorf("reading from console "+ "failed: %v", err) } fundedPsbt, err := base64.StdEncoding.DecodeString( strings.TrimSpace(psbtBase64), ) if err != nil { return fmt.Errorf("base64 decode failed: %v", err) } verifyMsg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ PsbtVerify: &lnrpc.FundingPsbtVerify{ FundedPsbt: fundedPsbt, PendingChanId: pendingChanID[:], }, }, } err = sendFundingState(ctxc, ctx, verifyMsg) if err != nil { return fmt.Errorf("verifying PSBT by lnd "+ "failed: %v", err) } // Now that we know the PSBT looks good, we can let it // be signed by the user. fmt.Print(userMsgSign) // Read the signed PSBT and send it to lnd. 
finalTxStr, err := readLine(quit) if err == io.EOF { return nil } if err != nil { return fmt.Errorf("reading from console "+ "failed: %v", err) } finalizeMsg, err := finalizeMsgFromString( finalTxStr, pendingChanID[:], ) if err != nil { return err } transitionMsg := &lnrpc.FundingTransitionMsg{ Trigger: finalizeMsg, } err = sendFundingState(ctxc, ctx, transitionMsg) if err != nil { return fmt.Errorf("finalizing PSBT funding "+ "flow failed: %v", err) } case *lnrpc.OpenStatusUpdate_ChanPending: // As soon as the channel is pending, there is no more // shim that needs to be canceled. If the user // interrupts now, we don't need to clean up anything. shimPending = false err := printChanPending(update) if err != nil { return err } if !ctx.Bool("block") { return nil } case *lnrpc.OpenStatusUpdate_ChanOpen: return printChanOpen(update) } } } // printChanOpen prints the channel point of the channel open message. func printChanOpen(update *lnrpc.OpenStatusUpdate_ChanOpen) error { channelPoint := update.ChanOpen.ChannelPoint // A channel point's funding txid can be get/set as a // byte slice or a string. In the case it is a string, // decode it. var txidHash []byte switch channelPoint.GetFundingTxid().(type) { case *lnrpc.ChannelPoint_FundingTxidBytes: txidHash = channelPoint.GetFundingTxidBytes() case *lnrpc.ChannelPoint_FundingTxidStr: s := channelPoint.GetFundingTxidStr() h, err := chainhash.NewHashFromStr(s) if err != nil { return err } txidHash = h[:] } txid, err := chainhash.NewHash(txidHash) if err != nil { return err } index := channelPoint.OutputIndex printJSON(struct { ChannelPoint string `json:"channel_point"` }{ ChannelPoint: fmt.Sprintf("%v:%v", txid, index), }) return nil } // printChanPending prints the funding transaction ID of the channel pending // message. 
func printChanPending(update *lnrpc.OpenStatusUpdate_ChanPending) error { txid, err := chainhash.NewHash(update.ChanPending.Txid) if err != nil { return err } printJSON(struct { FundingTxid string `json:"funding_txid"` }{ FundingTxid: txid.String(), }) return nil } // readLine reads a line from standard in but does not block in case of a // system interrupt like syscall.SIGINT (Ctrl+C). func readLine(quit chan struct{}) (string, error) { msg := make(chan string, 1) // In a normal console, reading from stdin won't signal EOF when the // user presses Ctrl+C. That's why we need to put this in a separate // goroutine so it doesn't block. go func() { for { var str string _, _ = fmt.Scan(&str) msg <- str return } }() for { select { case <-quit: return "", io.EOF case str := <-msg: return str, nil } } } // checkPsbtFlags make sure a request to open a channel doesn't set any // parameters that are incompatible with the PSBT funding flow. func checkPsbtFlags(req *lnrpc.OpenChannelRequest) error { if req.MinConfs != defaultUtxoMinConf || req.SpendUnconfirmed { return fmt.Errorf("specifying minimum confirmations for PSBT " + "funding is not supported") } if req.TargetConf != 0 || req.SatPerByte != 0 { return fmt.Errorf("setting fee estimation parameters not " + "supported for PSBT funding") } return nil } // sendFundingState sends a single funding state step message by using a new // client connection. This is necessary if the whole funding flow takes longer // than the default macaroon timeout, then we cannot use a single client // connection. func sendFundingState(cancelCtx context.Context, cliCtx *cli.Context, msg *lnrpc.FundingTransitionMsg) error { client, cleanUp := getClient(cliCtx) defer cleanUp() _, err := client.FundingStateStep(cancelCtx, msg) return err } // finalizeMsgFromString creates the final message for the PsbtFinalize step // from either a hex encoded raw wire transaction or a base64 encoded PSBT // packet. 
func finalizeMsgFromString(tx string, pendingChanID []byte) (*lnrpc.FundingTransitionMsg_PsbtFinalize, error) { rawTx, err := hex.DecodeString(strings.TrimSpace(tx)) if err == nil { // Hex decoding succeeded so we assume we have a raw wire format // transaction. Let's submit that instead of a PSBT packet. tx := &wire.MsgTx{} err := tx.Deserialize(bytes.NewReader(rawTx)) if err != nil { return nil, fmt.Errorf("deserializing as raw wire "+ "transaction failed: %v", err) } return &lnrpc.FundingTransitionMsg_PsbtFinalize{ PsbtFinalize: &lnrpc.FundingPsbtFinalize{ FinalRawTx: rawTx, PendingChanId: pendingChanID, }, }, nil } // If the string isn't a hex encoded transaction, we assume it must be // a base64 encoded PSBT packet. psbtBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(tx)) if err != nil { return nil, fmt.Errorf("base64 decode failed: %v", err) } return &lnrpc.FundingTransitionMsg_PsbtFinalize{ PsbtFinalize: &lnrpc.FundingPsbtFinalize{ SignedPsbt: psbtBytes, PendingChanId: pendingChanID, }, }, nil }
// Package gmrtest provides a sample application to experiment with gangliamr. package main import ( "flag" "fmt" "math/rand" "net/http" "os" "runtime" "time" "github.com/4eek/gofaker/lorem" "github.com/daaku/go.ganglia/gmetric" "github.com/daaku/go.gangliamr" "github.com/daaku/go.metrics" ) type Server struct { MaxSentences int MaxSleep time.Duration ConcurrentRequests metrics.Counter NumRequests metrics.Meter ResponseTime metrics.Timer PageSize metrics.Histogram } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer s.ResponseTime.Start().Stop() defer s.ConcurrentRequests.Inc(1).Dec(1) s.NumRequests.Mark(1) time.Sleep(time.Duration(rand.Int63n(int64(s.MaxSleep)))) bd := lorem.Sentences(rand.Intn(s.MaxSentences)) s.PageSize.Update(int64(len(bd))) fmt.Fprint(w, bd, "\n") } func main() { server := &Server{ ConcurrentRequests: &gangliamr.Counter{ Name: "concurrent_requests", Title: "Number of concurrent requests", Units: "requests", Groups: []string{"gmrtest"}, }, NumRequests: &gangliamr.Meter{ Name: "num_requests", Title: "Number of requests", Units: "requests", Groups: []string{"gmrtest"}, }, ResponseTime: &gangliamr.Timer{ Name: "num_requests", Resolution: time.Millisecond, Title: "Response time", Groups: []string{"gmrtest"}, }, PageSize: &gangliamr.Histogram{ Name: "page_size", Title: "Page size", Units: "bytes", Groups: []string{"gmrtest"}, }, } client := gmetric.ClientFlag("ganglia") registry := &gangliamr.Registry{ Prefix: "gmrtest", WriteTickDuration: 20 * time.Second, Client: client, } addr := flag.String("addr", "0.0.0.0:8077", "server address") gomaxprocs := flag.Int("gomaxprocs", runtime.NumCPU(), "gomaxprocs") flag.DurationVar(&server.MaxSleep, "max-sleep", time.Second*5, "max sleep") flag.IntVar(&server.MaxSentences, "max-sentences", 500, "max sentences") flag.Parse() runtime.GOMAXPROCS(*gomaxprocs) if err := client.Open(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } registry.Register(server.ConcurrentRequests) 
registry.Register(server.NumRequests) registry.Register(server.ResponseTime) registry.Register(server.PageSize) fmt.Printf("Serving on http://%s/\n", *addr) if err := http.ListenAndServe(*addr, server); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } if err := client.Close(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } } shared groups slice // Package gmrtest provides a sample application to experiment with gangliamr. package main import ( "flag" "fmt" "math/rand" "net/http" "os" "runtime" "time" "github.com/4eek/gofaker/lorem" "github.com/daaku/go.ganglia/gmetric" "github.com/daaku/go.gangliamr" "github.com/daaku/go.metrics" ) type Server struct { MaxSentences int MaxSleep time.Duration ConcurrentRequests metrics.Counter NumRequests metrics.Meter ResponseTime metrics.Timer PageSize metrics.Histogram } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer s.ResponseTime.Start().Stop() defer s.ConcurrentRequests.Inc(1).Dec(1) s.NumRequests.Mark(1) time.Sleep(time.Duration(rand.Int63n(int64(s.MaxSleep)))) bd := lorem.Sentences(rand.Intn(s.MaxSentences)) s.PageSize.Update(int64(len(bd))) fmt.Fprint(w, bd, "\n") } func main() { gmrgroups := []string{"gmrtest"} server := &Server{ ConcurrentRequests: &gangliamr.Counter{ Name: "concurrent_requests", Title: "Number of concurrent requests", Units: "requests", Groups: gmrgroups, }, NumRequests: &gangliamr.Meter{ Name: "num_requests", Title: "Number of requests", Units: "requests", Groups: gmrgroups, }, ResponseTime: &gangliamr.Timer{ Name: "num_requests", Resolution: time.Millisecond, Title: "Response time", Groups: gmrgroups, }, PageSize: &gangliamr.Histogram{ Name: "page_size", Title: "Page size", Units: "bytes", Groups: gmrgroups, }, } client := gmetric.ClientFlag("ganglia") registry := &gangliamr.Registry{ Prefix: "gmrtest", WriteTickDuration: 20 * time.Second, Client: client, } addr := flag.String("addr", "0.0.0.0:8077", "server address") gomaxprocs := flag.Int("gomaxprocs", 
runtime.NumCPU(), "gomaxprocs") flag.DurationVar(&server.MaxSleep, "max-sleep", time.Second*5, "max sleep") flag.IntVar(&server.MaxSentences, "max-sentences", 500, "max sentences") flag.Parse() runtime.GOMAXPROCS(*gomaxprocs) if err := client.Open(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } registry.Register(server.ConcurrentRequests) registry.Register(server.NumRequests) registry.Register(server.ResponseTime) registry.Register(server.PageSize) fmt.Printf("Serving on http://%s/\n", *addr) if err := http.ListenAndServe(*addr, server); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } if err := client.Close(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } }
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
	"context"
	"fmt"
	"io"
	"net"
	"reflect"
	"time"

	"github.com/google/gapid/core/context/keys"
	"github.com/google/gapid/core/event/task"
	"github.com/google/gapid/core/java/jdbg"
	"github.com/google/gapid/core/java/jdwp"
	"github.com/google/gapid/core/log"
	"github.com/google/gapid/core/os/android/adb"
	"github.com/google/gapid/core/os/device"
	"github.com/google/gapid/gapidapk"
)

// expect reads exactly len(expected) bytes from r and returns an error if the
// read fails or the bytes do not match expected.
func expect(r io.Reader, expected []byte) error {
	got := make([]byte, len(expected))
	if _, err := io.ReadFull(r, got); err != nil {
		return err
	}
	if !reflect.DeepEqual(expected, got) {
		return fmt.Errorf("Expected %v, got %v", expected, got)
	}
	return nil
}

// waitForOnCreate waits for android.app.Application.onCreate to be called, and
// then suspends the thread.
//
// wakeup is forwarded to WaitForMethodEntry; NOTE(review): presumably a thread
// to resume while waiting — confirm against the jdwp package documentation.
func waitForOnCreate(ctx context.Context, conn *jdwp.Connection, wakeup jdwp.ThreadID) (*jdwp.EventMethodEntry, error) {
	app, err := conn.GetClassBySignature("Landroid/app/Application;")
	if err != nil {
		return nil, err
	}

	onCreate, err := conn.GetClassMethod(app.ClassID(), "onCreate", "()V")
	if err != nil {
		return nil, err
	}

	return conn.WaitForMethodEntry(ctx, app.ClassID(), onCreate.ID, wakeup)
}

// waitForVulkanLoad for android.app.ApplicationLoaders.getClassLoader to be called,
// and then suspends the thread.
// This function is what is used to tell the vulkan loader where to search for
// layers.
func waitForVulkanLoad(ctx context.Context, conn *jdwp.Connection) (*jdwp.EventMethodEntry, error) {
	loaders, err := conn.GetClassBySignature("Landroid/app/ApplicationLoaders;")
	if err != nil {
		return nil, err
	}

	getClassLoader, err := conn.GetClassMethod(loaders.ClassID(),
		"getClassLoader",
		"(Ljava/lang/String;IZLjava/lang/String;Ljava/lang/String;Ljava/lang/ClassLoader;)Ljava/lang/ClassLoader;")
	if err != nil {
		return nil, err
	}

	return conn.WaitForMethodEntry(ctx, loaders.ClassID(), getClassLoader.ID, 0)
}

// loadAndConnectViaJDWP connects to the application waiting for a JDWP
// connection with the specified process id, sends a number of JDWP commands to
// load the list of libraries.
func (p *Process) loadAndConnectViaJDWP(
	ctx context.Context,
	gapidAPK *gapidapk.APK,
	pid int,
	d adb.Device) error {

	const (
		reconnectAttempts = 10
		reconnectDelay    = time.Second
	)

	jdwpPort, err := adb.LocalFreeTCPPort()
	if err != nil {
		return log.Err(ctx, err, "Finding free port")
	}

	ctx = log.V{"jdwpPort": jdwpPort}.Bind(ctx)

	log.I(ctx, "Forwarding TCP port %v -> JDWP pid %v", jdwpPort, pid)
	if err := d.Forward(ctx, adb.TCPPort(jdwpPort), adb.Jdwp(pid)); err != nil {
		return log.Err(ctx, err, "Setting up JDWP port forwarding")
	}
	defer func() {
		// Clone context to ignore cancellation.
		ctx := keys.Clone(context.Background(), ctx)
		d.RemoveForward(ctx, adb.TCPPort(jdwpPort))
	}()

	ctx, stop := task.WithCancel(ctx)
	defer stop()

	log.I(ctx, "Connecting to JDWP")

	// Create a JDWP connection with the application.
	// The dial/open is retried because the forwarded socket may not be
	// ready immediately.
	var sock net.Conn
	var conn *jdwp.Connection
	err = task.Retry(ctx, reconnectAttempts, reconnectDelay, func(ctx context.Context) (bool, error) {
		if sock, err = net.Dial("tcp", fmt.Sprintf("localhost:%v", jdwpPort)); err != nil {
			return false, err
		}
		if conn, err = jdwp.Open(ctx, sock); err != nil {
			sock.Close()
			return false, err
		}
		return true, nil
	})
	if err != nil {
		return log.Err(ctx, err, "Connecting to JDWP")
	}
	defer sock.Close()

	// processABI resolves the device ABI by reading android.os.Build's
	// CPU_ABI field through the debugger.
	processABI := func(j *jdbg.JDbg) (*device.ABI, error) {
		abiName := j.Class("android.os.Build").Field("CPU_ABI").Get().(string)
		abi := device.ABIByName(abiName)
		if abi == nil {
			return nil, fmt.Errorf("Unknown ABI %v", abiName)
		}

		// For NativeBridge emulated devices opt for the native ABI of the
		// emulator.
		abi = d.NativeBridgeABI(ctx, abi)

		return abi, nil
	}

	classLoaderThread := jdwp.ThreadID(0)

	log.I(ctx, "Waiting for ApplicationLoaders.getClassLoader()")
	getClassLoader, err := waitForVulkanLoad(ctx, conn)
	if err == nil {
		// If err != nil that means we could not find or break in getClassLoader
		// so we have no vulkan support.
		classLoaderThread = getClassLoader.Thread
		err = jdbg.Do(conn, getClassLoader.Thread, func(j *jdbg.JDbg) error {
			abi, err := processABI(j)
			if err != nil {
				return err
			}
			// Append the APK's library directory to the loader's
			// search path so the Vulkan loader can find our layers.
			libsPath := gapidAPK.LibsPath(abi)
			newLibraryPath := j.String(":" + libsPath)
			obj := j.GetStackObject("librarySearchPath").Call("concat", newLibraryPath)
			j.SetStackObject("librarySearchPath", obj)
			return nil
		})
		if err != nil {
			return log.Err(ctx, err, "JDWP failure")
		}
	} else {
		log.W(ctx, "Couldn't break in ApplicationLoaders.getClassLoader. Vulkan will not be supported.")
	}

	// Wait for Application.onCreate to be called.
	log.I(ctx, "Waiting for Application.onCreate()")
	onCreate, err := waitForOnCreate(ctx, conn, classLoaderThread)
	if err != nil {
		return log.Err(ctx, err, "Waiting for Application.OnCreate")
	}

	// Attempt to get the GVR library handle.
	// Will throw an exception for non-GVR apps.
	var gvrHandle uint64
	log.I(ctx, "Installing interceptor libraries")
	loadNativeGvrLibrary, vrCoreLibraryLoader := "loadNativeGvrLibrary", "com/google/vr/cardboard/VrCoreLibraryLoader"
	gvrMajor, gvrMinor, gvrPoint := 1, 8, 1

	getGVRHandle := func(j *jdbg.JDbg, libLoader jdbg.Type) error {
		// loadNativeGvrLibrary has a couple of different signatures depending
		// on GVR release.
		for _, f := range []func() error{
			// loadNativeGvrLibrary(Context, int major, int minor, int point)
			func() error {
				gvrHandle = (uint64)(libLoader.Call(loadNativeGvrLibrary, j.This(), gvrMajor, gvrMinor, gvrPoint).Get().(int64))
				return nil
			},
			// loadNativeGvrLibrary(Context)
			func() error {
				gvrHandle = (uint64)(libLoader.Call(loadNativeGvrLibrary, j.This()).Get().(int64))
				return nil
			},
		} {
			if jdbg.Try(f) == nil {
				return nil
			}
		}
		return fmt.Errorf("Couldn't call loadNativeGvrLibrary")
	}
	// NOTE(review): getGVRHandle errors are deliberately discarded here; a
	// failure simply leaves gvrHandle at 0, which is reported below.
	for _, f := range []func(j *jdbg.JDbg) error{
		func(j *jdbg.JDbg) error {
			libLoader := j.Class(vrCoreLibraryLoader)
			getGVRHandle(j, libLoader)
			return nil
		},
		func(j *jdbg.JDbg) error {
			classLoader := j.This().Call("getClassLoader")
			libLoader := classLoader.Call("findClass", vrCoreLibraryLoader).AsType()
			getGVRHandle(j, libLoader)
			return nil
		},
	} {
		if err := jdbg.Do(conn, onCreate.Thread, f); err == nil {
			break
		}
	}
	if gvrHandle == 0 {
		log.I(ctx, "GVR library not found")
	} else {
		log.I(ctx, "GVR library found")
	}

	// Connect to GAPII.
	// This has to be done on a separate go-routine as the call to load gapii
	// will block until a connection is made.
	connErr := make(chan error)

	// Load GAPII library.
	err = jdbg.Do(conn, onCreate.Thread, func(j *jdbg.JDbg) error {
		abi, err := processABI(j)
		if err != nil {
			return err
		}
		interceptorPath := gapidAPK.LibInterceptorPath(abi)
		go func() {
			connErr <- p.connect(ctx, gvrHandle, interceptorPath)
		}()
		gapiiPath := gapidAPK.LibGAPIIPath(abi)
		ctx = log.V{"gapii.so": gapiiPath, "process abi": abi.Name}.Bind(ctx)

		// Load the library.
		log.D(ctx, "Loading GAPII library...")
		// Work around for loading libraries in the N previews. See b/29441142.
		j.Class("java.lang.Runtime").Call("getRuntime").Call("doLoad", gapiiPath, nil)
		log.D(ctx, "Library loaded")
		return nil
	})
	if err != nil {
		return log.Err(ctx, err, "loadGAPII")
	}

	return <-connErr
}

gapii/client: Warn if AS is preventing tracing Issue: #911

// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
	"context"
	"fmt"
	"io"
	"net"
	"reflect"
	"time"

	"github.com/google/gapid/core/context/keys"
	"github.com/google/gapid/core/event/task"
	"github.com/google/gapid/core/java/jdbg"
	"github.com/google/gapid/core/java/jdwp"
	"github.com/google/gapid/core/log"
	"github.com/google/gapid/core/os/android/adb"
	"github.com/google/gapid/core/os/device"
	"github.com/google/gapid/gapidapk"
)

// expect reads exactly len(expected) bytes from r and returns an error if the
// read fails or the bytes do not match expected.
func expect(r io.Reader, expected []byte) error {
	got := make([]byte, len(expected))
	if _, err := io.ReadFull(r, got); err != nil {
		return err
	}
	if !reflect.DeepEqual(expected, got) {
		return fmt.Errorf("Expected %v, got %v", expected, got)
	}
	return nil
}

// waitForOnCreate waits for android.app.Application.onCreate to be called, and
// then suspends the thread.
func waitForOnCreate(ctx context.Context, conn *jdwp.Connection, wakeup jdwp.ThreadID) (*jdwp.EventMethodEntry, error) {
	app, err := conn.GetClassBySignature("Landroid/app/Application;")
	if err != nil {
		return nil, err
	}

	onCreate, err := conn.GetClassMethod(app.ClassID(), "onCreate", "()V")
	if err != nil {
		return nil, err
	}

	return conn.WaitForMethodEntry(ctx, app.ClassID(), onCreate.ID, wakeup)
}

// waitForVulkanLoad for android.app.ApplicationLoaders.getClassLoader to be called,
// and then suspends the thread.
// This function is what is used to tell the vulkan loader where to search for
// layers.
func waitForVulkanLoad(ctx context.Context, conn *jdwp.Connection) (*jdwp.EventMethodEntry, error) {
	loaders, err := conn.GetClassBySignature("Landroid/app/ApplicationLoaders;")
	if err != nil {
		return nil, err
	}

	getClassLoader, err := conn.GetClassMethod(loaders.ClassID(),
		"getClassLoader",
		"(Ljava/lang/String;IZLjava/lang/String;Ljava/lang/String;Ljava/lang/ClassLoader;)Ljava/lang/ClassLoader;")
	if err != nil {
		return nil, err
	}

	return conn.WaitForMethodEntry(ctx, loaders.ClassID(), getClassLoader.ID, 0)
}

// loadAndConnectViaJDWP connects to the application waiting for a JDWP
// connection with the specified process id, sends a number of JDWP commands to
// load the list of libraries.
func (p *Process) loadAndConnectViaJDWP(
	ctx context.Context,
	gapidAPK *gapidapk.APK,
	pid int,
	d adb.Device) error {

	const (
		reconnectAttempts = 10
		reconnectDelay    = time.Second
	)

	jdwpPort, err := adb.LocalFreeTCPPort()
	if err != nil {
		return log.Err(ctx, err, "Finding free port")
	}

	ctx = log.V{"jdwpPort": jdwpPort}.Bind(ctx)

	log.I(ctx, "Forwarding TCP port %v -> JDWP pid %v", jdwpPort, pid)
	if err := d.Forward(ctx, adb.TCPPort(jdwpPort), adb.Jdwp(pid)); err != nil {
		return log.Err(ctx, err, "Setting up JDWP port forwarding")
	}
	defer func() {
		// Clone context to ignore cancellation.
		ctx := keys.Clone(context.Background(), ctx)
		d.RemoveForward(ctx, adb.TCPPort(jdwpPort))
	}()

	ctx, stop := task.WithCancel(ctx)
	defer stop()

	log.I(ctx, "Connecting to JDWP")

	// Create a JDWP connection with the application.
	var sock net.Conn
	var conn *jdwp.Connection
	err = task.Retry(ctx, reconnectAttempts, reconnectDelay, func(ctx context.Context) (bool, error) {
		if sock, err = net.Dial("tcp", fmt.Sprintf("localhost:%v", jdwpPort)); err != nil {
			return false, err
		}
		if conn, err = jdwp.Open(ctx, sock); err != nil {
			sock.Close()
			return false, err
		}
		return true, nil
	})
	if err != nil {
		// An immediate EOF on the freshly dialed connection indicates
		// that something else is already attached to the app's JDWP
		// socket (per the message below, typically Android Studio).
		if err == io.EOF {
			return fmt.Errorf("Unable to connect to the application.\n\n" +
				"This can happen when another debugger or IDE is running " +
				"in the background, such as Android Studio.\n" +
				"Please close any running Android debuggers and try again.\n\n" +
				"See https://github.com/google/gapid/issues/911 for more " +
				"information")
		}
		return log.Err(ctx, err, "Connecting to JDWP")
	}
	defer sock.Close()

	// processABI resolves the device ABI by reading android.os.Build's
	// CPU_ABI field through the debugger.
	processABI := func(j *jdbg.JDbg) (*device.ABI, error) {
		abiName := j.Class("android.os.Build").Field("CPU_ABI").Get().(string)
		abi := device.ABIByName(abiName)
		if abi == nil {
			return nil, fmt.Errorf("Unknown ABI %v", abiName)
		}

		// For NativeBridge emulated devices opt for the native ABI of the
		// emulator.
		abi = d.NativeBridgeABI(ctx, abi)

		return abi, nil
	}

	classLoaderThread := jdwp.ThreadID(0)

	log.I(ctx, "Waiting for ApplicationLoaders.getClassLoader()")
	getClassLoader, err := waitForVulkanLoad(ctx, conn)
	if err == nil {
		// If err != nil that means we could not find or break in getClassLoader
		// so we have no vulkan support.
		classLoaderThread = getClassLoader.Thread
		err = jdbg.Do(conn, getClassLoader.Thread, func(j *jdbg.JDbg) error {
			abi, err := processABI(j)
			if err != nil {
				return err
			}
			// Append the APK's library directory to the loader's
			// search path so the Vulkan loader can find our layers.
			libsPath := gapidAPK.LibsPath(abi)
			newLibraryPath := j.String(":" + libsPath)
			obj := j.GetStackObject("librarySearchPath").Call("concat", newLibraryPath)
			j.SetStackObject("librarySearchPath", obj)
			return nil
		})
		if err != nil {
			return log.Err(ctx, err, "JDWP failure")
		}
	} else {
		log.W(ctx, "Couldn't break in ApplicationLoaders.getClassLoader. Vulkan will not be supported.")
	}

	// Wait for Application.onCreate to be called.
	log.I(ctx, "Waiting for Application.onCreate()")
	onCreate, err := waitForOnCreate(ctx, conn, classLoaderThread)
	if err != nil {
		return log.Err(ctx, err, "Waiting for Application.OnCreate")
	}

	// Attempt to get the GVR library handle.
	// Will throw an exception for non-GVR apps.
	var gvrHandle uint64
	log.I(ctx, "Installing interceptor libraries")
	loadNativeGvrLibrary, vrCoreLibraryLoader := "loadNativeGvrLibrary", "com/google/vr/cardboard/VrCoreLibraryLoader"
	gvrMajor, gvrMinor, gvrPoint := 1, 8, 1

	getGVRHandle := func(j *jdbg.JDbg, libLoader jdbg.Type) error {
		// loadNativeGvrLibrary has a couple of different signatures depending
		// on GVR release.
		for _, f := range []func() error{
			// loadNativeGvrLibrary(Context, int major, int minor, int point)
			func() error {
				gvrHandle = (uint64)(libLoader.Call(loadNativeGvrLibrary, j.This(), gvrMajor, gvrMinor, gvrPoint).Get().(int64))
				return nil
			},
			// loadNativeGvrLibrary(Context)
			func() error {
				gvrHandle = (uint64)(libLoader.Call(loadNativeGvrLibrary, j.This()).Get().(int64))
				return nil
			},
		} {
			if jdbg.Try(f) == nil {
				return nil
			}
		}
		return fmt.Errorf("Couldn't call loadNativeGvrLibrary")
	}
	// NOTE(review): getGVRHandle errors are deliberately discarded here; a
	// failure simply leaves gvrHandle at 0, which is reported below.
	for _, f := range []func(j *jdbg.JDbg) error{
		func(j *jdbg.JDbg) error {
			libLoader := j.Class(vrCoreLibraryLoader)
			getGVRHandle(j, libLoader)
			return nil
		},
		func(j *jdbg.JDbg) error {
			classLoader := j.This().Call("getClassLoader")
			libLoader := classLoader.Call("findClass", vrCoreLibraryLoader).AsType()
			getGVRHandle(j, libLoader)
			return nil
		},
	} {
		if err := jdbg.Do(conn, onCreate.Thread, f); err == nil {
			break
		}
	}
	if gvrHandle == 0 {
		log.I(ctx, "GVR library not found")
	} else {
		log.I(ctx, "GVR library found")
	}

	// Connect to GAPII.
	// This has to be done on a separate go-routine as the call to load gapii
	// will block until a connection is made.
	connErr := make(chan error)

	// Load GAPII library.
	err = jdbg.Do(conn, onCreate.Thread, func(j *jdbg.JDbg) error {
		abi, err := processABI(j)
		if err != nil {
			return err
		}
		interceptorPath := gapidAPK.LibInterceptorPath(abi)
		go func() {
			connErr <- p.connect(ctx, gvrHandle, interceptorPath)
		}()
		gapiiPath := gapidAPK.LibGAPIIPath(abi)
		ctx = log.V{"gapii.so": gapiiPath, "process abi": abi.Name}.Bind(ctx)

		// Load the library.
		log.D(ctx, "Loading GAPII library...")
		// Work around for loading libraries in the N previews. See b/29441142.
		j.Class("java.lang.Runtime").Call("getRuntime").Call("doLoad", gapiiPath, nil)
		log.D(ctx, "Library loaded")
		return nil
	})
	if err != nil {
		return log.Err(ctx, err, "loadGAPII")
	}

	return <-connErr
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gles

import (
	"context"
	"fmt"

	"github.com/google/gapid/core/image"
	"github.com/google/gapid/core/log"
	"github.com/google/gapid/gapis/api"
	"github.com/google/gapid/gapis/capture"
	"github.com/google/gapid/gapis/messages"
	"github.com/google/gapid/gapis/resolve"
	"github.com/google/gapid/gapis/service"
	"github.com/google/gapid/gapis/service/box"
	"github.com/google/gapid/gapis/service/path"
)

// IsResource returns true if this instance should be considered as a resource.
// A texture with ID 0 is never exposed as a resource.
func (t *Texture) IsResource() bool {
	return t.ID != 0
}

// ResourceHandle returns the UI identity for the resource.
func (t *Texture) ResourceHandle() string {
	return fmt.Sprintf("Texture<%d>", t.ID)
}

// ResourceLabel returns an optional debug label for the resource.
func (t *Texture) ResourceLabel() string {
	return t.Label
}

// Order returns an integer used to sort the resources for presentation.
// Textures sort by their GL object ID.
func (t *Texture) Order() uint64 {
	return uint64(t.ID)
}

// ResourceType returns the type of this resource.
func (t *Texture) ResourceType(ctx context.Context) api.ResourceType {
	return api.ResourceType_TextureResource
}

// ResourceData returns the resource data given the current state.
func (t *Texture) ResourceData(ctx context.Context, s *api.State) (*api.ResourceData, error) {
	ctx = log.Enter(ctx, "Texture.ResourceData()")
	switch t.Kind {
	case GLenum_GL_TEXTURE_2D:
		// One image.Info per mip level; 2D textures use layer 0 only.
		levels := make([]*image.Info, len(t.Levels))
		for i, level := range t.Levels {
			img := level.Layers[0]
			if img.Data.count == 0 {
				// TODO: Make other results available
				return nil, &service.ErrDataUnavailable{Reason: messages.ErrNoTextureData(t.ResourceHandle())}
			}
			dataFormat, dataType := img.getUnsizedFormatAndType()
			format, err := getImageFormat(dataFormat, dataType)
			if err != nil {
				return nil, err
			}
			levels[i] = &image.Info{
				Format: format,
				Width:  uint32(img.Width),
				Height: uint32(img.Height),
				Depth:  1,
				Bytes:  image.NewID(img.Data.ResourceID(ctx, s)),
			}
		}
		return api.NewResourceData(api.NewTexture(&api.Texture2D{Levels: levels})), nil

	case GLenum_GL_TEXTURE_CUBE_MAP:
		levels := make([]*api.CubemapLevel, len(t.Levels))
		for i, level := range t.Levels {
			levels[i] = &api.CubemapLevel{}
			// Layers are indexed by face; the switch below maps the
			// layer index j onto the corresponding GL cube map face
			// enum, starting at GL_TEXTURE_CUBE_MAP_POSITIVE_X.
			for j, face := range level.Layers {
				if face.Data.count == 0 {
					// TODO: Make other results available
					return nil, &service.ErrDataUnavailable{Reason: messages.ErrNoTextureData(t.ResourceHandle())}
				}
				dataFormat, dataType := face.getUnsizedFormatAndType()
				format, err := getImageFormat(dataFormat, dataType)
				if err != nil {
					return nil, err
				}
				img := &image.Info{
					Format: format,
					Width:  uint32(face.Width),
					Height: uint32(face.Height),
					Depth:  1,
					Bytes:  image.NewID(face.Data.ResourceID(ctx, s)),
				}
				switch GLenum(j) + GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_X {
				case GLenum_GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
					levels[i].NegativeX = img
				case GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_X:
					levels[i].PositiveX = img
				case GLenum_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
					levels[i].NegativeY = img
				case GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
					levels[i].PositiveY = img
				case GLenum_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
					levels[i].NegativeZ = img
				case GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
					levels[i].PositiveZ = img
				}
			}
		}
		return api.NewResourceData(api.NewTexture(&api.Cubemap{Levels: levels})), nil

	default:
		// Other texture kinds are not yet convertible to resource data.
		return nil, &service.ErrDataUnavailable{Reason: messages.ErrNoTextureData(t.ResourceHandle())}
	}
}

// SetResourceData is unsupported for textures and always returns an error.
func (t *Texture) SetResourceData(ctx context.Context, at *path.Command,
	data *api.ResourceData, resources api.ResourceMap,
	edits api.ReplaceCallback) error {
	return fmt.Errorf("SetResourceData is not supported for Texture")
}

// IsResource returns true if this instance should be considered as a resource.
// A shader with ID 0 is never exposed as a resource.
func (s *Shader) IsResource() bool {
	return s.ID != 0
}

// ResourceHandle returns the UI identity for the resource.
func (s *Shader) ResourceHandle() string {
	return fmt.Sprintf("Shader<%d>", s.ID)
}

// ResourceLabel returns an optional debug label for the resource.
func (s *Shader) ResourceLabel() string {
	return s.Label
}

// Order returns an integer used to sort the resources for presentation.
func (s *Shader) Order() uint64 {
	return uint64(s.ID)
}

// ResourceType returns the type of this resource.
func (s *Shader) ResourceType(ctx context.Context) api.ResourceType {
	return api.ResourceType_ShaderResource
}

// ResourceData returns the resource data given the current state.
func (s *Shader) ResourceData(ctx context.Context, t *api.State) (*api.ResourceData, error) { ctx = log.Enter(ctx, "Shader.ResourceData()") var ty api.ShaderType switch s.ShaderType { case GLenum_GL_VERTEX_SHADER: ty = api.ShaderType_Vertex case GLenum_GL_GEOMETRY_SHADER: ty = api.ShaderType_Geometry case GLenum_GL_TESS_CONTROL_SHADER: ty = api.ShaderType_TessControl case GLenum_GL_TESS_EVALUATION_SHADER: ty = api.ShaderType_TessEvaluation case GLenum_GL_FRAGMENT_SHADER: ty = api.ShaderType_Fragment case GLenum_GL_COMPUTE_SHADER: ty = api.ShaderType_Compute } return api.NewResourceData(&api.Shader{Type: ty, Source: s.Source}), nil } func (shader *Shader) SetResourceData( ctx context.Context, at *path.Command, data *api.ResourceData, resourceIDs api.ResourceMap, edits api.ReplaceCallback) error { atomIdx := at.Indices[0] if len(at.Indices) > 1 { return fmt.Errorf("Subcommands currently not supported for GLES resources") // TODO: Subcommands } // Dirty. TODO: Make separate type for getting info for a single resource. 
capturePath := at.Capture resources, err := resolve.Resources(ctx, capturePath) if err != nil { return err } resourceID := resourceIDs[shader] resource := resources.Find(shader.ResourceType(ctx), resourceID) if resource == nil { return fmt.Errorf("Couldn't find resource") } c, err := capture.ResolveFromPath(ctx, capturePath) if err != nil { return err } index := len(resource.Accesses) - 1 for resource.Accesses[index].Indices[0] > atomIdx && index >= 0 { // TODO: Subcommands index-- } for j := index; j >= 0; j-- { i := resource.Accesses[j].Indices[0] // TODO: Subcommands if a, ok := c.Commands[i].(*GlShaderSource); ok { edits(uint64(i), a.Replace(ctx, c, data)) return nil } } return fmt.Errorf("No command to set data in") } func (a *GlShaderSource) Replace(ctx context.Context, c *capture.Capture, data *api.ResourceData) interface{} { state := c.NewState() shader := data.GetShader() source := shader.Source src := state.AllocDataOrPanic(ctx, source) srcLen := state.AllocDataOrPanic(ctx, GLint(len(source))) srcPtr := state.AllocDataOrPanic(ctx, src.Ptr()) cb := CommandBuilder{Thread: a.thread} return cb.GlShaderSource(a.Shader, 1, srcPtr.Ptr(), srcLen.Ptr()). AddRead(srcPtr.Data()). AddRead(srcLen.Data()). AddRead(src.Data()) } // IsResource returns true if this instance should be considered as a resource. func (p *Program) IsResource() bool { return p.ID != 0 } // ResourceHandle returns the UI identity for the resource. func (p *Program) ResourceHandle() string { return fmt.Sprintf("Program<%d>", p.ID) } // ResourceLabel returns an optional debug label for the resource. func (p *Program) ResourceLabel() string { return p.Label } // Order returns an integer used to sort the resources for presentation. func (p *Program) Order() uint64 { return uint64(p.ID) } // ResourceType returns the type of this resource. 
func (p *Program) ResourceType(ctx context.Context) api.ResourceType { return api.ResourceType_ProgramResource } // ResourceData returns the resource data given the current state. func (p *Program) ResourceData(ctx context.Context, s *api.State) (*api.ResourceData, error) { ctx = log.Enter(ctx, "Program.ResourceData()") shaders := make([]*api.Shader, 0, len(p.Shaders)) for shaderType, shader := range p.Shaders { var ty api.ShaderType switch shaderType { case GLenum_GL_VERTEX_SHADER: ty = api.ShaderType_Vertex case GLenum_GL_GEOMETRY_SHADER: ty = api.ShaderType_Geometry case GLenum_GL_TESS_CONTROL_SHADER: ty = api.ShaderType_TessControl case GLenum_GL_TESS_EVALUATION_SHADER: ty = api.ShaderType_TessEvaluation case GLenum_GL_FRAGMENT_SHADER: ty = api.ShaderType_Fragment case GLenum_GL_COMPUTE_SHADER: ty = api.ShaderType_Compute } shaders = append(shaders, &api.Shader{ Type: ty, Source: shader.Source, }) } uniforms := make([]*api.Uniform, 0, len(p.ActiveUniforms)) for _, activeUniform := range p.ActiveUniforms { uniform := p.Uniforms[activeUniform.Location] var uniformFormat api.UniformFormat var uniformType api.UniformType switch activeUniform.Type { case GLenum_GL_FLOAT: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Float case GLenum_GL_FLOAT_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Float case GLenum_GL_INT: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Int32 case GLenum_GL_INT_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Int32 case GLenum_GL_INT_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Int32 case GLenum_GL_INT_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Int32 case GLenum_GL_UNSIGNED_INT: 
uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Uint32 case GLenum_GL_BOOL: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Bool case GLenum_GL_BOOL_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Bool case GLenum_GL_BOOL_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Bool case GLenum_GL_BOOL_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Bool case GLenum_GL_FLOAT_MAT2: uniformFormat = api.UniformFormat_Mat2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT3: uniformFormat = api.UniformFormat_Mat3 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT4: uniformFormat = api.UniformFormat_Mat4 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT2x3: uniformFormat = api.UniformFormat_Mat2x3 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT2x4: uniformFormat = api.UniformFormat_Mat2x4 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT3x2: uniformFormat = api.UniformFormat_Mat3x2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT3x4: uniformFormat = api.UniformFormat_Mat3x4 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT4x2: uniformFormat = api.UniformFormat_Mat4x2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT4x3: uniformFormat = api.UniformFormat_Mat4x3 uniformType = api.UniformType_Float case GLenum_GL_SAMPLER_2D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_3D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_CUBE: 
uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_2D_SHADOW: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_2D_ARRAY: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_2D_ARRAY_SHADOW: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_CUBE_SHADOW: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_2D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_3D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_CUBE: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_2D_ARRAY: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_2D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_3D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_CUBE: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 default: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Float } uniforms = append(uniforms, &api.Uniform{ UniformLocation: uint32(activeUniform.Location), Name: activeUniform.Name, Format: uniformFormat, Type: uniformType, Value: box.NewValue(uniformValue(ctx, s, uniformType, uniform.Value)), }) } return api.NewResourceData(&api.Program{Shaders: shaders, Uniforms: uniforms}), nil } func uniformValue(ctx context.Context, s *api.State, kind api.UniformType, data U8ˢ) interface{} { r := data.Reader(ctx, 
s) switch kind { case api.UniformType_Int32: a := make([]int32, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Int32() } return a case api.UniformType_Uint32: a := make([]uint32, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Uint32() } return a case api.UniformType_Bool: a := make([]bool, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Int32() != 0 } return a case api.UniformType_Float: a := make([]float32, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Float32() } return a case api.UniformType_Double: a := make([]float64, data.count/8) for i := 0; i < len(a); i++ { a[i] = r.Float64() } return a default: panic(fmt.Errorf("Can't box uniform data type %v", kind)) } } func (program *Program) SetResourceData(ctx context.Context, at *path.Command, data *api.ResourceData, resources api.ResourceMap, edits api.ReplaceCallback) error { return fmt.Errorf("SetResourceData is not supported for Program") } gles: Show partially updated cubemaps // Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package gles

import (
	"context"
	"fmt"

	"github.com/google/gapid/core/image"
	"github.com/google/gapid/core/log"
	"github.com/google/gapid/gapis/api"
	"github.com/google/gapid/gapis/capture"
	"github.com/google/gapid/gapis/messages"
	"github.com/google/gapid/gapis/resolve"
	"github.com/google/gapid/gapis/service"
	"github.com/google/gapid/gapis/service/box"
	"github.com/google/gapid/gapis/service/path"
)

// This file exposes GLES Texture/Shader/Program state objects as service
// resources (api.Resource implementations).

// IsResource returns true if this instance should be considered as a resource.
// A texture only becomes a resource once it has a non-zero GL name.
func (t *Texture) IsResource() bool {
	return t.ID != 0
}

// ResourceHandle returns the UI identity for the resource.
func (t *Texture) ResourceHandle() string {
	return fmt.Sprintf("Texture<%d>", t.ID)
}

// ResourceLabel returns an optional debug label for the resource.
func (t *Texture) ResourceLabel() string {
	return t.Label
}

// Order returns an integer used to sort the resources for presentation.
// Textures are presented in GL-name order.
func (t *Texture) Order() uint64 {
	return uint64(t.ID)
}

// ResourceType returns the type of this resource.
func (t *Texture) ResourceType(ctx context.Context) api.ResourceType {
	return api.ResourceType_TextureResource
}

// ResourceData returns the resource data given the current state.
func (t *Texture) ResourceData(ctx context.Context, s *api.State) (*api.ResourceData, error) {
	ctx = log.Enter(ctx, "Texture.ResourceData()")
	switch t.Kind {
	case GLenum_GL_TEXTURE_2D:
		levels := make([]*image.Info, len(t.Levels))
		for i, level := range t.Levels {
			img := level.Layers[0]
			// A 2D texture with any empty mip level is reported as having
			// no data at all.
			if img.Data.count == 0 {
				// TODO: Make other results available
				return nil, &service.ErrDataUnavailable{Reason: messages.ErrNoTextureData(t.ResourceHandle())}
			}
			dataFormat, dataType := img.getUnsizedFormatAndType()
			format, err := getImageFormat(dataFormat, dataType)
			if err != nil {
				return nil, err
			}
			levels[i] = &image.Info{
				Format: format,
				Width:  uint32(img.Width),
				Height: uint32(img.Height),
				Depth:  1,
				Bytes:  image.NewID(img.Data.ResourceID(ctx, s)),
			}
		}
		return api.NewResourceData(api.NewTexture(&api.Texture2D{Levels: levels})), nil

	case GLenum_GL_TEXTURE_CUBE_MAP:
		levels := make([]*api.CubemapLevel, len(t.Levels))
		// Unlike the 2D path above, faces without data are simply left nil
		// so a partially-updated cubemap can still be shown; we only fail
		// if *no* face of *any* level has data.
		anyData := false
		for i, level := range t.Levels {
			levels[i] = &api.CubemapLevel{}
			for j, face := range level.Layers {
				if face.Data.count == 0 {
					continue
				}
				dataFormat, dataType := face.getUnsizedFormatAndType()
				format, err := getImageFormat(dataFormat, dataType)
				if err != nil {
					return nil, err
				}
				img := &image.Info{
					Format: format,
					Width:  uint32(face.Width),
					Height: uint32(face.Height),
					Depth:  1,
					Bytes:  image.NewID(face.Data.ResourceID(ctx, s)),
				}
				// Layer index j maps to the GL cubemap face enum, starting
				// at GL_TEXTURE_CUBE_MAP_POSITIVE_X.
				switch GLenum(j) + GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_X {
				case GLenum_GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
					levels[i].NegativeX = img
				case GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_X:
					levels[i].PositiveX = img
				case GLenum_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
					levels[i].NegativeY = img
				case GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
					levels[i].PositiveY = img
				case GLenum_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
					levels[i].NegativeZ = img
				case GLenum_GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
					levels[i].PositiveZ = img
				}
				anyData = true
			}
		}
		if !anyData {
			return nil, &service.ErrDataUnavailable{Reason: messages.ErrNoTextureData(t.ResourceHandle())}
		}
		return api.NewResourceData(api.NewTexture(&api.Cubemap{Levels: levels})), nil

	default:
		// Other texture kinds (e.g. 3D, 2D-array) are not yet exposed.
		return nil, &service.ErrDataUnavailable{Reason: messages.ErrNoTextureData(t.ResourceHandle())}
	}
}

// SetResourceData is not supported for texture resources.
func (t *Texture) SetResourceData(ctx context.Context, at *path.Command,
	data *api.ResourceData, resources api.ResourceMap, edits api.ReplaceCallback) error {
	return fmt.Errorf("SetResourceData is not supported for Texture")
}

// IsResource returns true if this instance should be considered as a resource.
func (s *Shader) IsResource() bool {
	return s.ID != 0
}

// ResourceHandle returns the UI identity for the resource.
func (s *Shader) ResourceHandle() string {
	return fmt.Sprintf("Shader<%d>", s.ID)
}

// ResourceLabel returns an optional debug label for the resource.
func (s *Shader) ResourceLabel() string {
	return s.Label
}

// Order returns an integer used to sort the resources for presentation.
func (s *Shader) Order() uint64 {
	return uint64(s.ID)
}

// ResourceType returns the type of this resource.
func (s *Shader) ResourceType(ctx context.Context) api.ResourceType {
	return api.ResourceType_ShaderResource
}

// ResourceData returns the resource data given the current state.
func (s *Shader) ResourceData(ctx context.Context, t *api.State) (*api.ResourceData, error) { ctx = log.Enter(ctx, "Shader.ResourceData()") var ty api.ShaderType switch s.ShaderType { case GLenum_GL_VERTEX_SHADER: ty = api.ShaderType_Vertex case GLenum_GL_GEOMETRY_SHADER: ty = api.ShaderType_Geometry case GLenum_GL_TESS_CONTROL_SHADER: ty = api.ShaderType_TessControl case GLenum_GL_TESS_EVALUATION_SHADER: ty = api.ShaderType_TessEvaluation case GLenum_GL_FRAGMENT_SHADER: ty = api.ShaderType_Fragment case GLenum_GL_COMPUTE_SHADER: ty = api.ShaderType_Compute } return api.NewResourceData(&api.Shader{Type: ty, Source: s.Source}), nil } func (shader *Shader) SetResourceData( ctx context.Context, at *path.Command, data *api.ResourceData, resourceIDs api.ResourceMap, edits api.ReplaceCallback) error { atomIdx := at.Indices[0] if len(at.Indices) > 1 { return fmt.Errorf("Subcommands currently not supported for GLES resources") // TODO: Subcommands } // Dirty. TODO: Make separate type for getting info for a single resource. 
capturePath := at.Capture resources, err := resolve.Resources(ctx, capturePath) if err != nil { return err } resourceID := resourceIDs[shader] resource := resources.Find(shader.ResourceType(ctx), resourceID) if resource == nil { return fmt.Errorf("Couldn't find resource") } c, err := capture.ResolveFromPath(ctx, capturePath) if err != nil { return err } index := len(resource.Accesses) - 1 for resource.Accesses[index].Indices[0] > atomIdx && index >= 0 { // TODO: Subcommands index-- } for j := index; j >= 0; j-- { i := resource.Accesses[j].Indices[0] // TODO: Subcommands if a, ok := c.Commands[i].(*GlShaderSource); ok { edits(uint64(i), a.Replace(ctx, c, data)) return nil } } return fmt.Errorf("No command to set data in") } func (a *GlShaderSource) Replace(ctx context.Context, c *capture.Capture, data *api.ResourceData) interface{} { state := c.NewState() shader := data.GetShader() source := shader.Source src := state.AllocDataOrPanic(ctx, source) srcLen := state.AllocDataOrPanic(ctx, GLint(len(source))) srcPtr := state.AllocDataOrPanic(ctx, src.Ptr()) cb := CommandBuilder{Thread: a.thread} return cb.GlShaderSource(a.Shader, 1, srcPtr.Ptr(), srcLen.Ptr()). AddRead(srcPtr.Data()). AddRead(srcLen.Data()). AddRead(src.Data()) } // IsResource returns true if this instance should be considered as a resource. func (p *Program) IsResource() bool { return p.ID != 0 } // ResourceHandle returns the UI identity for the resource. func (p *Program) ResourceHandle() string { return fmt.Sprintf("Program<%d>", p.ID) } // ResourceLabel returns an optional debug label for the resource. func (p *Program) ResourceLabel() string { return p.Label } // Order returns an integer used to sort the resources for presentation. func (p *Program) Order() uint64 { return uint64(p.ID) } // ResourceType returns the type of this resource. 
func (p *Program) ResourceType(ctx context.Context) api.ResourceType { return api.ResourceType_ProgramResource } // ResourceData returns the resource data given the current state. func (p *Program) ResourceData(ctx context.Context, s *api.State) (*api.ResourceData, error) { ctx = log.Enter(ctx, "Program.ResourceData()") shaders := make([]*api.Shader, 0, len(p.Shaders)) for shaderType, shader := range p.Shaders { var ty api.ShaderType switch shaderType { case GLenum_GL_VERTEX_SHADER: ty = api.ShaderType_Vertex case GLenum_GL_GEOMETRY_SHADER: ty = api.ShaderType_Geometry case GLenum_GL_TESS_CONTROL_SHADER: ty = api.ShaderType_TessControl case GLenum_GL_TESS_EVALUATION_SHADER: ty = api.ShaderType_TessEvaluation case GLenum_GL_FRAGMENT_SHADER: ty = api.ShaderType_Fragment case GLenum_GL_COMPUTE_SHADER: ty = api.ShaderType_Compute } shaders = append(shaders, &api.Shader{ Type: ty, Source: shader.Source, }) } uniforms := make([]*api.Uniform, 0, len(p.ActiveUniforms)) for _, activeUniform := range p.ActiveUniforms { uniform := p.Uniforms[activeUniform.Location] var uniformFormat api.UniformFormat var uniformType api.UniformType switch activeUniform.Type { case GLenum_GL_FLOAT: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Float case GLenum_GL_FLOAT_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Float case GLenum_GL_INT: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Int32 case GLenum_GL_INT_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Int32 case GLenum_GL_INT_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Int32 case GLenum_GL_INT_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Int32 case GLenum_GL_UNSIGNED_INT: 
uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Uint32 case GLenum_GL_BOOL: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Bool case GLenum_GL_BOOL_VEC2: uniformFormat = api.UniformFormat_Vec2 uniformType = api.UniformType_Bool case GLenum_GL_BOOL_VEC3: uniformFormat = api.UniformFormat_Vec3 uniformType = api.UniformType_Bool case GLenum_GL_BOOL_VEC4: uniformFormat = api.UniformFormat_Vec4 uniformType = api.UniformType_Bool case GLenum_GL_FLOAT_MAT2: uniformFormat = api.UniformFormat_Mat2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT3: uniformFormat = api.UniformFormat_Mat3 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT4: uniformFormat = api.UniformFormat_Mat4 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT2x3: uniformFormat = api.UniformFormat_Mat2x3 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT2x4: uniformFormat = api.UniformFormat_Mat2x4 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT3x2: uniformFormat = api.UniformFormat_Mat3x2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT3x4: uniformFormat = api.UniformFormat_Mat3x4 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT4x2: uniformFormat = api.UniformFormat_Mat4x2 uniformType = api.UniformType_Float case GLenum_GL_FLOAT_MAT4x3: uniformFormat = api.UniformFormat_Mat4x3 uniformType = api.UniformType_Float case GLenum_GL_SAMPLER_2D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_3D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_CUBE: 
uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_2D_SHADOW: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_2D_ARRAY: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_2D_ARRAY_SHADOW: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_SAMPLER_CUBE_SHADOW: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_2D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_3D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_CUBE: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_INT_SAMPLER_2D_ARRAY: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_2D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_3D: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_CUBE: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 case GLenum_GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: uniformFormat = api.UniformFormat_Sampler uniformType = api.UniformType_Uint32 default: uniformFormat = api.UniformFormat_Scalar uniformType = api.UniformType_Float } uniforms = append(uniforms, &api.Uniform{ UniformLocation: uint32(activeUniform.Location), Name: activeUniform.Name, Format: uniformFormat, Type: uniformType, Value: box.NewValue(uniformValue(ctx, s, uniformType, uniform.Value)), }) } return api.NewResourceData(&api.Program{Shaders: shaders, Uniforms: uniforms}), nil } func uniformValue(ctx context.Context, s *api.State, kind api.UniformType, data U8ˢ) interface{} { r := data.Reader(ctx, 
s) switch kind { case api.UniformType_Int32: a := make([]int32, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Int32() } return a case api.UniformType_Uint32: a := make([]uint32, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Uint32() } return a case api.UniformType_Bool: a := make([]bool, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Int32() != 0 } return a case api.UniformType_Float: a := make([]float32, data.count/4) for i := 0; i < len(a); i++ { a[i] = r.Float32() } return a case api.UniformType_Double: a := make([]float64, data.count/8) for i := 0; i < len(a); i++ { a[i] = r.Float64() } return a default: panic(fmt.Errorf("Can't box uniform data type %v", kind)) } } func (program *Program) SetResourceData(ctx context.Context, at *path.Command, data *api.ResourceData, resources api.ResourceMap, edits api.ReplaceCallback) error { return fmt.Errorf("SetResourceData is not supported for Program") }
package libkb

//
// Code for encoding and decoding SKB-formatted keys. Also works for decoding
// general Keybase Packet types, but we only have SKB at present.
//
// SKB = "Secret Key Bundle", which contains an unencrypted public key and
// an encrypted secret key.
//

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
	"os"
	"sync"

	keybase1 "github.com/keybase/client/protocol/go"
	triplesec "github.com/keybase/go-triplesec"
	"golang.org/x/crypto/openpgp"
)

// SKB is a Secret Key Bundle: serialized public key bytes plus a
// (possibly encrypted) private key, with lazily-decoded caches.
type SKB struct {
	Priv SKBPriv  `codec:"priv"`
	Pub  []byte   `codec:"pub"`
	Type AlgoType `codec:"type,omitempty"`

	decodedPub      GenericKey   // cache of the parsed public key (see GetPubKey)
	decryptedSecret GenericKey   // cache of the unlocked secret key
	decryptedRaw    []byte       // in case we need to reexport it

	uid keybase1.UID // UID that the key is for
	Contextified

	// TODO(akalin): Remove this in favor of making LKSec
	// Contextified (see
	// https://github.com/keybase/client/issues/329 ).
	newLKSecForTest func(clientHalf []byte) *LKSec

	sync.Mutex // currently only for uid
}

// SKBPriv is the private half of an SKB; Encryption identifies the scheme
// (0 = plaintext, triplesec.Version, or LKSecVersion).
type SKBPriv struct {
	Data       []byte `codec:"data"`
	Encryption int    `codec:"encryption"`
}

// ToSKB serializes the PGP bundle into an SKB, triplesec-encrypting the
// private key when tsec is non-nil, otherwise storing it in plaintext.
func (key *PGPKeyBundle) ToSKB(gc *GlobalContext, tsec *triplesec.Cipher) (ret *SKB, err error) {
	ret = &SKB{}
	ret.SetGlobalContext(gc)
	var pk, sk bytes.Buffer
	// Need to serialize Private first, because
	// NOTE(review): the original comment was cut off here; presumably the
	// private serialization must precede the public one — confirm intent.
	err = (*openpgp.Entity)(key).SerializePrivate(&sk, nil)
	if err != nil {
		return
	}
	if tsec != nil {
		ret.Priv.Data, err = tsec.Encrypt(sk.Bytes())
		ret.Priv.Encryption = int(triplesec.Version) // Version 3 is the current TripleSec version
		if err != nil {
			return
		}
	} else {
		ret.Priv.Data = sk.Bytes()
		ret.Priv.Encryption = 0
	}
	err = (*openpgp.Entity)(key).Serialize(&pk)
	if err != nil {
		return
	}
	ret.Pub = pk.Bytes()
	ret.Type = key.GetAlgoType()
	return
}

// ToLksSKB serializes the PGP bundle into an SKB whose private key is
// encrypted with the given local-key-security (LKSec) object.
func (key *PGPKeyBundle) ToLksSKB(lks *LKSec) (ret *SKB, err error) {
	if lks == nil {
		return nil, fmt.Errorf("nil lks")
	}
	var pk, sk bytes.Buffer
	err = (*openpgp.Entity)(key).SerializePrivate(&sk, nil)
	if err != nil {
		return nil, err
	}
	ret = &SKB{}
	ret.Priv.Data, err = lks.Encrypt(sk.Bytes())
	if err != nil {
		return nil, err
	}
	ret.Priv.Encryption = LKSecVersion
	err = (*openpgp.Entity)(key).Serialize(&pk)
	if err != nil {
		return nil, err
	}
	ret.Pub = pk.Bytes()
	ret.Type = key.GetAlgoType()
	return ret, nil
}

// newLKSec builds an LKSec for this SKB's uid, or uses the test hook.
// Panics if the uid has not been set (see SetUID).
func (s *SKB) newLKSec(clientHalf []byte, ppGen PassphraseGeneration) *LKSec {
	if s.newLKSecForTest != nil {
		return s.newLKSecForTest(clientHalf)
	}
	if s.uid.IsNil() {
		panic("no uid set in skb")
	}
	return NewLKSec(clientHalf, ppGen, s.uid, s.G())
}

// ToPacket wraps the SKB in a hashed KeybasePacket for serialization.
func (s *SKB) ToPacket() (ret *KeybasePacket, err error) {
	ret = &KeybasePacket{
		Version: KeybasePacketV1,
		Tag:     TagP3skb,
	}
	ret.Body = s
	err = ret.HashMe()
	return
}

// ReadKey parses the public key bytes according to the bundle's Type.
func (s *SKB) ReadKey() (g GenericKey, err error) {
	switch {
	case IsPGPAlgo(s.Type) || s.Type == 0:
		g, err = ReadOneKeyFromBytes(s.Pub)
	case s.Type == KIDNaclEddsa:
		g, err = ImportNaclSigningKeyPairFromBytes(s.Pub, nil)
	case s.Type == KIDNaclDH:
		g, err = ImportNaclDHKeyPairFromBytes(s.Pub, nil)
	default:
		err = UnknownKeyTypeError{s.Type}
	}
	return
}

// GetPubKey returns the parsed public key, decoding and caching it on
// first use.
func (s *SKB) GetPubKey() (key GenericKey, err error) {
	if key = s.decodedPub; key == nil {
		key, err = s.ReadKey()
		s.decodedPub = key
	}
	return
}

// VerboseDescription returns the public key's human-readable description.
func (s *SKB) VerboseDescription() (ret string, err error) {
	var key GenericKey
	key, err = s.GetPubKey()
	if err == nil && key != nil {
		ret = key.VerboseDescription()
	}
	return
}

// RawUnlockedKey returns the raw decrypted private key bytes, if the key
// has been unlocked (nil otherwise).
func (s *SKB) RawUnlockedKey() []byte {
	return s.decryptedRaw
}

// unlockSecretKeyFromSecretRetriever unlocks the private key using a
// stored full secret (LKSec only, or plaintext), bypassing passphrase entry.
func (s *SKB) unlockSecretKeyFromSecretRetriever(secretRetriever SecretRetriever) (key GenericKey, err error) {
	if key = s.decryptedSecret; key != nil {
		return
	}
	var unlocked []byte
	switch s.Priv.Encryption {
	case 0:
		unlocked = s.Priv.Data
	case LKSecVersion:
		unlocked, err = s.lksUnlockWithSecretRetriever(secretRetriever)
	default:
		err = BadKeyError{fmt.Sprintf("Can't unlock secret from secret retriever with protection type %d", int(s.Priv.Encryption))}
	}
	if err == nil {
		key, err = s.parseUnlocked(unlocked)
	}
	return
}

// unverifiedPassphraseStream takes a passphrase as a parameter and
// also the salt from the Account and computes a Triplesec and
// a passphrase stream. It's not verified through a Login.
func (s *SKB) unverifiedPassphraseStream(lctx LoginContext, passphrase string) (tsec *triplesec.Cipher, ret *PassphraseStream, err error) {
	var salt []byte
	username := s.G().Env.GetUsername()
	if lctx != nil {
		if len(username) > 0 {
			err = lctx.LoadLoginSession(username)
			if err != nil {
				return nil, nil, err
			}
		}
		salt, err = lctx.LoginSession().Salt()
	} else {
		aerr := s.G().LoginState().Account(func(a *Account) {
			if len(username) > 0 {
				err = a.LoadLoginSession(username)
				if err != nil {
					return
				}
			}
			salt, err = a.LoginSession().Salt()
		}, "skb - salt")
		// NOTE(review): this returns err (set inside the closure, possibly
		// nil) rather than aerr — if Account itself fails with err still
		// nil, the caller gets nil results and a nil error. Confirm intended.
		if aerr != nil {
			return nil, nil, err
		}
	}
	if err != nil {
		return nil, nil, err
	}
	return StretchPassphrase(passphrase, salt)
}

// UnlockSecretKey decrypts and caches the private key. Depending on
// Priv.Encryption it uses: nothing (plaintext), the supplied/derived
// triplesec cipher, or LKSec (deriving an unverified passphrase stream
// when pps is nil, and caching the stream on success).
func (s *SKB) UnlockSecretKey(lctx LoginContext, passphrase string, tsec *triplesec.Cipher, pps *PassphraseStream, secretStorer SecretStorer, lksPreload *LKSec) (key GenericKey, err error) {
	if key = s.decryptedSecret; key != nil {
		return
	}
	var unlocked []byte

	switch s.Priv.Encryption {
	case 0:
		unlocked = s.Priv.Data
	case int(triplesec.Version):
		if tsec == nil {
			tsec, err = triplesec.NewCipher([]byte(passphrase), nil)
			if err != nil {
				return nil, err
			}
		}
		unlocked, err = s.tsecUnlock(tsec)
	case LKSecVersion:
		ppsIn := pps
		if pps == nil {
			tsec, pps, err = s.unverifiedPassphraseStream(lctx, passphrase)
			if err != nil {
				return nil, fmt.Errorf("UnlockSecretKey: %s", err)
			}
		}
		if unlocked, err = s.lksUnlock(lctx, pps, secretStorer, lksPreload); err == nil && ppsIn == nil {
			// the unverified tsec, pps has been verified, so cache it:
			if lctx != nil {
				lctx.CreateStreamCache(tsec, pps)
			} else {
				aerr := s.G().LoginState().Account(func(a *Account) {
					a.CreateStreamCache(tsec, pps)
				}, "skb - UnlockSecretKey - CreateStreamCache")
				if aerr != nil {
					return nil, aerr
				}
			}
		}
	default:
		err = BadKeyError{fmt.Sprintf("Can't unlock secret with protection type %d", int(s.Priv.Encryption))}
	}
	if err == nil {
		key, err = s.parseUnlocked(unlocked)
	}
	return
}

// parseUnlocked parses the decrypted private key bytes, verifies them with
// CheckSecretKey, and caches both the raw bytes and the parsed key.
func (s *SKB) parseUnlocked(unlocked []byte) (key GenericKey, err error) {
	switch {
	case IsPGPAlgo(s.Type) || s.Type == 0:
		key, err = ReadOneKeyFromBytes(unlocked)
	case s.Type == KIDNaclEddsa:
		key, err = ImportNaclSigningKeyPairFromBytes(s.Pub, unlocked)
	case s.Type == KIDNaclDH:
		key, err = ImportNaclDHKeyPairFromBytes(s.Pub, unlocked)
	}
	// NOTE(review): when key is nil, any parse error from above is replaced
	// by this generic BadKeyError, losing the underlying cause — confirm.
	if key == nil {
		err = BadKeyError{"can't parse secret key after unlock"}
	}
	if err != nil {
		return
	}
	if err = key.CheckSecretKey(); err == nil {
		s.decryptedRaw = unlocked
		s.decryptedSecret = key
	}
	return
}

// tsecUnlock decrypts the private key with triplesec, mapping a bad
// passphrase onto PassphraseError.
func (s *SKB) tsecUnlock(tsec *triplesec.Cipher) ([]byte, error) {
	unlocked, err := tsec.Decrypt(s.Priv.Data)
	if err != nil {
		if _, ok := err.(triplesec.BadPassphraseError); ok {
			err = PassphraseError{}
		}
		return nil, err
	}
	return unlocked, nil
}

// lksUnlock decrypts the private key with LKSec (building one from the
// passphrase stream when lks is nil) and optionally persists the full
// secret via secretStorer on success.
func (s *SKB) lksUnlock(lctx LoginContext, pps *PassphraseStream, secretStorer SecretStorer, lks *LKSec) (unlocked []byte, err error) {
	if lks == nil {
		s.G().Log.Debug("creating new lks")
		lks = s.newLKSec(pps.LksClientHalf(), pps.Generation())
		s.Lock()
		s.G().Log.Debug("setting uid in lks to %s", s.uid)
		lks.SetUID(s.uid)
		s.Unlock()
	}
	unlocked, err = lks.Decrypt(lctx, s.Priv.Data)
	if err != nil {
		return
	}

	if secretStorer != nil {
		var secret []byte
		secret, err = lks.GetSecret()
		if err != nil {
			unlocked = nil
			return
		}
		// Ignore any errors storing the secret.
		storeSecretErr := secretStorer.StoreSecret(secret)
		if storeSecretErr != nil {
			s.G().Log.Warning("StoreSecret error: %s", storeSecretErr)
		}
	}

	return
}

// lksUnlockWithSecretRetriever decrypts the private key using a full LKSec
// secret obtained from secretRetriever. Panics if uid is unset.
func (s *SKB) lksUnlockWithSecretRetriever(secretRetriever SecretRetriever) (unlocked []byte, err error) {
	secret, err := secretRetriever.RetrieveSecret()
	if err != nil {
		return
	}
	if s.uid.IsNil() {
		panic("no uid set in skb")
	}
	lks := NewLKSecWithFullSecret(secret, s.uid, s.G())
	return lks.Decrypt(nil, s.Priv.Data)
}

// SetUID records the UID this key belongs to (guarded by the embedded mutex).
func (s *SKB) SetUID(uid keybase1.UID) {
	G.Log.Debug("| Setting UID on SKB to %s", uid)
	s.Lock()
	s.uid = uid
	s.Unlock()
}

// SKBKeyringFile is an on-disk collection of SKBs, with fingerprint and
// KID indexes built by Index().
type SKBKeyringFile struct {
	filename string
	Blocks   []*SKB
	fpIndex  map[PGPFingerprint]*SKB
	kidIndex map[keybase1.KID]*SKB
	dirty    bool
}

// NewSKBKeyringFile makes an empty keyring backed by the given filename.
func NewSKBKeyringFile(n string) *SKBKeyringFile {
	return &SKBKeyringFile{
		filename: n,
		fpIndex:  make(map[PGPFingerprint]*SKB),
		kidIndex: make(map[keybase1.KID]*SKB),
		dirty:    false,
	}
}

// Load reads and base64-decodes the keyring file into Blocks. A missing
// file is logged but still returned as an error by the caller's err.
func (k *SKBKeyringFile) Load() (err error) {
	G.Log.Debug("+ Loading SKB keyring: %s", k.filename)
	var packets KeybasePackets
	var file *os.File
	if file, err = os.OpenFile(k.filename, os.O_RDONLY, 0); err == nil {
		stream := base64.NewDecoder(base64.StdEncoding, file)
		packets, err = DecodePackets(stream)
		tmp := file.Close()
		if err == nil && tmp != nil {
			err = tmp
		}
	}
	if err != nil {
		if os.IsNotExist(err) {
			G.Log.Debug("| Keybase secret keyring doesn't exist: %s", k.filename)
		} else {
			G.Log.Warning("Error opening %s: %s", k.filename, err)
		}
	} else if err == nil { // (err is necessarily nil here; condition is redundant)
		k.Blocks, err = packets.ToListOfSKBs()
	}
	G.Log.Debug("- Loaded SKB keyring: %s -> %s", k.filename, ErrToOk(err))
	return
}

// addToIndex registers b under its key's fingerprint (if any) and KID.
func (k *SKBKeyringFile) addToIndex(g GenericKey, b *SKB) {
	if g == nil {
		return
	}
	if fp := g.GetFingerprintP(); fp != nil {
		k.fpIndex[*fp] = b
	}
	k.kidIndex[g.GetKID()] = b
}

// Index (re)builds the fingerprint and KID indexes from Blocks.
func (k *SKBKeyringFile) Index() (err error) {
	for _, b := range k.Blocks {
		var key GenericKey
		key, err = b.GetPubKey()
		if err != nil {
			return
		}
		// Last-writer wins!
		k.addToIndex(key, b)
	}
	G.Log.Debug("| Indexed %d secret keys", len(k.Blocks))
	return
}

// SearchWithComputedKeyFamily scans Blocks newest-first for a key that
// matches the SecretKeyArg's type/query and is an active sibkey per ckf.
func (k SKBKeyringFile) SearchWithComputedKeyFamily(ckf *ComputedKeyFamily, ska SecretKeyArg) *SKB {
	var kid keybase1.KID
	G.Log.Debug("+ SKBKeyringFile.SearchWithComputedKeyFamily")
	defer func() {
		var res string
		if kid.Exists() {
			res = kid.String()
		} else {
			res = "<nil>"
		}
		G.Log.Debug("- SKBKeyringFile.SearchWithComputedKeyFamily -> %s\n", res)
	}()
	G.Log.Debug("| Searching %d possible blocks", len(k.Blocks))
	for i := len(k.Blocks) - 1; i >= 0; i-- {
		G.Log.Debug("| trying key index# -> %d", i)
		if key, err := k.Blocks[i].GetPubKey(); err == nil && key != nil {
			kid = key.GetKID()
			active := ckf.GetKeyRole(kid)
			G.Log.Debug("| Checking KID: %s -> %d", kid, int(active))
			if !ska.KeyType.nonDeviceKeyMatches(key) {
				G.Log.Debug("| Skipped, doesn't match type=%s", ska.KeyType)
			} else if !KeyMatchesQuery(key, ska.KeyQuery, ska.ExactMatch) {
				G.Log.Debug("| Skipped, doesn't match query=%s", ska.KeyQuery)
			} else if active != DLGSibkey {
				G.Log.Debug("| Skipped, active=%d", int(active))
			} else {
				return k.Blocks[i]
			}
		} else {
			G.Log.Debug("| failed --> %v", err)
		}
	}
	return nil
}

// LookupByFingerprint returns the indexed SKB for fp, or nil.
func (k SKBKeyringFile) LookupByFingerprint(fp PGPFingerprint) *SKB {
	ret, ok := k.fpIndex[fp]
	if !ok {
		ret = nil
	}
	return ret
}

// FindSecretKey will, given a list of KIDs, find the first one in the
// list that has a corresponding secret key in the keyring file.
func (k SKBKeyringFile) FindSecretKey(kids []keybase1.KID) (ret *SKB) { for _, kid := range kids { if ret = k.LookupByKid(kid); ret != nil { return } } return } func (k SKBKeyringFile) LookupByKid(kid keybase1.KID) *SKB { ret, ok := k.kidIndex[kid] if !ok { ret = nil } return ret } func (k *SKBKeyringFile) LoadAndIndex() error { err := k.Load() if err == nil { err = k.Index() } return err } func (p KeybasePacket) ToSKB() (*SKB, error) { ret, ok := p.Body.(*SKB) if !ok { return nil, UnmarshalError{"SKB"} } return ret, nil } func (s *SKB) ArmoredEncode() (ret string, err error) { return PacketArmoredEncode(s) } func (k *SKBKeyringFile) Push(skb *SKB) error { key, err := skb.GetPubKey() if err != nil { return fmt.Errorf("Failed to get pubkey: %s", err) } k.dirty = true k.Blocks = append(k.Blocks, skb) k.addToIndex(key, skb) return nil } func (k SKBKeyringFile) GetFilename() string { return k.filename } func (k SKBKeyringFile) WriteTo(w io.Writer) (int64, error) { G.Log.Debug("+ WriteTo") packets := make(KeybasePackets, len(k.Blocks)) var err error for i, b := range k.Blocks { if packets[i], err = b.ToPacket(); err != nil { return 0, err } } b64 := base64.NewEncoder(base64.StdEncoding, w) if err = packets.EncodeTo(b64); err != nil { G.Log.Warning("Encoding problem: %s", err) return 0, err } G.Log.Debug("- WriteTo") b64.Close() return 0, nil } func (k *SKBKeyringFile) Save(lui LogUI) error { if !k.dirty { return nil } if err := SafeWriteToFile(*k); err != nil { return err } k.dirty = false lui.Debug("Updated keyring %s", k.filename) return nil } func (p KeybasePackets) ToListOfSKBs() ([]*SKB, error) { ret := make([]*SKB, len(p)) for i, e := range p { k, ok := e.Body.(*SKB) if !ok { return nil, fmt.Errorf("Bad SKB sequence; got packet of wrong type %T", e.Body) } ret[i] = k } return ret, nil } func (s *SKB) UnlockWithStoredSecret(secretRetriever SecretRetriever) (ret GenericKey, err error) { s.G().Log.Debug("+ UnlockWithStoredSecret()") defer func() { s.G().Log.Debug("- 
UnlockWithStoredSecret -> %s", ErrToOk(err)) }() if ret = s.decryptedSecret; ret != nil { return } return s.unlockSecretKeyFromSecretRetriever(secretRetriever) } func (s *SKB) PromptAndUnlock(lctx LoginContext, reason, which string, secretStore SecretStore, ui SecretUI, lksPreload *LKSec) (ret GenericKey, err error) { s.G().Log.Debug("+ PromptAndUnlock(%s,%s)", reason, which) defer func() { s.G().Log.Debug("- PromptAndUnlock -> %s", ErrToOk(err)) }() if ret = s.decryptedSecret; ret != nil { return } if secretStore != nil { ret, err = s.unlockSecretKeyFromSecretRetriever(secretStore) s.G().Log.Debug("| unlockSecretKeyFromSecretRetriever -> %s", ErrToOk(err)) if err == nil { return } // Just fall through if we failed to unlock with // retrieved secret. err = nil } var tsec *triplesec.Cipher var pps *PassphraseStream if lctx != nil { tsec = lctx.PassphraseStreamCache().Triplesec() pps = lctx.PassphraseStreamCache().PassphraseStream() } else { s.G().LoginState().PassphraseStreamCache(func(sc *PassphraseStreamCache) { tsec = sc.Triplesec() pps = sc.PassphraseStream() }, "skb - PromptAndUnlock - tsec, pps") } if tsec != nil || pps != nil { ret, err = s.UnlockSecretKey(lctx, "", tsec, pps, nil, lksPreload) if err == nil { s.G().Log.Debug("| Unlocked key with cached 3Sec and passphrase stream") return } if _, ok := err.(PassphraseError); !ok { return } // if it's a passphrase error, fall through... 
} else { s.G().Log.Debug("| No 3Sec or PassphraseStream in PromptAndUnlock") } var desc string if desc, err = s.VerboseDescription(); err != nil { return } unlocker := func(pw string, storeSecret bool) (ret GenericKey, err error) { var secretStorer SecretStorer if storeSecret { secretStorer = secretStore } return s.UnlockSecretKey(lctx, pw, nil, nil, secretStorer, nil) } return KeyUnlocker{ Tries: 4, Reason: reason, KeyDesc: desc, Which: which, UseSecretStore: secretStore != nil, Unlocker: unlocker, UI: ui, }.Run() } func (k *SKBKeyringFile) PushAndSave(skb *SKB, lui LogUI) error { if err := k.Push(skb); err != nil { return err } return k.Save(lui) } cleaned up troublesome PromptAndUnlock in advance of tracking down issues package libkb // // Code for encoding and decoding SKB-formatted keys. Also works for decoding // general Keybase Packet types, but we only have SKB at present. // // SKB = "Secret Key Bundle", which contains an unencrypted public key and // and encrypted secret key. // import ( "bytes" "encoding/base64" "errors" "fmt" "io" "os" "sync" keybase1 "github.com/keybase/client/protocol/go" triplesec "github.com/keybase/go-triplesec" "golang.org/x/crypto/openpgp" ) type SKB struct { Priv SKBPriv `codec:"priv"` Pub []byte `codec:"pub"` Type AlgoType `codec:"type,omitempty"` decodedPub GenericKey decryptedSecret GenericKey decryptedRaw []byte // in case we need to reexport it uid keybase1.UID // UID that the key is for Contextified // TODO(akalin): Remove this in favor of making LKSec // Contextified (see // https://github.com/keybase/client/issues/329 ). 
newLKSecForTest func(clientHalf []byte) *LKSec sync.Mutex // currently only for uid } type SKBPriv struct { Data []byte `codec:"data"` Encryption int `codec:"encryption"` } func (key *PGPKeyBundle) ToSKB(gc *GlobalContext, tsec *triplesec.Cipher) (ret *SKB, err error) { ret = &SKB{} ret.SetGlobalContext(gc) var pk, sk bytes.Buffer // Need to serialize Private first, because err = (*openpgp.Entity)(key).SerializePrivate(&sk, nil) if err != nil { return } if tsec != nil { ret.Priv.Data, err = tsec.Encrypt(sk.Bytes()) ret.Priv.Encryption = int(triplesec.Version) // Version 3 is the current TripleSec version if err != nil { return } } else { ret.Priv.Data = sk.Bytes() ret.Priv.Encryption = 0 } err = (*openpgp.Entity)(key).Serialize(&pk) if err != nil { return } ret.Pub = pk.Bytes() ret.Type = key.GetAlgoType() return } func (key *PGPKeyBundle) ToLksSKB(lks *LKSec) (ret *SKB, err error) { if lks == nil { return nil, fmt.Errorf("nil lks") } var pk, sk bytes.Buffer err = (*openpgp.Entity)(key).SerializePrivate(&sk, nil) if err != nil { return nil, err } ret = &SKB{} ret.Priv.Data, err = lks.Encrypt(sk.Bytes()) if err != nil { return nil, err } ret.Priv.Encryption = LKSecVersion err = (*openpgp.Entity)(key).Serialize(&pk) if err != nil { return nil, err } ret.Pub = pk.Bytes() ret.Type = key.GetAlgoType() return ret, nil } func (s *SKB) newLKSec(clientHalf []byte, ppGen PassphraseGeneration) *LKSec { if s.newLKSecForTest != nil { return s.newLKSecForTest(clientHalf) } if s.uid.IsNil() { panic("no uid set in skb") } return NewLKSec(clientHalf, ppGen, s.uid, s.G()) } func (s *SKB) ToPacket() (ret *KeybasePacket, err error) { ret = &KeybasePacket{ Version: KeybasePacketV1, Tag: TagP3skb, } ret.Body = s err = ret.HashMe() return } func (s *SKB) ReadKey() (g GenericKey, err error) { switch { case IsPGPAlgo(s.Type) || s.Type == 0: g, err = ReadOneKeyFromBytes(s.Pub) case s.Type == KIDNaclEddsa: g, err = ImportNaclSigningKeyPairFromBytes(s.Pub, nil) case s.Type == KIDNaclDH: g, 
err = ImportNaclDHKeyPairFromBytes(s.Pub, nil) default: err = UnknownKeyTypeError{s.Type} } return } func (s *SKB) GetPubKey() (key GenericKey, err error) { if key = s.decodedPub; key == nil { key, err = s.ReadKey() s.decodedPub = key } return } func (s *SKB) VerboseDescription() (ret string, err error) { var key GenericKey key, err = s.GetPubKey() if err == nil && key != nil { ret = key.VerboseDescription() } return } func (s *SKB) RawUnlockedKey() []byte { return s.decryptedRaw } func (s *SKB) unlockSecretKeyFromSecretRetriever(secretRetriever SecretRetriever) (key GenericKey, err error) { if key = s.decryptedSecret; key != nil { return } var unlocked []byte switch s.Priv.Encryption { case 0: unlocked = s.Priv.Data case LKSecVersion: unlocked, err = s.lksUnlockWithSecretRetriever(secretRetriever) default: err = BadKeyError{fmt.Sprintf("Can't unlock secret from secret retriever with protection type %d", int(s.Priv.Encryption))} } if err == nil { key, err = s.parseUnlocked(unlocked) } return } // unverifiedPassphraseStream takes a passphrase as a parameter and // also the salt from the Account and computes a Triplesec and // a passphrase stream. It's not verified through a Login. 
func (s *SKB) unverifiedPassphraseStream(lctx LoginContext, passphrase string) (tsec *triplesec.Cipher, ret *PassphraseStream, err error) { var salt []byte username := s.G().Env.GetUsername() if lctx != nil { if len(username) > 0 { err = lctx.LoadLoginSession(username) if err != nil { return nil, nil, err } } salt, err = lctx.LoginSession().Salt() } else { aerr := s.G().LoginState().Account(func(a *Account) { if len(username) > 0 { err = a.LoadLoginSession(username) if err != nil { return } } salt, err = a.LoginSession().Salt() }, "skb - salt") if aerr != nil { return nil, nil, err } } if err != nil { return nil, nil, err } return StretchPassphrase(passphrase, salt) } func (s *SKB) UnlockSecretKey(lctx LoginContext, passphrase string, tsec *triplesec.Cipher, pps *PassphraseStream, secretStorer SecretStorer, lksPreload *LKSec) (key GenericKey, err error) { if key = s.decryptedSecret; key != nil { return } var unlocked []byte switch s.Priv.Encryption { case 0: unlocked = s.Priv.Data case int(triplesec.Version): if tsec == nil { tsec, err = triplesec.NewCipher([]byte(passphrase), nil) if err != nil { return nil, err } } unlocked, err = s.tsecUnlock(tsec) case LKSecVersion: ppsIn := pps if pps == nil { tsec, pps, err = s.unverifiedPassphraseStream(lctx, passphrase) if err != nil { return nil, fmt.Errorf("UnlockSecretKey: %s", err) } } if unlocked, err = s.lksUnlock(lctx, pps, secretStorer, lksPreload); err == nil && ppsIn == nil { // the unverified tsec, pps has been verified, so cache it: if lctx != nil { lctx.CreateStreamCache(tsec, pps) } else { aerr := s.G().LoginState().Account(func(a *Account) { a.CreateStreamCache(tsec, pps) }, "skb - UnlockSecretKey - CreateStreamCache") if aerr != nil { return nil, aerr } } } default: err = BadKeyError{fmt.Sprintf("Can't unlock secret with protection type %d", int(s.Priv.Encryption))} } if err == nil { key, err = s.parseUnlocked(unlocked) } return } func (s *SKB) parseUnlocked(unlocked []byte) (key GenericKey, err error) { 
switch { case IsPGPAlgo(s.Type) || s.Type == 0: key, err = ReadOneKeyFromBytes(unlocked) case s.Type == KIDNaclEddsa: key, err = ImportNaclSigningKeyPairFromBytes(s.Pub, unlocked) case s.Type == KIDNaclDH: key, err = ImportNaclDHKeyPairFromBytes(s.Pub, unlocked) } if key == nil { err = BadKeyError{"can't parse secret key after unlock"} } if err != nil { return } if err = key.CheckSecretKey(); err == nil { s.decryptedRaw = unlocked s.decryptedSecret = key } return } func (s *SKB) tsecUnlock(tsec *triplesec.Cipher) ([]byte, error) { unlocked, err := tsec.Decrypt(s.Priv.Data) if err != nil { if _, ok := err.(triplesec.BadPassphraseError); ok { err = PassphraseError{} } return nil, err } return unlocked, nil } func (s *SKB) lksUnlock(lctx LoginContext, pps *PassphraseStream, secretStorer SecretStorer, lks *LKSec) (unlocked []byte, err error) { if lks == nil { s.G().Log.Debug("creating new lks") lks = s.newLKSec(pps.LksClientHalf(), pps.Generation()) s.Lock() s.G().Log.Debug("setting uid in lks to %s", s.uid) lks.SetUID(s.uid) s.Unlock() } unlocked, err = lks.Decrypt(lctx, s.Priv.Data) if err != nil { return } if secretStorer != nil { var secret []byte secret, err = lks.GetSecret() if err != nil { unlocked = nil return } // Ignore any errors storing the secret. 
storeSecretErr := secretStorer.StoreSecret(secret) if storeSecretErr != nil { s.G().Log.Warning("StoreSecret error: %s", storeSecretErr) } } return } func (s *SKB) lksUnlockWithSecretRetriever(secretRetriever SecretRetriever) (unlocked []byte, err error) { secret, err := secretRetriever.RetrieveSecret() if err != nil { return } if s.uid.IsNil() { panic("no uid set in skb") } lks := NewLKSecWithFullSecret(secret, s.uid, s.G()) return lks.Decrypt(nil, s.Priv.Data) } func (s *SKB) SetUID(uid keybase1.UID) { G.Log.Debug("| Setting UID on SKB to %s", uid) s.Lock() s.uid = uid s.Unlock() } type SKBKeyringFile struct { filename string Blocks []*SKB fpIndex map[PGPFingerprint]*SKB kidIndex map[keybase1.KID]*SKB dirty bool } func NewSKBKeyringFile(n string) *SKBKeyringFile { return &SKBKeyringFile{ filename: n, fpIndex: make(map[PGPFingerprint]*SKB), kidIndex: make(map[keybase1.KID]*SKB), dirty: false, } } func (k *SKBKeyringFile) Load() (err error) { G.Log.Debug("+ Loading SKB keyring: %s", k.filename) var packets KeybasePackets var file *os.File if file, err = os.OpenFile(k.filename, os.O_RDONLY, 0); err == nil { stream := base64.NewDecoder(base64.StdEncoding, file) packets, err = DecodePackets(stream) tmp := file.Close() if err == nil && tmp != nil { err = tmp } } if err != nil { if os.IsNotExist(err) { G.Log.Debug("| Keybase secret keyring doesn't exist: %s", k.filename) } else { G.Log.Warning("Error opening %s: %s", k.filename, err) } } else if err == nil { k.Blocks, err = packets.ToListOfSKBs() } G.Log.Debug("- Loaded SKB keyring: %s -> %s", k.filename, ErrToOk(err)) return } func (k *SKBKeyringFile) addToIndex(g GenericKey, b *SKB) { if g == nil { return } if fp := g.GetFingerprintP(); fp != nil { k.fpIndex[*fp] = b } k.kidIndex[g.GetKID()] = b } func (k *SKBKeyringFile) Index() (err error) { for _, b := range k.Blocks { var key GenericKey key, err = b.GetPubKey() if err != nil { return } // Last-writer wins! 
k.addToIndex(key, b) } G.Log.Debug("| Indexed %d secret keys", len(k.Blocks)) return } func (k SKBKeyringFile) SearchWithComputedKeyFamily(ckf *ComputedKeyFamily, ska SecretKeyArg) *SKB { var kid keybase1.KID G.Log.Debug("+ SKBKeyringFile.SearchWithComputedKeyFamily") defer func() { var res string if kid.Exists() { res = kid.String() } else { res = "<nil>" } G.Log.Debug("- SKBKeyringFile.SearchWithComputedKeyFamily -> %s\n", res) }() G.Log.Debug("| Searching %d possible blocks", len(k.Blocks)) for i := len(k.Blocks) - 1; i >= 0; i-- { G.Log.Debug("| trying key index# -> %d", i) if key, err := k.Blocks[i].GetPubKey(); err == nil && key != nil { kid = key.GetKID() active := ckf.GetKeyRole(kid) G.Log.Debug("| Checking KID: %s -> %d", kid, int(active)) if !ska.KeyType.nonDeviceKeyMatches(key) { G.Log.Debug("| Skipped, doesn't match type=%s", ska.KeyType) } else if !KeyMatchesQuery(key, ska.KeyQuery, ska.ExactMatch) { G.Log.Debug("| Skipped, doesn't match query=%s", ska.KeyQuery) } else if active != DLGSibkey { G.Log.Debug("| Skipped, active=%d", int(active)) } else { return k.Blocks[i] } } else { G.Log.Debug("| failed --> %v", err) } } return nil } func (k SKBKeyringFile) LookupByFingerprint(fp PGPFingerprint) *SKB { ret, ok := k.fpIndex[fp] if !ok { ret = nil } return ret } // FindSecretKey will, given a list of KIDs, find the first one in the // list that has a corresponding secret key in the keyring file. 
func (k SKBKeyringFile) FindSecretKey(kids []keybase1.KID) (ret *SKB) { for _, kid := range kids { if ret = k.LookupByKid(kid); ret != nil { return } } return } func (k SKBKeyringFile) LookupByKid(kid keybase1.KID) *SKB { ret, ok := k.kidIndex[kid] if !ok { ret = nil } return ret } func (k *SKBKeyringFile) LoadAndIndex() error { err := k.Load() if err == nil { err = k.Index() } return err } func (p KeybasePacket) ToSKB() (*SKB, error) { ret, ok := p.Body.(*SKB) if !ok { return nil, UnmarshalError{"SKB"} } return ret, nil } func (s *SKB) ArmoredEncode() (ret string, err error) { return PacketArmoredEncode(s) } func (k *SKBKeyringFile) Push(skb *SKB) error { key, err := skb.GetPubKey() if err != nil { return fmt.Errorf("Failed to get pubkey: %s", err) } k.dirty = true k.Blocks = append(k.Blocks, skb) k.addToIndex(key, skb) return nil } func (k SKBKeyringFile) GetFilename() string { return k.filename } func (k SKBKeyringFile) WriteTo(w io.Writer) (int64, error) { G.Log.Debug("+ WriteTo") packets := make(KeybasePackets, len(k.Blocks)) var err error for i, b := range k.Blocks { if packets[i], err = b.ToPacket(); err != nil { return 0, err } } b64 := base64.NewEncoder(base64.StdEncoding, w) if err = packets.EncodeTo(b64); err != nil { G.Log.Warning("Encoding problem: %s", err) return 0, err } G.Log.Debug("- WriteTo") b64.Close() return 0, nil } func (k *SKBKeyringFile) Save(lui LogUI) error { if !k.dirty { return nil } if err := SafeWriteToFile(*k); err != nil { return err } k.dirty = false lui.Debug("Updated keyring %s", k.filename) return nil } func (p KeybasePackets) ToListOfSKBs() ([]*SKB, error) { ret := make([]*SKB, len(p)) for i, e := range p { k, ok := e.Body.(*SKB) if !ok { return nil, fmt.Errorf("Bad SKB sequence; got packet of wrong type %T", e.Body) } ret[i] = k } return ret, nil } func (s *SKB) UnlockWithStoredSecret(secretRetriever SecretRetriever) (ret GenericKey, err error) { s.G().Log.Debug("+ UnlockWithStoredSecret()") defer func() { s.G().Log.Debug("- 
UnlockWithStoredSecret -> %s", ErrToOk(err)) }() if ret = s.decryptedSecret; ret != nil { return } return s.unlockSecretKeyFromSecretRetriever(secretRetriever) } var errUnlockNotPossible = errors.New("unlock not possible") func (s *SKB) UnlockNoPrompt(lctx LoginContext, secretStore SecretStore, lksPreload *LKSec) (GenericKey, error) { // already have decrypted secret? if s.decryptedSecret != nil { return s.decryptedSecret, nil } // try using the secret store: if secretStore != nil { key, err := s.unlockSecretKeyFromSecretRetriever(secretStore) s.G().Log.Debug("| unlockSecretKeyFromSecretRetriever -> %s", ErrToOk(err)) if err == nil { return key, nil } // fall through if we failed to unlock with retrieved secret... } // try using the passphrase stream cache var tsec *triplesec.Cipher var pps *PassphraseStream if lctx != nil { tsec = lctx.PassphraseStreamCache().Triplesec() pps = lctx.PassphraseStreamCache().PassphraseStream() } else { s.G().LoginState().PassphraseStreamCache(func(sc *PassphraseStreamCache) { tsec = sc.Triplesec() pps = sc.PassphraseStream() }, "skb - PromptAndUnlock - tsec, pps") } if tsec != nil || pps != nil { key, err := s.UnlockSecretKey(lctx, "", tsec, pps, nil, lksPreload) if err == nil { s.G().Log.Debug("| Unlocked key with cached 3Sec and passphrase stream") return key, nil } if _, ok := err.(PassphraseError); !ok { // not a passphrase error return nil, err } // fall through if it's a passphrase error } else { s.G().Log.Debug("| No 3Sec or PassphraseStream in PromptAndUnlock") } // failed to unlock without prompting user for passphrase return nil, errUnlockNotPossible } func (s *SKB) UnlockPrompt(lctx LoginContext, reason, which string, secretStore SecretStore, ui SecretUI) (GenericKey, error) { desc, err := s.VerboseDescription() if err != nil { return nil, err } unlocker := func(pw string, storeSecret bool) (ret GenericKey, err error) { var secretStorer SecretStorer if storeSecret { secretStorer = secretStore } return 
s.UnlockSecretKey(lctx, pw, nil, nil, secretStorer, nil) } return KeyUnlocker{ Tries: 4, Reason: reason, KeyDesc: desc, Which: which, UseSecretStore: secretStore != nil, Unlocker: unlocker, UI: ui, }.Run() } func (s *SKB) PromptAndUnlock(lctx LoginContext, reason, which string, secretStore SecretStore, ui SecretUI, lksPreload *LKSec) (ret GenericKey, err error) { s.G().Log.Debug("+ PromptAndUnlock(%s,%s)", reason, which) defer func() { s.G().Log.Debug("- PromptAndUnlock -> %s", ErrToOk(err)) }() // First try to unlock without prompting the user. ret, err = s.UnlockNoPrompt(lctx, secretStore, lksPreload) if err == nil { return } if err != errUnlockNotPossible { return } // Prompt necessary: ret, err = s.UnlockPrompt(lctx, reason, which, secretStore, ui) return } func (k *SKBKeyringFile) PushAndSave(skb *SKB, lui LogUI) error { if err := k.Push(skb); err != nil { return err } return k.Save(lui) }
package ui import ( "fmt" "github.com/pkg/errors" "strconv" "strings" "github.com/lunixbochs/usercorn/go/models" "github.com/lunixbochs/usercorn/go/models/trace" ) func pad(s string, to int) string { if len(s) >= to { return "" } return strings.Repeat(" ", to-len(s)) } type StreamUI struct { replay *trace.Replay config *models.Config regfmt string inscol int regcol int // pending is an OpStep representing the last unflushed instruction. Cleared by Flush(). pending *trace.OpStep effects []models.Op lastPC uint64 } func NewStreamUI(c *models.Config, r *trace.Replay) *StreamUI { // find the longest register name longest := 0 for _, name := range r.Arch.RegNames() { if len(name) > longest { longest = len(name) } } return &StreamUI{ replay: r, config: c, regfmt: fmt.Sprintf("%%%ds = %%#0%dx", longest, r.Arch.Bits/4), inscol: 60, // FIXME regcol: longest + 5 + r.Arch.Bits/4, } } func (s *StreamUI) Feed(op models.Op, effects []models.Op) { switch o := op.(type) { case *trace.OpJmp: s.blockPrint(s.replay.PC) s.lastPC = s.replay.PC case *trace.OpStep: s.insPrint(s.replay.PC, o.Size, effects) s.lastPC = s.replay.PC case *trace.OpSyscall: s.sysPrint(o) } } func (s *StreamUI) OnStart(entry uint64) { if !s.config.Verbose { return } s.Printf("[entry @ 0x%x]\n", entry) dis, err := s.dis(entry, 64) if err != nil { s.Println(err) } else { s.Println(dis) } sp := s.replay.SP buf := make([]byte, 128) if err := s.replay.Mem.MemReadInto(buf, sp-32); err != nil { s.Println("error reading stack:", err) } else { s.Println("[stack]") for _, line := range models.HexDump(sp-32, buf[:32], s.replay.Arch.Bits) { s.Println(line) } } s.Println("[stack pointer]") for _, line := range models.HexDump(sp, buf[32:], s.replay.Arch.Bits) { s.Println(line) } s.Println("[memory map]") for _, mm := range s.replay.Mem.Maps() { s.Printf(" %s\n", mm) } s.Println("=====================================") s.Println("==== Program output begins here. 
====") s.Println("=====================================") } func (s *StreamUI) OnExit(clean bool, msg string) { if clean && !s.config.Verbose { return } if msg != "" { s.Println(msg) } dis, err := s.dis(s.replay.PC, 64) if err == nil { s.Println("[pc]") s.Println(dis) } s.Println("[memory map]") for _, mm := range s.replay.Mem.Maps() { s.Printf(" %s\n", mm) } s.Println("[registers]") // FIXME: cache reg names as a list names := s.replay.Arch.RegNames() for enum, val := range s.replay.Regs { name, ok := names[enum] if !ok { name = strconv.Itoa(enum) } s.Printf("%s: %#x\n", name, val) } s.Println("[callstack]") pc := s.replay.PC sp := s.replay.SP for _, frame := range s.replay.Callstack.Freeze(pc, sp) { s.Printf(" %#x\n", frame.PC, s.addrsym(frame.PC, true)) } } func (s *StreamUI) addrsym(addr uint64, includeSource bool) string { _, sym := s.replay.Symbolicate(addr, true) if sym == "" { if page := s.replay.Mem.Maps().Find(addr); s.config.SymFile && page != nil && page.File != nil { sym = fmt.Sprintf("@%s", page.File.Name) } } else { sym = fmt.Sprintf(" %s", sym) } return fmt.Sprintf("%#x%s", addr, sym) } func (s *StreamUI) dis(addr, size uint64) (string, error) { mem, err := s.replay.Mem.MemRead(addr, uint64(size)) if err != nil { return "", errors.Wrap(err, "dis() mem read failed") } return models.Disas(mem, addr, s.replay.Arch, s.config.DisBytes) } // blockPrint() takes a basic block address to pretty-print func (s *StreamUI) blockPrint(addr uint64) { // this fixes a problem displaying `rep mov` if addr != s.lastPC { _, sym := s.replay.Symbolicate(addr, true) if sym != "" { s.Printf("\n%s\n", sym) } } } func (s *StreamUI) Printf(f string, args ...interface{}) { fmt.Fprintf(s.config.Output, f, args...) } func (s *StreamUI) Println(args ...interface{}) { fmt.Fprintln(s.config.Output, args...) } // sysPrint() takes a syscall op to pretty-print func (s *StreamUI) sysPrint(op *trace.OpSyscall) { // This is a workaround for live tracing. 
// Desc is not serialized so offline traces won't have access to it. if op.Desc != "" { s.Println(op.Desc) } else { // FIXME: this is a regression, how do we strace? // I think I need to embed the strace string during trace // until I get a chance to rework the strace backend // SECOND THOUGHT // I just need to expose a method on models.OS to convert syscall number into name // then I should be able to use the strace from kernel common // except I need to be able to dependency-inject the MemIO (as we might be on MemSim) args := make([]string, len(op.Args)) for i, v := range op.Args { args[i] = fmt.Sprintf("%#x", v) } s.Printf("syscall(%d, [%s]) = %d\n", op.Num, strings.Join(args, ", "), op.Ret) } } // insPrint() takes an instruction address and side-effects to pretty-print func (s *StreamUI) insPrint(pc uint64, size uint8, effects []models.Op) { // TODO: make all of this into Sprintf columns, and align the columns var ins string dis, err := s.dis(pc, uint64(size)) if err != nil { insmem, _ := s.replay.Mem.MemRead(pc, uint64(size)) ins = fmt.Sprintf("%#x: %x", pc, insmem) } else { ins = fmt.Sprintf("%s", dis) } // collect effects (should just be memory IO and register changes) var regs []string var mem []string for _, op := range effects { switch o := op.(type) { case *trace.OpReg: // FIXME: cache reg names as a list name, ok := s.replay.Arch.RegNames()[int(o.Num)] if !ok { name = strconv.Itoa(int(o.Num)) } reg := fmt.Sprintf(s.regfmt, name, o.Val) regs = append(regs, reg) case *trace.OpSpReg: s.Println("<unimplemented special register>") case *trace.OpMemRead: // TODO: hexdump -C mem = append(mem, fmt.Sprintf("R %x", o.Addr)) case *trace.OpMemWrite: // TODO: hexdump -C mem = append(mem, fmt.Sprintf("W %x", o.Addr)) } } var reg, m string if len(regs) > 0 { reg = regs[0] + pad(regs[0], s.regcol) } else { reg = strings.Repeat(" ", s.regcol) } if len(mem) > 0 { m = mem[0] } ins += pad(ins, s.inscol) // TODO: remove dword, etc from x86 disassembly? 
// generally simplifying disassembly would improve the output // mov eax, dword ptr [eax + 8] // -> mov eax, [eax+8] // // 0x1004: mov eax, 1 | eax = 1 // 0x1008: mov eax, dword ptr [eax + 8] | eax = 2 |R 0x1020 0011 2233 4455 6677 [........] if m == "" { s.Printf("%s | %s\n", ins, reg) } else { s.Printf("%s | %s | %s\n", ins, reg, m) } // print extra effects if len(regs) > 1 { inspad := strings.Repeat(" ", s.inscol) for i, r := range regs[1:] { if i+1 < len(mem) { s.Printf("%s + %s + %s\n", inspad, r, mem[i+1]) } else { s.Printf("%s + %s\n", inspad, r) } } } for _, op := range effects { switch o := op.(type) { case *trace.OpMemBatch: s.Printf("%s", o.Render(s.replay.Mem)) } } } fix regression printing stacktrace package ui import ( "fmt" "github.com/pkg/errors" "strconv" "strings" "github.com/lunixbochs/usercorn/go/models" "github.com/lunixbochs/usercorn/go/models/trace" ) func pad(s string, to int) string { if len(s) >= to { return "" } return strings.Repeat(" ", to-len(s)) } type StreamUI struct { replay *trace.Replay config *models.Config regfmt string inscol int regcol int // pending is an OpStep representing the last unflushed instruction. Cleared by Flush(). 
pending *trace.OpStep effects []models.Op lastPC uint64 } func NewStreamUI(c *models.Config, r *trace.Replay) *StreamUI { // find the longest register name longest := 0 for _, name := range r.Arch.RegNames() { if len(name) > longest { longest = len(name) } } return &StreamUI{ replay: r, config: c, regfmt: fmt.Sprintf("%%%ds = %%#0%dx", longest, r.Arch.Bits/4), inscol: 60, // FIXME regcol: longest + 5 + r.Arch.Bits/4, } } func (s *StreamUI) Feed(op models.Op, effects []models.Op) { switch o := op.(type) { case *trace.OpJmp: s.blockPrint(s.replay.PC) s.lastPC = s.replay.PC case *trace.OpStep: s.insPrint(s.replay.PC, o.Size, effects) s.lastPC = s.replay.PC case *trace.OpSyscall: s.sysPrint(o) } } func (s *StreamUI) OnStart(entry uint64) { if !s.config.Verbose { return } s.Printf("[entry @ 0x%x]\n", entry) dis, err := s.dis(entry, 64) if err != nil { s.Println(err) } else { s.Println(dis) } sp := s.replay.SP buf := make([]byte, 128) if err := s.replay.Mem.MemReadInto(buf, sp-32); err != nil { s.Println("error reading stack:", err) } else { s.Println("[stack]") for _, line := range models.HexDump(sp-32, buf[:32], s.replay.Arch.Bits) { s.Println(line) } } s.Println("[stack pointer]") for _, line := range models.HexDump(sp, buf[32:], s.replay.Arch.Bits) { s.Println(line) } s.Println("[memory map]") for _, mm := range s.replay.Mem.Maps() { s.Printf(" %s\n", mm) } s.Println("=====================================") s.Println("==== Program output begins here. 
====") s.Println("=====================================") } func (s *StreamUI) OnExit(clean bool, msg string) { if clean && !s.config.Verbose { return } if msg != "" { s.Println(msg) } dis, err := s.dis(s.replay.PC, 64) if err == nil { s.Println("[pc]") s.Println(dis) } s.Println("[memory map]") for _, mm := range s.replay.Mem.Maps() { s.Printf(" %s\n", mm) } s.Println("[registers]") // FIXME: cache reg names as a list names := s.replay.Arch.RegNames() for enum, val := range s.replay.Regs { name, ok := names[enum] if !ok { name = strconv.Itoa(enum) } s.Printf("%s: %#x\n", name, val) } s.Println("[callstack]") pc := s.replay.PC sp := s.replay.SP for _, frame := range s.replay.Callstack.Freeze(pc, sp) { s.Printf(" %s\n", s.addrsym(frame.PC, true)) } } func (s *StreamUI) addrsym(addr uint64, includeSource bool) string { _, sym := s.replay.Symbolicate(addr, true) if sym == "" { if page := s.replay.Mem.Maps().Find(addr); s.config.SymFile && page != nil && page.File != nil { sym = fmt.Sprintf("@%s", page.File.Name) } } else { sym = fmt.Sprintf(" %s", sym) } return fmt.Sprintf("%#x%s", addr, sym) } func (s *StreamUI) dis(addr, size uint64) (string, error) { mem, err := s.replay.Mem.MemRead(addr, uint64(size)) if err != nil { return "", errors.Wrap(err, "dis() mem read failed") } return models.Disas(mem, addr, s.replay.Arch, s.config.DisBytes) } // blockPrint() takes a basic block address to pretty-print func (s *StreamUI) blockPrint(addr uint64) { // this fixes a problem displaying `rep mov` if addr != s.lastPC { _, sym := s.replay.Symbolicate(addr, true) if sym != "" { s.Printf("\n%s\n", sym) } } } func (s *StreamUI) Printf(f string, args ...interface{}) { fmt.Fprintf(s.config.Output, f, args...) } func (s *StreamUI) Println(args ...interface{}) { fmt.Fprintln(s.config.Output, args...) } // sysPrint() takes a syscall op to pretty-print func (s *StreamUI) sysPrint(op *trace.OpSyscall) { // This is a workaround for live tracing. 
// Desc is not serialized so offline traces won't have access to it. if op.Desc != "" { s.Println(op.Desc) } else { // FIXME: this is a regression, how do we strace? // I think I need to embed the strace string during trace // until I get a chance to rework the strace backend // SECOND THOUGHT // I just need to expose a method on models.OS to convert syscall number into name // then I should be able to use the strace from kernel common // except I need to be able to dependency-inject the MemIO (as we might be on MemSim) args := make([]string, len(op.Args)) for i, v := range op.Args { args[i] = fmt.Sprintf("%#x", v) } s.Printf("syscall(%d, [%s]) = %d\n", op.Num, strings.Join(args, ", "), op.Ret) } } // insPrint() takes an instruction address and side-effects to pretty-print func (s *StreamUI) insPrint(pc uint64, size uint8, effects []models.Op) { // TODO: make all of this into Sprintf columns, and align the columns var ins string dis, err := s.dis(pc, uint64(size)) if err != nil { insmem, _ := s.replay.Mem.MemRead(pc, uint64(size)) ins = fmt.Sprintf("%#x: %x", pc, insmem) } else { ins = fmt.Sprintf("%s", dis) } // collect effects (should just be memory IO and register changes) var regs []string var mem []string for _, op := range effects { switch o := op.(type) { case *trace.OpReg: // FIXME: cache reg names as a list name, ok := s.replay.Arch.RegNames()[int(o.Num)] if !ok { name = strconv.Itoa(int(o.Num)) } reg := fmt.Sprintf(s.regfmt, name, o.Val) regs = append(regs, reg) case *trace.OpSpReg: s.Println("<unimplemented special register>") case *trace.OpMemRead: // TODO: hexdump -C mem = append(mem, fmt.Sprintf("R %x", o.Addr)) case *trace.OpMemWrite: // TODO: hexdump -C mem = append(mem, fmt.Sprintf("W %x", o.Addr)) } } var reg, m string if len(regs) > 0 { reg = regs[0] + pad(regs[0], s.regcol) } else { reg = strings.Repeat(" ", s.regcol) } if len(mem) > 0 { m = mem[0] } ins += pad(ins, s.inscol) // TODO: remove dword, etc from x86 disassembly? 
// generally simplifying disassembly would improve the output // mov eax, dword ptr [eax + 8] // -> mov eax, [eax+8] // // 0x1004: mov eax, 1 | eax = 1 // 0x1008: mov eax, dword ptr [eax + 8] | eax = 2 |R 0x1020 0011 2233 4455 6677 [........] if m == "" { s.Printf("%s | %s\n", ins, reg) } else { s.Printf("%s | %s | %s\n", ins, reg, m) } // print extra effects if len(regs) > 1 { inspad := strings.Repeat(" ", s.inscol) for i, r := range regs[1:] { if i+1 < len(mem) { s.Printf("%s + %s + %s\n", inspad, r, mem[i+1]) } else { s.Printf("%s + %s\n", inspad, r) } } } for _, op := range effects { switch o := op.(type) { case *trace.OpMemBatch: s.Printf("%s", o.Render(s.replay.Mem)) } } }
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"image"
	"image/color"
	"image/png"
	"log"
	"os"
	"strconv"
	"text/template"

	"github.com/sebkl/flagconf"
)

const (
	DEFAULT_COUNTRYMAP_IMAGE_FN = "atlas/countrymap.png"
	DEFAULT_OUTPUT_FN           = "lookup.js"
	DEFAULT_COUNTRYNAME_FN      = "countrydata/country_names.json"
	DEFAULT_COUNTRYMAP_FN       = "countrydata/country_map.json"
	// STEP is the sampling resolution in tenths of a degree.
	STEP = 5

	// TEMPLATE_LOOKUP_MAP is the JS helper code appended to the generated
	// lookup tables. It reads this.geolookup, this.countrylookup,
	// this.countrynames, this.countrylist and this.countryrlist.
	TEMPLATE_LOOKUP_MAP = `
GLOBE.GEO.NO_COUNTRY = "";
GLOBE.GEO.index_to_country = function(idx) {
	return this.countrylist[idx];
}
GLOBE.GEO.country_to_index = function(iso) {
	return this.countryrlist[iso];
}
GLOBE.GEO.lookup_country = function(x,y) {
	try {
		var long_idx = Math.round((x+180)*10/{{.Step}});
		var lat_idx = Math.round((y+90)*10/{{.Step}});
		return this.geolookup[long_idx][lat_idx];
	} catch (err) {
		return undefined;
	}
}
GLOBE.GEO.lookup_geo_array = function(iso) {
	return this.countrylookup[iso.toUpperCase()];
}
GLOBE.GEO.lookup_geo_points = function(iso) {
	var a = this.lookup_geo_array(iso.toUpperCase());
	try {
		var ret = a[Math.round(a.length/2)-1];
		return ([ret[0] - 180,ret[1] -90]);
	} catch (err) {
		console.log('Cannot lookup geopoints for: '+iso);
		return undefined;
	}
}
GLOBE.GEO.lookup_countryname = function(iso) {
	try {
		return this.countrynames[iso.toUpperCase()];
	} catch (err) {
		console.log('Cannot lookup country name for: '+iso);
	}
}
`
)

// config holds the command-line configurable input/output file names.
var config struct {
	CountryMapImageFN string
	OutputFN          string
	CountryNamesFN    string
	CountryMapFN      string
}

func init() {
	flag.StringVar(&config.CountryMapImageFN, "imagemap", DEFAULT_COUNTRYMAP_IMAGE_FN, "Country image map in greyscale: Color->idx (PNG)")
	flag.StringVar(&config.OutputFN, "o", DEFAULT_OUTPUT_FN, "Output file.")
	flag.StringVar(&config.CountryNamesFN, "cn", DEFAULT_COUNTRYNAME_FN, "Country name mapping: ISO->name (JSON)")
	flag.StringVar(&config.CountryMapFN, "cm", DEFAULT_COUNTRYMAP_FN, "Country mapping: idx->ISO (JSON)")
}

// polar2pixel extracts the pixel of image that maps to the given longitude
// and latitude.
func polar2pixel(img image.Image, long, lat float64) color.Color {
	x := int((long / 360.0) * float64(img.Bounds().Dx()))
	y := int((lat / 180.0) * float64(img.Bounds().Dy()))
	return img.At(x, y)
}

// loadPNGImage loads a PNG image from the given filename
func loadPNGImage(fn string) (img image.Image, err error) {
	f, err := os.Open(fn)
	if err != nil {
		return img, fmt.Errorf("Failed to open image '%s': %s", fn, err)
	}
	defer f.Close()
	img, err = png.Decode(f)
	if err != nil {
		return img, fmt.Errorf("Failed to load image '%s': %s", fn, err)
	}
	return img, nil
}

// loadJSONMap loads a JSON string->string map from the given filename.
func loadJSONMap(fn string) (ret map[string]string, err error) {
	f, err := os.Open(fn)
	if err != nil {
		return ret, err
	}
	defer f.Close()
	ret = make(map[string]string)
	decoder := json.NewDecoder(f)
	err = decoder.Decode(&ret)
	if err != nil {
		return ret, err
	}
	return ret, err
}

// main samples the greyscale country map image on a STEP-sized grid and
// emits the JS lookup tables plus the helper functions from
// TEMPLATE_LOOKUP_MAP.
func main() {
	flagconf.Parse("GLOBEJS_LOOKUPMAP")
	log.Printf("Outputfile: %s", config.OutputFN)

	cmap_image, err := loadPNGImage(config.CountryMapImageFN)
	if err != nil {
		// BUG FIX: was log.Fatal with a format verb; Fatalf is required
		log.Fatalf("Failed to load countrymap: '%s'", err)
	}
	cc, err := loadJSONMap(config.CountryMapFN)
	if err != nil {
		log.Fatalf("Failed to load country map '%s': %s", config.CountryMapFN, err)
	}

	sm := make([][]string, int(3600/STEP))
	// BUG FIX (geo points): countryindex is keyed by ISO code (falling back
	// to the raw color index when unmapped) and collects ALL grid points per
	// country instead of overwriting a single point keyed by color index —
	// the template's lookup_geo_points indexes into these per-ISO arrays.
	countryindex := make(map[string][][2]int)
	for lo := 0; lo < 3600; lo += STEP {
		longidx := int(lo / STEP)
		sm[longidx] = make([]string, int(1800/STEP))
		for la := 0; la < 1800; la += STEP {
			latidx := int(la / STEP)
			lau := float64(la) / 10
			lou := float64(lo) / 10
			col := polar2pixel(cmap_image, lou, lau)
			r, _, _, _ := col.RGBA() //uint32
			// red channel (8 most significant bits) encodes the country index
			ref := int(r >> 8)
			sref := strconv.Itoa(ref)
			if val, ok := cc[sref]; ok {
				sref = val
			}
			sm[longidx][latidx] = sref
			countryindex[sref] = append(countryindex[sref], [2]int{int(lo / 10), int(la / 10)})
		}
	}

	f, err := os.OpenFile(config.OutputFN, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatalf("Could not open output file '%s': %s", config.OutputFN, err)
	}
	defer f.Close()
	enc := json.NewEncoder(f)

	// ########## GEO -> ISO #############
	fmt.Fprintf(f, "GLOBE.GEO.geolookup = ")
	err = enc.Encode(sm)
	if err != nil {
		log.Fatalf("Failed to encode geo 2 country mapping: %s", err)
	}
	fmt.Fprintf(f, ";\n")

	// ########## ISO -> GEO #############
	fmt.Fprintf(f, "GLOBE.GEO.countrylookup = ")
	err = enc.Encode(countryindex)
	if err != nil {
		log.Fatalf("Failed to encode country 2 geo mapping: %s", err)
	}
	fmt.Fprintf(f, ";\n")

	// ########## ISO -> COUNTRY NAME #############
	ccs, err := loadJSONMap(config.CountryNamesFN)
	if err != nil {
		log.Fatalf("Failed to load country name '%s': %s", config.CountryNamesFN, err)
	}
	// BUG FIX: property was emitted as "countrnames" but the template
	// reads this.countrynames.
	fmt.Fprintf(f, "GLOBE.GEO.countrynames = ")
	err = enc.Encode(ccs)
	if err != nil {
		log.Fatalf("Failed to encode iso 2 country-name mapping: %s", err)
	}
	fmt.Fprintf(f, ";\n")

	// ########## IDX -> ISO, ISO -> IDX #############
	clist := make([]string, 256)
	rclist := make(map[string]int)
	for i := range clist {
		cidxs := strconv.Itoa(i)
		if iso, ok := cc[cidxs]; ok {
			clist[i] = iso
			if iso == "UK" { //UK/GB fuckup.
				rclist["GB"] = i
			}
			rclist[iso] = i
		} else {
			clist[i] = cidxs
			rclist[cidxs] = i
		}
	}
	fmt.Fprintf(f, "GLOBE.GEO.countrylist = ")
	enc.Encode(clist)
	fmt.Fprintf(f, ";\n")
	fmt.Fprintf(f, "GLOBE.GEO.countryrlist = ")
	enc.Encode(rclist)
	fmt.Fprintf(f, ";\n")

	tmpl := template.New("functions")
	tmpl, err = tmpl.Parse(TEMPLATE_LOOKUP_MAP)
	if err != nil {
		log.Fatalf("Could not parse template: %s", err)
	}
	err = tmpl.Execute(f, &struct{ Step int }{STEP})
	if err != nil {
		log.Fatalf("Failed to execute template: %s", err)
	}
}

// Fixed geo points bug.
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"image"
	"image/color"
	"image/png"
	"log"
	"os"
	"strconv"
	"text/template"

	"github.com/sebkl/flagconf"
)

const (
	DEFAULT_COUNTRYMAP_IMAGE_FN = "atlas/countrymap.png"
	DEFAULT_OUTPUT_FN           = "lookup.js"
	DEFAULT_COUNTRYNAME_FN      = "countrydata/country_names.json"
	DEFAULT_COUNTRYMAP_FN       = "countrydata/country_map.json"
	// STEP is the sampling resolution in tenths of a degree.
	STEP = 5

	// TEMPLATE_LOOKUP_MAP is the JS helper code appended to the generated
	// lookup tables. It reads this.geolookup, this.countrylookup,
	// this.countrynames, this.countrylist and this.countryrlist.
	TEMPLATE_LOOKUP_MAP = `
GLOBE.GEO.NO_COUNTRY = "";
GLOBE.GEO.index_to_country = function(idx) {
	return this.countrylist[idx];
}
GLOBE.GEO.country_to_index = function(iso) {
	return this.countryrlist[iso];
}
GLOBE.GEO.lookup_country = function(x,y) {
	try {
		var long_idx = Math.round((x+180)*10/{{.Step}});
		var lat_idx = Math.round((y+90)*10/{{.Step}});
		return this.geolookup[long_idx][lat_idx];
	} catch (err) {
		return undefined;
	}
}
GLOBE.GEO.lookup_geo_array = function(iso) {
	return this.countrylookup[iso.toUpperCase()];
}
GLOBE.GEO.lookup_geo_points = function(iso) {
	var a = this.lookup_geo_array(iso.toUpperCase());
	try {
		var ret = a[Math.round(a.length/2)-1];
		return ([ret[0] - 180,ret[1] -90]);
	} catch (err) {
		console.log('Cannot lookup geopoints for "'+iso+'": ' + err);
		return undefined;
	}
}
GLOBE.GEO.lookup_countryname = function(iso) {
	try {
		return this.countrynames[iso.toUpperCase()];
	} catch (err) {
		console.log('Cannot lookup country name for: '+iso);
	}
}
`
)

// config holds the command-line configurable input/output file names.
var config struct {
	CountryMapImageFN string
	OutputFN          string
	CountryNamesFN    string
	CountryMapFN      string
}

func init() {
	flag.StringVar(&config.CountryMapImageFN, "imagemap", DEFAULT_COUNTRYMAP_IMAGE_FN, "Country image map in greyscale: Color->idx (PNG)")
	flag.StringVar(&config.OutputFN, "o", DEFAULT_OUTPUT_FN, "Output file.")
	flag.StringVar(&config.CountryNamesFN, "cn", DEFAULT_COUNTRYNAME_FN, "Country name mapping: ISO->name (JSON)")
	flag.StringVar(&config.CountryMapFN, "cm", DEFAULT_COUNTRYMAP_FN, "Country mapping: idx->ISO (JSON)")
}

// polar2pixel extracts the pixel of image that maps to the given longitude
// and latitude.
func polar2pixel(img image.Image, long, lat float64) color.Color {
	x := int((long / 360.0) * float64(img.Bounds().Dx()))
	y := int((lat / 180.0) * float64(img.Bounds().Dy()))
	return img.At(x, y)
}

// loadPNGImage loads a PNG image from the given filename
func loadPNGImage(fn string) (img image.Image, err error) {
	f, err := os.Open(fn)
	if err != nil {
		return img, fmt.Errorf("Failed to open image '%s': %s", fn, err)
	}
	defer f.Close()
	img, err = png.Decode(f)
	if err != nil {
		return img, fmt.Errorf("Failed to load image '%s': %s", fn, err)
	}
	return img, nil
}

// loadJSONMap loads a JSON string->string map from the given filename.
func loadJSONMap(fn string) (ret map[string]string, err error) {
	f, err := os.Open(fn)
	if err != nil {
		return ret, err
	}
	defer f.Close()
	ret = make(map[string]string)
	decoder := json.NewDecoder(f)
	err = decoder.Decode(&ret)
	if err != nil {
		return ret, err
	}
	return ret, err
}

// main samples the greyscale country map image on a STEP-sized grid and
// emits the JS lookup tables plus the helper functions from
// TEMPLATE_LOOKUP_MAP.
func main() {
	flagconf.Parse("GLOBEJS_LOOKUPMAP")
	log.Printf("Outputfile: %s", config.OutputFN)

	cmap_image, err := loadPNGImage(config.CountryMapImageFN)
	if err != nil {
		// BUG FIX: was log.Fatal with a format verb; Fatalf is required
		log.Fatalf("Failed to load countrymap: '%s'", err)
	}
	cc, err := loadJSONMap(config.CountryMapFN)
	if err != nil {
		log.Fatalf("Failed to load country map '%s': %s", config.CountryMapFN, err)
	}

	sm := make([][]string, int(3600/STEP))
	// countryindex: ISO code (or raw color index when unmapped) -> all grid
	// points belonging to that country.
	countryindex := make(map[string][][2]int)
	for lo := 0; lo < 3600; lo += STEP {
		longidx := int(lo / STEP)
		sm[longidx] = make([]string, int(1800/STEP))
		for la := 0; la < 1800; la += STEP {
			latidx := int(la / STEP)
			lau := float64(la) / 10
			lou := float64(lo) / 10
			col := polar2pixel(cmap_image, lou, lau)
			r, _, _, _ := col.RGBA() //uint32
			// red channel (8 most significant bits) encodes the country index
			ref := int(r >> 8)
			sref := strconv.Itoa(ref)
			if val, ok := cc[sref]; ok {
				sref = val
			}
			sm[longidx][latidx] = sref
			countryindex[sref] = append(countryindex[sref], [2]int{int(lo / 10), int(la / 10)})
		}
	}

	f, err := os.OpenFile(config.OutputFN, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatalf("Could not open output file '%s': %s", config.OutputFN, err)
	}
	defer f.Close()
	enc := json.NewEncoder(f)

	// ########## GEO -> ISO #############
	fmt.Fprintf(f, "GLOBE.GEO.geolookup = ")
	err = enc.Encode(sm)
	if err != nil {
		log.Fatalf("Failed to encode geo 2 country mapping: %s", err)
	}
	fmt.Fprintf(f, ";\n")

	// ########## ISO -> GEO #############
	fmt.Fprintf(f, "GLOBE.GEO.countrylookup = ")
	err = enc.Encode(countryindex)
	if err != nil {
		log.Fatalf("Failed to encode country 2 geo mapping: %s", err)
	}
	fmt.Fprintf(f, ";\n")

	// ########## ISO -> COUNTRY NAME #############
	ccs, err := loadJSONMap(config.CountryNamesFN)
	if err != nil {
		log.Fatalf("Failed to load country name '%s': %s", config.CountryNamesFN, err)
	}
	fmt.Fprintf(f, "GLOBE.GEO.countrynames = ")
	err = enc.Encode(ccs)
	if err != nil {
		log.Fatalf("Failed to encode iso 2 country-name mapping: %s", err)
	}
	fmt.Fprintf(f, ";\n")

	// ########## IDX -> ISO, ISO -> IDX #############
	clist := make([]string, 256)
	rclist := make(map[string]int)
	for i := range clist {
		cidxs := strconv.Itoa(i)
		if iso, ok := cc[cidxs]; ok {
			clist[i] = iso
			if iso == "UK" { //UK/GB fuckup.
				rclist["GB"] = i
			}
			rclist[iso] = i
		} else {
			clist[i] = cidxs
			rclist[cidxs] = i
		}
	}
	fmt.Fprintf(f, "GLOBE.GEO.countrylist = ")
	enc.Encode(clist)
	fmt.Fprintf(f, ";\n")
	fmt.Fprintf(f, "GLOBE.GEO.countryrlist = ")
	enc.Encode(rclist)
	fmt.Fprintf(f, ";\n")

	tmpl := template.New("functions")
	tmpl, err = tmpl.Parse(TEMPLATE_LOOKUP_MAP)
	if err != nil {
		log.Fatalf("Could not parse template: %s", err)
	}
	err = tmpl.Execute(f, &struct{ Step int }{STEP})
	if err != nil {
		log.Fatalf("Failed to execute template: %s", err)
	}
}
package sync

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/bmizerany/assert"
	"github.com/cmars/replican-sync/replican/fs"
	"github.com/cmars/replican-sync/replican/treegen"
)

// printPlan prints a description of the steps that the patch plan will
// follow. Call sites are commented out by default; enable when debugging.
func printPlan(plan *PatchPlan) {
	// range is the idiomatic form of the old index loop
	for _, cmd := range plan.Cmds {
		fmt.Printf("%s\n", cmd.String())
	}
}

// TestPatch runs an actual file patch on the munged file scenario from
// TestMatchMunge. The resulting patched file should be identical to the
// source file.
func TestPatch(t *testing.T) {
	srcPath := "../../testroot/My Music/0 10k 30.mp4"
	dstPath := filepath.Join(os.TempDir(), "foo.mp4")
	os.Remove(dstPath)

	// Seed the destination with the munged copy of the source file.
	origDstF, err := os.Open("../../testroot/My Music/0 10k 30 munged.mp4")
	assert.Tf(t, err == nil, "%v", err)
	dstF, err := os.Create(dstPath)
	assert.Tf(t, err == nil, "%v", err)
	_, err = io.Copy(dstF, origDstF)
	assert.Tf(t, err == nil, "%v", err)
	origDstF.Close()
	dstF.Close()

	patchPlan, err := Patch(srcPath, dstPath)
	// printPlan(patchPlan)
	assert.Tf(t, err == nil, "%v", err)
	failedCmd, err := patchPlan.Exec()
	assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err)

	// The patched destination must now hash identically to the source.
	srcFileInfo, _, err := fs.IndexFile(srcPath)
	assert.T(t, err == nil)
	dstFileInfo, _, err := fs.IndexFile(dstPath)
	assert.Tf(t, err == nil, "%v", err)
	assert.Equal(t, srcFileInfo.Strong, dstFileInfo.Strong)
}

// Test the patch planner on two identical directory structures.
func TestPatchIdentity(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) assert.T(t, len(patchPlan.Cmds) > 0) for i := 0; i < len(patchPlan.Cmds); i++ { keep := patchPlan.Cmds[0].(*Keep) assert.T(t, strings.HasPrefix(dstpath, keep.Path.Resolve())) } } // Test the matcher on a case where the source file has the same // prefix as destination, but has been appended to. func TestMatchAppend(t *testing.T) { tg := treegen.New() treeSpec := tg.F("bar", tg.B(42, 65537), tg.B(43, 65537)) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) // Try indexing root dir as a file srcFileInfo, srcBlocksInfo, err := fs.IndexFile(srcpath) assert.Tf(t, err != nil, "%v", err) // Ok, for real this time srcFileInfo, srcBlocksInfo, err = fs.IndexFile(filepath.Join(srcpath, "bar")) assert.Tf(t, err == nil, "%v", err) assert.Equal(t, 17, len(srcBlocksInfo)) tg = treegen.New() treeSpec = tg.F("bar", tg.B(42, 65537)) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) _, dstBlocksInfo, err := fs.IndexFile(filepath.Join(dstpath, "bar")) assert.Equal(t, 9, len(dstBlocksInfo)) srcRepo := fs.NewMemRepo() srcFile := srcRepo.AddFile(nil, srcFileInfo, srcBlocksInfo) match, err := MatchFile(srcFile, filepath.Join(dstpath, "bar")) assert.T(t, err == nil, "%v", err) assert.Equal(t, 8, len(match.BlockMatches)) notMatched := match.NotMatched() assert.Equal(t, 1, len(notMatched)) assert.Equal(t, int64(65536), notMatched[0].From) assert.Equal(t, int64(65537+65537), notMatched[0].To) } // Test the patch planner on a case where the source file has the same // prefix 
as destination, but has been appended to. // Execute the patch plan and check both resulting trees are identical. func TestPatchFileAppend(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537), tg.B(43, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("bar", tg.B(42, 65537))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) complete := false for i, cmd := range patchPlan.Cmds { switch { case i == 0: localTemp, isTemp := cmd.(*LocalTemp) assert.T(t, isTemp) assert.Equal(t, filepath.Join(dstpath, "foo", "bar"), localTemp.Path.Resolve()) case i >= 1 && i <= 8: ltc, isLtc := cmd.(*LocalTempCopy) assert.Tf(t, isLtc, "cmd %d", i) assert.Equal(t, ltc.LocalOffset, ltc.TempOffset) assert.Equal(t, int64(fs.BLOCKSIZE), ltc.Length) assert.Equal(t, int64(0), ltc.LocalOffset%int64(fs.BLOCKSIZE)) case i == 9: stc, isStc := cmd.(*SrcTempCopy) assert.T(t, isStc) assert.Equal(t, int64(65538), stc.Length) case i == 10: _, isRwt := cmd.(*ReplaceWithTemp) assert.T(t, isRwt) complete = true case i > 10: t.Fatalf("too many commands") } } assert.T(t, complete, "missing expected number of commands") failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcRoot := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstRoot := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcRoot.Info().Strong, dstRoot.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } // Test the patch planner on a case where the source file is a shorter, // truncated version of the destination. 
// Execute the patch plan and check both resulting trees are identical. func TestPatchFileTruncate(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("bar", tg.B(42, 65537), tg.B(43, 65537))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) complete := false for i, cmd := range patchPlan.Cmds { switch { case i == 0: localTemp, isTemp := cmd.(*LocalTemp) assert.T(t, isTemp) assert.Equal(t, filepath.Join(dstpath, "foo", "bar"), localTemp.Path.Resolve()) case i >= 1 && i <= 8: ltc, isLtc := cmd.(*LocalTempCopy) assert.Tf(t, isLtc, "cmd %d", i) assert.Equal(t, ltc.LocalOffset, ltc.TempOffset) assert.Equal(t, int64(fs.BLOCKSIZE), ltc.Length) assert.Equal(t, int64(0), ltc.LocalOffset%int64(fs.BLOCKSIZE)) case i == 9: stc, isStc := cmd.(*SrcTempCopy) assert.T(t, isStc) assert.Equal(t, int64(1), stc.Length) complete = true case i > 10: t.Fatalf("too many commands") } } assert.T(t, complete, "missing expected number of commands") failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcRoot := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstRoot := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcRoot.Info().Strong, dstRoot.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } // Test the patch planner's ability to track adding a bunch of new files. 
func TestPatchAdd(t *testing.T) { tg := treegen.New() files := []treegen.Generated{} for i := 0; i < 10; i++ { files = append(files, tg.F("", tg.B(int64(42*i), int64(500000*i)))) } treeSpec := tg.D("foo", tg.D("bar", files...)) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(filepath.Join(srcpath, "foo"), fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.D("bar"), tg.D("baz")) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(filepath.Join(dstpath, "foo"), fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) for _, cmd := range patchPlan.Cmds { _, isSfd := cmd.(*SrcFileDownload) assert.T(t, isSfd) } } // Test patch planner on a file rename. Contents remain the same. func TestPatchRenameFileSameDir(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("baz", tg.B(42, 65537))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) assert.Equal(t, 1, len(patchPlan.Cmds)) rename, isRename := patchPlan.Cmds[0].(*Transfer) assert.T(t, isRename) assert.T(t, strings.HasSuffix(rename.From.Resolve(), filepath.Join("foo", "baz"))) assert.T(t, strings.HasSuffix(rename.To.Resolve(), filepath.Join("foo", "bar"))) } // Test patch planner on a file directory restructuring between // source and destination, where files have identical content in both. 
func TestPatchRenameFileDifferentDir(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("gloo", tg.F("bloo", tg.B(99, 99)), tg.D("groo", tg.D("snoo", tg.F("bar", tg.B(42, 65537)))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("pancake", tg.F("butter", tg.B(42, 65537)), tg.F("syrup", tg.B(99, 99))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) assert.Equal(t, 2, len(patchPlan.Cmds)) for i := 0; i < len(patchPlan.Cmds); i++ { _, isRename := patchPlan.Cmds[0].(*Transfer) assert.T(t, isRename) } // Now flip patchPlan = NewPatchPlan(dstStore, srcStore) assert.Equal(t, 2, len(patchPlan.Cmds)) for i := 0; i < len(patchPlan.Cmds); i++ { _, isRename := patchPlan.Cmds[0].(*Transfer) assert.T(t, isRename) } } // Test patch planner on case where the source and // destination have a direct conflict in structure. // A path in the source is a directory, path in destination // already contains a file at that location. 
// Source has directory "foo" containing both a file and nested directories;
// destination has a plain file at "foo/gloo". The planner must emit a
// Conflict for the clashing path and then download the two source files.
func TestPatchSimpleDirFileConflict(t *testing.T) {
	tg := treegen.New()
	treeSpec := tg.D("foo",
		tg.D("gloo",
			tg.F("bloo", tg.B(99, 99)),
			tg.D("groo",
				tg.D("snoo",
					tg.F("bar", tg.B(42, 65537))))))
	srcpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(srcpath)
	srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo())
	assert.T(t, err == nil)

	// Destination: "foo/gloo" is a FILE here, conflicting with the source dir.
	tg = treegen.New()
	treeSpec = tg.D("foo",
		tg.F("gloo", tg.B(99, 999)))
	dstpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(dstpath)
	dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo())
	assert.T(t, err == nil)

	patchPlan := NewPatchPlan(srcStore, dstStore)
	// printPlan(patchPlan)

	failedCmd, err := patchPlan.Exec()
	assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err)

	// Expected plan: 1 conflict marker + 2 source downloads, in order.
	// The hex strings are the expected strong checksums of the downloads
	// (deterministic because treegen content is seeded).
	assert.Equal(t, 3, len(patchPlan.Cmds))
	for i, cmd := range patchPlan.Cmds {
		switch i {
		case 0:
			conflict, is := cmd.(*Conflict)
			assert.T(t, is)
			assert.T(t, strings.HasSuffix(conflict.Path.RelPath, filepath.Join("foo", "gloo")))
		case 1:
			copy, is := cmd.(*SrcFileDownload)
			assert.T(t, is)
			assert.Equal(t, "beced72da0cf22301e23bdccec61bf9763effd6f", copy.SrcFile.Info().Strong)
		case 2:
			copy, is := cmd.(*SrcFileDownload)
			assert.T(t, is)
			assert.Equal(t, "764b5f659f70e69d4a87fe6ed138af40be36c514", copy.SrcFile.Info().Strong)
		}
	}
}

// assertNoRelocs fails the test if any "_reloc" relocation artifacts were
// left behind in the given directory after a patch run.
func assertNoRelocs(t *testing.T, path string) {
	d, err := os.Open(path)
	assert.T(t, err == nil)
	names, err := d.Readdirnames(0)
	assert.T(t, err == nil)
	for _, name := range names {
		assert.T(t, !strings.HasPrefix(name, "_reloc"))
	}
}

// Test patch planner on case where the source and
// destination have a direct conflict in structure.
// A path in the source is a directory, path in destination
// already contains a file at that location.
func TestPatchRelocConflict(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("gloo", tg.F("bloo", tg.B(99, 99)), tg.D("groo", tg.D("snoo", tg.F("bar", tg.B(42, 65537)))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("gloo", tg.B(99, 99))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) assert.Equal(t, 3, len(patchPlan.Cmds)) for i, cmd := range patchPlan.Cmds { switch i { case 0: conflict, is := cmd.(*Conflict) assert.T(t, is) assert.T(t, strings.HasSuffix(conflict.Path.RelPath, filepath.Join("foo", "gloo"))) case 1: copy, is := cmd.(*Transfer) assert.T(t, is) assert.T(t, strings.HasSuffix(copy.From.Resolve(), filepath.Join("foo", "gloo"))) assert.T(t, strings.HasSuffix(copy.To.Resolve(), filepath.Join("foo", "gloo", "bloo"))) case 2: copy, is := cmd.(*SrcFileDownload) assert.T(t, is) assert.Equal(t, "764b5f659f70e69d4a87fe6ed138af40be36c514", copy.SrcFile.Info().Strong) } } failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) assertNoRelocs(t, dstpath) } func TestPatchDepConflict(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("gloo", tg.F("bloo", tg.B(99, 8192), tg.B(100, 10000)), tg.D("groo", tg.D("snoo", tg.F("bar", tg.B(42, 65537)))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("gloo", tg.B(99, 10000))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, 
dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) assertNoRelocs(t, dstpath) } func TestPatchWeakCollision(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(6806, 65536))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("bar", tg.B(9869, 65536))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) // Src and dst blocks have same weak checksum assert.Equal(t, (srcStore.Repo().Root().(fs.Dir)).SubDirs()[0].Files()[0].Blocks()[0].Info().Weak, (dstStore.Repo().Root().(fs.Dir)).SubDirs()[0].Files()[0].Blocks()[0].Info().Weak) // Src and dst blocks have different strong checksum srcRoot := srcStore.Repo().Root().(fs.Dir) dstRoot := srcStore.Repo().Root().(fs.Dir) assert.Tf(t, srcRoot.Info().Strong != dstRoot.Info().Strong, "wtf: %v == %v", srcRoot.Info().Strong, dstRoot.Info().Strong) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcDir := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstDir := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcDir.Info().Strong, dstDir.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } func TestPatchRenameScope(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(6806, 65536)), tg.F("baz", tg.B(6806, 65536))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", 
tg.F("baz", tg.B(6806, 65536)), tg.F("blop", tg.B(6806, 65536))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcDir := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstDir := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcDir.Info().Strong, dstDir.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } func TestPatchPreserveKeeps(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(6806, 65536)), tg.F("blop", tg.B(6806, 65536))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("baz", tg.B(6806, 65536)), tg.F("blop", tg.B(6806, 65536))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) info, err := os.Stat(filepath.Join(dstpath, "foo", "bar")) assert.T(t, err == nil && info != nil) info, err = os.Stat(filepath.Join(dstpath, "foo", "blop")) assert.T(t, err == nil && info != nil) } func TestClean(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = 
tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))), tg.D("beth", tg.F("B", tg.B(43, 65537)), tg.F("b", tg.B(43, 65537))), tg.D("jimmy", tg.F("G", tg.B(44, 65537)), tg.F("g", tg.B(44, 65537)))), tg.D("baz", tg.D("uno", tg.F("1", tg.B(1, 65537)), tg.F("I", tg.B(1, 65537))), tg.D("dos", tg.F("2", tg.B(11, 65537)), tg.F("II", tg.B(11, 65537))), tg.D("tres", tg.F("3", tg.B(111, 65537)), tg.F("III", tg.B(111, 65537))))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) onePath := dstStore.Resolve(filepath.Join("foo", "baz", "uno", "1")) _, err = os.Stat(onePath) assert.Tf(t, err == nil, "%v", err) patchPlan := NewPatchPlan(srcStore, dstStore) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil, "%v", failedCmd) assert.Tf(t, err == nil, "%v", err) errors := make(chan os.Error) go func() { patchPlan.Clean(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } onePath = dstStore.Resolve(filepath.Join("foo", "baz", "uno", "1")) _, err = os.Stat(onePath) assert.Tf(t, err != nil, "%v", err) } func TestSetModeNew(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) srcpath := treegen.TestTree(t, treeSpec) os.Chmod(filepath.Join(srcpath, "foo", "bar", "aleph", "A"), 0765) os.Chmod(filepath.Join(srcpath, "foo", "bar"), 0711) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo") dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil, "%v", failedCmd) assert.Tf(t, err == nil, "%v", err) errors := 
make(chan os.Error) go func() { patchPlan.Clean(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } errors = make(chan os.Error) go func() { patchPlan.SetMode(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } fileinfo, err := os.Stat(filepath.Join(dstpath, "foo", "bar", "aleph", "A")) assert.T(t, fileinfo != nil) assert.Equal(t, uint32(0765), fileinfo.Permission()) fileinfo, err = os.Stat(filepath.Join(dstpath, "foo", "bar")) assert.T(t, fileinfo != nil) assert.Equal(t, uint32(0711), fileinfo.Permission()) } func TestSetModeOverwrite(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) srcpath := treegen.TestTree(t, treeSpec) os.Chmod(filepath.Join(srcpath, "foo", "bar", "aleph", "A"), 0765) os.Chmod(filepath.Join(srcpath, "foo", "bar"), 0711) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) dstpath := treegen.TestTree(t, treeSpec) os.Chmod(filepath.Join(dstpath, "foo", "bar", "aleph", "A"), 0600) os.Chmod(filepath.Join(dstpath, "foo", "bar"), 0700) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil, "%v %v", failedCmd, err) assert.Tf(t, err == nil, "%v", err) errors := make(chan os.Error) go func() { patchPlan.Clean(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } errors = make(chan os.Error) go func() { patchPlan.SetMode(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } fileinfo, err := os.Stat(filepath.Join(dstpath, "foo", "bar", "aleph", "A")) assert.T(t, 
fileinfo != nil) assert.Equal(t, uint32(0765), fileinfo.Permission()) fileinfo, err = os.Stat(filepath.Join(dstpath, "foo", "bar")) assert.T(t, fileinfo != nil) assert.Equal(t, uint32(0711), fileinfo.Permission()) } Fix test condition. package sync import ( "fmt" "io" "os" "path/filepath" "github.com/cmars/replican-sync/replican/fs" "github.com/cmars/replican-sync/replican/treegen" "strings" "testing" "github.com/bmizerany/assert" ) // Print a description of the steps that the patch plan will follow. func printPlan(plan *PatchPlan) { for i := 0; i < len(plan.Cmds); i++ { fmt.Printf("%s\n", plan.Cmds[i].String()) } } // Test an actual file patch on the munged file scenario from TestMatchMunge. // Resulting patched file should be identical to the source file. func TestPatch(t *testing.T) { srcPath := "../../testroot/My Music/0 10k 30.mp4" dstPath := filepath.Join(os.TempDir(), "foo.mp4") os.Remove(dstPath) origDstF, err := os.Open("../../testroot/My Music/0 10k 30 munged.mp4") assert.Tf(t, err == nil, "%v", err) dstF, err := os.Create(dstPath) assert.Tf(t, err == nil, "%v", err) _, err = io.Copy(dstF, origDstF) assert.Tf(t, err == nil, "%v", err) origDstF.Close() dstF.Close() patchPlan, err := Patch(srcPath, dstPath) // printPlan(patchPlan) assert.Tf(t, err == nil, "%v", err) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) srcFileInfo, _, err := fs.IndexFile(srcPath) assert.T(t, err == nil) dstFileInfo, _, err := fs.IndexFile(dstPath) assert.Tf(t, err == nil, "%v", err) assert.Equal(t, srcFileInfo.Strong, dstFileInfo.Strong) } // Test the patch planner on two identical directory structures. 
func TestPatchIdentity(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) assert.T(t, len(patchPlan.Cmds) > 0) for i := 0; i < len(patchPlan.Cmds); i++ { keep := patchPlan.Cmds[0].(*Keep) assert.T(t, strings.HasPrefix(dstpath, keep.Path.Resolve())) } } // Test the matcher on a case where the source file has the same // prefix as destination, but has been appended to. func TestMatchAppend(t *testing.T) { tg := treegen.New() treeSpec := tg.F("bar", tg.B(42, 65537), tg.B(43, 65537)) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) // Try indexing root dir as a file srcFileInfo, srcBlocksInfo, err := fs.IndexFile(srcpath) assert.Tf(t, err != nil, "%v", err) // Ok, for real this time srcFileInfo, srcBlocksInfo, err = fs.IndexFile(filepath.Join(srcpath, "bar")) assert.Tf(t, err == nil, "%v", err) assert.Equal(t, 17, len(srcBlocksInfo)) tg = treegen.New() treeSpec = tg.F("bar", tg.B(42, 65537)) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) _, dstBlocksInfo, err := fs.IndexFile(filepath.Join(dstpath, "bar")) assert.Equal(t, 9, len(dstBlocksInfo)) srcRepo := fs.NewMemRepo() srcFile := srcRepo.AddFile(nil, srcFileInfo, srcBlocksInfo) match, err := MatchFile(srcFile, filepath.Join(dstpath, "bar")) assert.T(t, err == nil, "%v", err) assert.Equal(t, 8, len(match.BlockMatches)) notMatched := match.NotMatched() assert.Equal(t, 1, len(notMatched)) assert.Equal(t, int64(65536), notMatched[0].From) assert.Equal(t, int64(65537+65537), notMatched[0].To) } // Test the patch planner on a case where the source file has the same // prefix 
as destination, but has been appended to. // Execute the patch plan and check both resulting trees are identical. func TestPatchFileAppend(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537), tg.B(43, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("bar", tg.B(42, 65537))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) complete := false for i, cmd := range patchPlan.Cmds { switch { case i == 0: localTemp, isTemp := cmd.(*LocalTemp) assert.T(t, isTemp) assert.Equal(t, filepath.Join(dstpath, "foo", "bar"), localTemp.Path.Resolve()) case i >= 1 && i <= 8: ltc, isLtc := cmd.(*LocalTempCopy) assert.Tf(t, isLtc, "cmd %d", i) assert.Equal(t, ltc.LocalOffset, ltc.TempOffset) assert.Equal(t, int64(fs.BLOCKSIZE), ltc.Length) assert.Equal(t, int64(0), ltc.LocalOffset%int64(fs.BLOCKSIZE)) case i == 9: stc, isStc := cmd.(*SrcTempCopy) assert.T(t, isStc) assert.Equal(t, int64(65538), stc.Length) case i == 10: _, isRwt := cmd.(*ReplaceWithTemp) assert.T(t, isRwt) complete = true case i > 10: t.Fatalf("too many commands") } } assert.T(t, complete, "missing expected number of commands") failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcRoot := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstRoot := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcRoot.Info().Strong, dstRoot.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } // Test the patch planner on a case where the source file is a shorter, // truncated version of the destination. 
// Execute the patch plan and check both resulting trees are identical. func TestPatchFileTruncate(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("bar", tg.B(42, 65537), tg.B(43, 65537))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) complete := false for i, cmd := range patchPlan.Cmds { switch { case i == 0: localTemp, isTemp := cmd.(*LocalTemp) assert.T(t, isTemp) assert.Equal(t, filepath.Join(dstpath, "foo", "bar"), localTemp.Path.Resolve()) case i >= 1 && i <= 8: ltc, isLtc := cmd.(*LocalTempCopy) assert.Tf(t, isLtc, "cmd %d", i) assert.Equal(t, ltc.LocalOffset, ltc.TempOffset) assert.Equal(t, int64(fs.BLOCKSIZE), ltc.Length) assert.Equal(t, int64(0), ltc.LocalOffset%int64(fs.BLOCKSIZE)) case i == 9: stc, isStc := cmd.(*SrcTempCopy) assert.T(t, isStc) assert.Equal(t, int64(1), stc.Length) complete = true case i > 10: t.Fatalf("too many commands") } } assert.T(t, complete, "missing expected number of commands") failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcRoot := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstRoot := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcRoot.Info().Strong, dstRoot.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } // Test the patch planner's ability to track adding a bunch of new files. 
func TestPatchAdd(t *testing.T) {
	tg := treegen.New()
	// Generate ten files of increasing size (0, 500000, 1000000, ... bytes).
	// NOTE(review): tg.F("") passes an empty name — presumably treegen
	// synthesizes a name in that case; confirm against treegen.
	files := []treegen.Generated{}
	for i := 0; i < 10; i++ {
		files = append(files, tg.F("", tg.B(int64(42*i), int64(500000*i))))
	}
	treeSpec := tg.D("foo", tg.D("bar", files...))

	srcpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(srcpath)
	srcStore, err := fs.NewLocalStore(filepath.Join(srcpath, "foo"), fs.NewMemRepo())
	assert.T(t, err == nil)

	// Destination has the directories but none of the files.
	tg = treegen.New()
	treeSpec = tg.D("foo", tg.D("bar"), tg.D("baz"))

	dstpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(dstpath)
	dstStore, err := fs.NewLocalStore(filepath.Join(dstpath, "foo"), fs.NewMemRepo())
	assert.T(t, err == nil)

	patchPlan := NewPatchPlan(srcStore, dstStore)
	//	printPlan(patchPlan)

	// With no local data to reuse, every planned command must be a
	// whole-file download from the source.
	for _, cmd := range patchPlan.Cmds {
		_, isSfd := cmd.(*SrcFileDownload)
		assert.T(t, isSfd)
	}
}

// Test patch planner on a file rename. Contents remain the same.
func TestPatchRenameFileSameDir(t *testing.T) {
	tg := treegen.New()
	treeSpec := tg.D("foo", tg.F("bar", tg.B(42, 65537)))

	srcpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(srcpath)
	srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo())
	assert.T(t, err == nil)

	// Same content (seed 42), different file name in the destination.
	tg = treegen.New()
	treeSpec = tg.D("foo", tg.F("baz", tg.B(42, 65537)))

	dstpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(dstpath)
	dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo())
	assert.T(t, err == nil)

	// The plan should be a single in-place rename: move dst's "baz"
	// (matching content) to the source's name "bar".
	patchPlan := NewPatchPlan(srcStore, dstStore)
	assert.Equal(t, 1, len(patchPlan.Cmds))
	rename, isRename := patchPlan.Cmds[0].(*Transfer)
	assert.T(t, isRename)
	assert.T(t, strings.HasSuffix(rename.From.Resolve(), filepath.Join("foo", "baz")))
	assert.T(t, strings.HasSuffix(rename.To.Resolve(), filepath.Join("foo", "bar")))
}

// Test patch planner on a file directory restructuring between
// source and destination, where files have identical content in both.
func TestPatchRenameFileDifferentDir(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("gloo", tg.F("bloo", tg.B(99, 99)), tg.D("groo", tg.D("snoo", tg.F("bar", tg.B(42, 65537)))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("pancake", tg.F("butter", tg.B(42, 65537)), tg.F("syrup", tg.B(99, 99))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) assert.Equal(t, 2, len(patchPlan.Cmds)) for i := 0; i < len(patchPlan.Cmds); i++ { _, isRename := patchPlan.Cmds[0].(*Transfer) assert.T(t, isRename) } // Now flip patchPlan = NewPatchPlan(dstStore, srcStore) assert.Equal(t, 2, len(patchPlan.Cmds)) for i := 0; i < len(patchPlan.Cmds); i++ { _, isRename := patchPlan.Cmds[0].(*Transfer) assert.T(t, isRename) } } // Test patch planner on case where the source and // destination have a direct conflict in structure. // A path in the source is a directory, path in destination // already contains a file at that location. 
func TestPatchSimpleDirFileConflict(t *testing.T) {
	tg := treegen.New()
	treeSpec := tg.D("foo",
		tg.D("gloo",
			tg.F("bloo", tg.B(99, 99)),
			tg.D("groo",
				tg.D("snoo",
					tg.F("bar", tg.B(42, 65537))))))

	srcpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(srcpath)
	srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo())
	assert.T(t, err == nil)

	// In the destination, "foo/gloo" is a FILE (seed 99, different size),
	// conflicting with the source where "foo/gloo" is a directory.
	tg = treegen.New()
	treeSpec = tg.D("foo", tg.F("gloo", tg.B(99, 999)))

	dstpath := treegen.TestTree(t, treeSpec)
	defer os.RemoveAll(dstpath)
	dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo())
	assert.T(t, err == nil)

	patchPlan := NewPatchPlan(srcStore, dstStore)
	//	printPlan(patchPlan)

	failedCmd, err := patchPlan.Exec()
	assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err)

	// Expected plan: a Conflict on the file/dir clash, then two
	// whole-file downloads for the source files (identified by their
	// strong checksums, which are fixed by the deterministic seeds).
	assert.Equal(t, 3, len(patchPlan.Cmds))
	for i, cmd := range patchPlan.Cmds {
		switch i {
		case 0:
			conflict, is := cmd.(*Conflict)
			assert.T(t, is)
			assert.T(t, strings.HasSuffix(conflict.Path.RelPath, filepath.Join("foo", "gloo")))
		case 1:
			copy, is := cmd.(*SrcFileDownload)
			assert.T(t, is)
			assert.Equal(t, "beced72da0cf22301e23bdccec61bf9763effd6f", copy.SrcFile.Info().Strong)
		case 2:
			copy, is := cmd.(*SrcFileDownload)
			assert.T(t, is)
			assert.Equal(t, "764b5f659f70e69d4a87fe6ed138af40be36c514", copy.SrcFile.Info().Strong)
		}
	}
}

// assertNoRelocs fails the test if any "_reloc"-prefixed entry
// (a leftover relocation artifact) remains in the given directory.
func assertNoRelocs(t *testing.T, path string) {
	d, err := os.Open(path)
	assert.T(t, err == nil)

	names, err := d.Readdirnames(0)
	assert.T(t, err == nil)

	for _, name := range names {
		assert.T(t, !strings.HasPrefix(name, "_reloc"))
	}
}

// Test patch planner on case where the source and
// destination have a direct conflict in structure.
// A path in the source is a directory, path in destination
// already contains a file at that location.
func TestPatchRelocConflict(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("gloo", tg.F("bloo", tg.B(99, 99)), tg.D("groo", tg.D("snoo", tg.F("bar", tg.B(42, 65537)))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("gloo", tg.B(99, 99))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) assert.Equal(t, 3, len(patchPlan.Cmds)) for i, cmd := range patchPlan.Cmds { switch i { case 0: conflict, is := cmd.(*Conflict) assert.T(t, is) assert.T(t, strings.HasSuffix(conflict.Path.RelPath, filepath.Join("foo", "gloo"))) case 1: copy, is := cmd.(*Transfer) assert.T(t, is) assert.T(t, strings.HasSuffix(copy.From.Resolve(), filepath.Join("foo", "gloo"))) assert.T(t, strings.HasSuffix(copy.To.Resolve(), filepath.Join("foo", "gloo", "bloo"))) case 2: copy, is := cmd.(*SrcFileDownload) assert.T(t, is) assert.Equal(t, "764b5f659f70e69d4a87fe6ed138af40be36c514", copy.SrcFile.Info().Strong) } } failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) assertNoRelocs(t, dstpath) } func TestPatchDepConflict(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("gloo", tg.F("bloo", tg.B(99, 8192), tg.B(100, 10000)), tg.D("groo", tg.D("snoo", tg.F("bar", tg.B(42, 65537)))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("gloo", tg.B(99, 10000))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, 
dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) assertNoRelocs(t, dstpath) } func TestPatchWeakCollision(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(6806, 65536))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("bar", tg.B(9869, 65536))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) // Src and dst blocks have same weak checksum assert.Equal(t, (srcStore.Repo().Root().(fs.Dir)).SubDirs()[0].Files()[0].Blocks()[0].Info().Weak, (dstStore.Repo().Root().(fs.Dir)).SubDirs()[0].Files()[0].Blocks()[0].Info().Weak) // Src and dst blocks have different strong checksum srcRoot := srcStore.Repo().Root().(fs.Dir) dstRoot := dstStore.Repo().Root().(fs.Dir) assert.Tf(t, srcRoot.Info().Strong != dstRoot.Info().Strong, "wtf: %v == %v", srcRoot.Info().Strong, dstRoot.Info().Strong) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcDir := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstDir := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcDir.Info().Strong, dstDir.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } func TestPatchRenameScope(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(6806, 65536)), tg.F("baz", tg.B(6806, 65536))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", 
tg.F("baz", tg.B(6806, 65536)), tg.F("blop", tg.B(6806, 65536))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) errorChan := make(chan os.Error) go func() { srcDir := fs.IndexDir(srcpath, fs.NewMemRepo(), errorChan) dstDir := fs.IndexDir(dstpath, fs.NewMemRepo(), errorChan) assert.Equal(t, srcDir.Info().Strong, dstDir.Info().Strong) close(errorChan) }() for err := range errorChan { assert.Tf(t, err == nil, "%v", err) } } func TestPatchPreserveKeeps(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.F("bar", tg.B(6806, 65536)), tg.F("blop", tg.B(6806, 65536))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.F("baz", tg.B(6806, 65536)), tg.F("blop", tg.B(6806, 65536))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) // printPlan(patchPlan) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil && err == nil, "%v: %v", failedCmd, err) info, err := os.Stat(filepath.Join(dstpath, "foo", "bar")) assert.T(t, err == nil && info != nil) info, err = os.Stat(filepath.Join(dstpath, "foo", "blop")) assert.T(t, err == nil && info != nil) } func TestClean(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) srcpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = 
tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))), tg.D("beth", tg.F("B", tg.B(43, 65537)), tg.F("b", tg.B(43, 65537))), tg.D("jimmy", tg.F("G", tg.B(44, 65537)), tg.F("g", tg.B(44, 65537)))), tg.D("baz", tg.D("uno", tg.F("1", tg.B(1, 65537)), tg.F("I", tg.B(1, 65537))), tg.D("dos", tg.F("2", tg.B(11, 65537)), tg.F("II", tg.B(11, 65537))), tg.D("tres", tg.F("3", tg.B(111, 65537)), tg.F("III", tg.B(111, 65537))))) dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) onePath := dstStore.Resolve(filepath.Join("foo", "baz", "uno", "1")) _, err = os.Stat(onePath) assert.Tf(t, err == nil, "%v", err) patchPlan := NewPatchPlan(srcStore, dstStore) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil, "%v", failedCmd) assert.Tf(t, err == nil, "%v", err) errors := make(chan os.Error) go func() { patchPlan.Clean(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } onePath = dstStore.Resolve(filepath.Join("foo", "baz", "uno", "1")) _, err = os.Stat(onePath) assert.Tf(t, err != nil, "%v", err) } func TestSetModeNew(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) srcpath := treegen.TestTree(t, treeSpec) os.Chmod(filepath.Join(srcpath, "foo", "bar", "aleph", "A"), 0765) os.Chmod(filepath.Join(srcpath, "foo", "bar"), 0711) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo") dstpath := treegen.TestTree(t, treeSpec) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil, "%v", failedCmd) assert.Tf(t, err == nil, "%v", err) errors := 
make(chan os.Error) go func() { patchPlan.Clean(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } errors = make(chan os.Error) go func() { patchPlan.SetMode(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } fileinfo, err := os.Stat(filepath.Join(dstpath, "foo", "bar", "aleph", "A")) assert.T(t, fileinfo != nil) assert.Equal(t, uint32(0765), fileinfo.Permission()) fileinfo, err = os.Stat(filepath.Join(dstpath, "foo", "bar")) assert.T(t, fileinfo != nil) assert.Equal(t, uint32(0711), fileinfo.Permission()) } func TestSetModeOverwrite(t *testing.T) { tg := treegen.New() treeSpec := tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) srcpath := treegen.TestTree(t, treeSpec) os.Chmod(filepath.Join(srcpath, "foo", "bar", "aleph", "A"), 0765) os.Chmod(filepath.Join(srcpath, "foo", "bar"), 0711) defer os.RemoveAll(srcpath) srcStore, err := fs.NewLocalStore(srcpath, fs.NewMemRepo()) assert.T(t, err == nil) tg = treegen.New() treeSpec = tg.D("foo", tg.D("bar", tg.D("aleph", tg.F("A", tg.B(42, 65537)), tg.F("a", tg.B(42, 65537))))) dstpath := treegen.TestTree(t, treeSpec) os.Chmod(filepath.Join(dstpath, "foo", "bar", "aleph", "A"), 0600) os.Chmod(filepath.Join(dstpath, "foo", "bar"), 0700) defer os.RemoveAll(dstpath) dstStore, err := fs.NewLocalStore(dstpath, fs.NewMemRepo()) assert.T(t, err == nil) patchPlan := NewPatchPlan(srcStore, dstStore) failedCmd, err := patchPlan.Exec() assert.Tf(t, failedCmd == nil, "%v %v", failedCmd, err) assert.Tf(t, err == nil, "%v", err) errors := make(chan os.Error) go func() { patchPlan.Clean(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } errors = make(chan os.Error) go func() { patchPlan.SetMode(errors) close(errors) }() for err := range errors { assert.Tf(t, err == nil, "%v", err) } fileinfo, err := os.Stat(filepath.Join(dstpath, "foo", "bar", "aleph", "A")) assert.T(t, 
fileinfo != nil) assert.Equal(t, uint32(0765), fileinfo.Permission()) fileinfo, err = os.Stat(filepath.Join(dstpath, "foo", "bar")) assert.T(t, fileinfo != nil) assert.Equal(t, uint32(0711), fileinfo.Permission()) }
// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !nofilesystem package collector import ( "bufio" "fmt" "io" "os" "strings" "sync" "time" "github.com/prometheus/common/log" "golang.org/x/sys/unix" ) const ( defIgnoredMountPoints = "^/(dev|proc|sys|var/lib/docker/.+)($|/)" defIgnoredFSTypes = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$" mountTimeout = 30 * time.Second ) var stuckMounts = make(map[string]struct{}) var stuckMountsMtx = &sync.Mutex{} // GetStats returns filesystem stats. 
func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { mps, err := mountPointDetails() if err != nil { return nil, err } stats := []filesystemStats{} for _, labels := range mps { if c.ignoredMountPointsPattern.MatchString(labels.mountPoint) { log.Debugf("Ignoring mount point: %s", labels.mountPoint) continue } if c.ignoredFSTypesPattern.MatchString(labels.fsType) { log.Debugf("Ignoring fs type: %s", labels.fsType) continue } stuckMountsMtx.Lock() if _, ok := stuckMounts[labels.mountPoint]; ok { stats = append(stats, filesystemStats{ labels: labels, deviceError: 1, }) log.Debugf("Mount point %q is in an unresponsive state", labels.mountPoint) stuckMountsMtx.Unlock() continue } stuckMountsMtx.Unlock() // The success channel is used do tell the "watcher" that the stat // finished successfully. The channel is closed on success. success := make(chan struct{}) go stuckMountWatcher(labels.mountPoint, success) buf := new(unix.Statfs_t) err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf) stuckMountsMtx.Lock() close(success) // If the mount has been marked as stuck, unmark it and log it's recovery. 
if _, ok := stuckMounts[labels.mountPoint]; ok { log.Debugf("Mount point %q has recovered, monitoring will resume", labels.mountPoint) delete(stuckMounts, labels.mountPoint) } stuckMountsMtx.Unlock() if err != nil { stats = append(stats, filesystemStats{ labels: labels, deviceError: 1, }) log.Debugf("Error on statfs() system call for %q: %s", rootfsFilePath(labels.mountPoint), err) continue } var ro float64 for _, option := range strings.Split(labels.options, ",") { if option == "ro" { ro = 1 break } } stats = append(stats, filesystemStats{ labels: labels, size: float64(buf.Blocks) * float64(buf.Bsize), free: float64(buf.Bfree) * float64(buf.Bsize), avail: float64(buf.Bavail) * float64(buf.Bsize), files: float64(buf.Files), filesFree: float64(buf.Ffree), ro: ro, }) } return stats, nil } // stuckMountWatcher listens on the given success channel and if the channel closes // then the watcher does nothing. If instead the timeout is reached, the // mount point that is being watched is marked as stuck. func stuckMountWatcher(mountPoint string, success chan struct{}) { select { case <-success: // Success case <-time.After(mountTimeout): // Timed out, mark mount as stuck stuckMountsMtx.Lock() select { case <-success: // Success came in just after the timeout was reached, don't label the mount as stuck default: log.Debugf("Mount point %q timed out, it is being labeled as stuck and will not be monitored", mountPoint) stuckMounts[mountPoint] = struct{}{} } stuckMountsMtx.Unlock() } } func mountPointDetails() ([]filesystemLabels, error) { file, err := os.Open(procFilePath("1/mounts")) if os.IsNotExist(err) { // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid. 
log.Debugf("Got %q reading root mounts, falling back to system mounts", err) file, err = os.Open(procFilePath("mounts")) } if err != nil { return nil, err } defer file.Close() return parseFilesystemLabels(file) } func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) { var filesystems []filesystemLabels scanner := bufio.NewScanner(r) for scanner.Scan() { parts := strings.Fields(scanner.Text()) if len(parts) < 4 { return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text()) } // Ensure we handle the translation of \040 and \011 // as per fstab(5). parts[1] = strings.Replace(parts[1], "\\040", " ", -1) parts[1] = strings.Replace(parts[1], "\\011", "\t", -1) filesystems = append(filesystems, filesystemLabels{ device: parts[0], mountPoint: rootfsStripPrefix(parts[1]), fsType: parts[2], options: parts[3], }) } return filesystems, scanner.Err() } Add a flag to adjust mount timeout Signed-off-by: Mark Knapp <418cb9768da76e83a917936e8a102e555d21ab2d@hudson-trading.com> // Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// +build !nofilesystem

package collector

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/common/log"
	"golang.org/x/sys/unix"
	kingpin "gopkg.in/alecthomas/kingpin.v2"
)

const (
	defIgnoredMountPoints = "^/(dev|proc|sys|var/lib/docker/.+)($|/)"
	defIgnoredFSTypes     = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
)

// mountTimeout bounds how long a statfs(2) call may take before the mount
// point is declared stuck; configurable via a hidden command-line flag.
var mountTimeout = kingpin.Flag("collector.filesystem.mount-timeout",
	"how long to wait for a mount to respond before marking it as stale").
	Hidden().Default("5s").Duration()

// stuckMounts tracks mount points whose statfs timed out; entries are
// skipped on subsequent scrapes until the pending call returns.
// Guarded by stuckMountsMtx (accessed from GetStats and watcher goroutines).
var stuckMounts = make(map[string]struct{})
var stuckMountsMtx = &sync.Mutex{}

// GetStats returns filesystem stats.
func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
	mps, err := mountPointDetails()
	if err != nil {
		return nil, err
	}
	stats := []filesystemStats{}
	for _, labels := range mps {
		if c.ignoredMountPointsPattern.MatchString(labels.mountPoint) {
			log.Debugf("Ignoring mount point: %s", labels.mountPoint)
			continue
		}
		if c.ignoredFSTypesPattern.MatchString(labels.fsType) {
			log.Debugf("Ignoring fs type: %s", labels.fsType)
			continue
		}
		// Known-stuck mounts are reported as a device error without
		// issuing another statfs that could also hang.
		stuckMountsMtx.Lock()
		if _, ok := stuckMounts[labels.mountPoint]; ok {
			stats = append(stats, filesystemStats{
				labels:      labels,
				deviceError: 1,
			})
			log.Debugf("Mount point %q is in an unresponsive state", labels.mountPoint)
			stuckMountsMtx.Unlock()
			continue
		}
		stuckMountsMtx.Unlock()

		// The success channel is used to tell the "watcher" that the stat
		// finished successfully. The channel is closed on success.
		success := make(chan struct{})
		go stuckMountWatcher(labels.mountPoint, success)

		buf := new(unix.Statfs_t)
		err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf)
		stuckMountsMtx.Lock()
		close(success)
		// If the mount has been marked as stuck, unmark it and log its recovery.
		if _, ok := stuckMounts[labels.mountPoint]; ok {
			log.Debugf("Mount point %q has recovered, monitoring will resume", labels.mountPoint)
			delete(stuckMounts, labels.mountPoint)
		}
		stuckMountsMtx.Unlock()
		if err != nil {
			stats = append(stats, filesystemStats{
				labels:      labels,
				deviceError: 1,
			})
			log.Debugf("Error on statfs() system call for %q: %s", rootfsFilePath(labels.mountPoint), err)
			continue
		}

		var ro float64
		for _, option := range strings.Split(labels.options, ",") {
			if option == "ro" {
				ro = 1
				break
			}
		}

		stats = append(stats, filesystemStats{
			labels:    labels,
			size:      float64(buf.Blocks) * float64(buf.Bsize),
			free:      float64(buf.Bfree) * float64(buf.Bsize),
			avail:     float64(buf.Bavail) * float64(buf.Bsize),
			files:     float64(buf.Files),
			filesFree: float64(buf.Ffree),
			ro:        ro,
		})
	}
	return stats, nil
}

// stuckMountWatcher listens on the given success channel and if the channel closes
// then the watcher does nothing. If instead the timeout is reached, the
// mount point that is being watched is marked as stuck.
func stuckMountWatcher(mountPoint string, success chan struct{}) {
	select {
	case <-success:
		// Success
	case <-time.After(*mountTimeout):
		// Timed out, mark mount as stuck
		stuckMountsMtx.Lock()
		// Re-check under the lock: GetStats closes the channel while
		// holding stuckMountsMtx, so this select cannot race with it.
		select {
		case <-success:
			// Success came in just after the timeout was reached, don't label the mount as stuck
		default:
			log.Debugf("Mount point %q timed out, it is being labeled as stuck and will not be monitored", mountPoint)
			stuckMounts[mountPoint] = struct{}{}
		}
		stuckMountsMtx.Unlock()
	}
}

// mountPointDetails reads the mount table for PID 1 (falling back to the
// system-wide table) and returns one label set per mounted filesystem.
func mountPointDetails() ([]filesystemLabels, error) {
	file, err := os.Open(procFilePath("1/mounts"))
	if os.IsNotExist(err) {
		// Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due to hidepid.
		log.Debugf("Got %q reading root mounts, falling back to system mounts", err)
		file, err = os.Open(procFilePath("mounts"))
	}
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return parseFilesystemLabels(file)
}

// parseFilesystemLabels parses mounts-file lines ("device mountpoint fstype
// options ...") into filesystemLabels; it errors on lines with fewer than
// four fields.
func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) {
	var filesystems []filesystemLabels

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) < 4 {
			return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
		}

		// Ensure we handle the translation of \040 and \011
		// as per fstab(5).
		parts[1] = strings.Replace(parts[1], "\\040", " ", -1)
		parts[1] = strings.Replace(parts[1], "\\011", "\t", -1)

		filesystems = append(filesystems, filesystemLabels{
			device:     parts[0],
			mountPoint: rootfsStripPrefix(parts[1]),
			fsType:     parts[2],
			options:    parts[3],
		})
	}
	return filesystems, scanner.Err()
}
package pack import ( "github.com/git-lfs/git-lfs/errors" ) // ChainDelta represents a "delta" component of a delta-base chain. type ChainDelta struct { // Base is the base delta-base chain that this delta should be applied // to. It can be a ChainBase in the simple case, or it can itself be a // ChainDelta, which resolves against another ChainBase, when the // delta-base chain is of length greater than 2. base Chain // delta is the set of copy/add instructions to apply on top of the // base. delta []byte } // Unpack applies the delta operation to the previous delta-base chain, "base". // // If any of the delta-base instructions were invalid, an error will be // returned. func (d *ChainDelta) Unpack() ([]byte, error) { base, err := d.base.Unpack() if err != nil { return nil, err } return patch(base, d.delta) } // Type returns the type of the base of the delta-base chain. func (d *ChainDelta) Type() PackedObjectType { return d.base.Type() } // patch applies the delta instructions in "delta" to the base given as "base". // It returns the result of applying those patch instructions to base, but does // not modify base itself. // // If any of the delta instructions were malformed, or otherwise could not be // applied to the given base, an error will returned, along with an empty set of // data. func patch(base, delta []byte) ([]byte, error) { srcSize, pos := patchDeltaHeader(delta, 0) if srcSize != int64(len(base)) { // The header of the delta gives the size of the source contents // that it is a patch over. // // If this does not match with the srcSize, return an error // early so as to avoid a possible bounds error below. return nil, errors.New("git/odb/pack: invalid delta data") } var dest []byte // The remainder of the delta header contains the destination size, and // moves the "pos" offset to the correct position to begin the set of // delta instructions. 
destSize, pos := patchDeltaHeader(delta, pos) for pos < len(delta) { c := int(delta[pos]) pos += 1 if c&0x80 != 0 { // If the most significant bit (MSB, at position 0x80) // is set, this is a copy instruction. Advance the // position one byte backwards, and initialize variables // for the copy offset and size instructions. pos -= 1 var co, cs int // The lower-half of "c" (0000 1111) defines a "bitmask" // for the copy offset. if c&0x1 != 0 { pos += 1 co = int(delta[pos]) } if c&0x2 != 0 { pos += 1 co |= (int(delta[pos]) << 8) } if c&0x4 != 0 { pos += 1 co |= (int(delta[pos]) << 16) } if c&0x8 != 0 { pos += 1 co |= (int(delta[pos]) << 24) } // The upper-half of "c" (1111 0000) defines a "bitmask" // for the size of the copy instruction. if c&0x10 != 0 { pos += 1 cs = int(delta[pos]) } if c&0x20 != 0 { pos += 1 cs |= (int(delta[pos]) << 8) } if c&0x40 != 0 { pos += 1 cs |= (int(delta[pos]) << 16) } if cs == 0 { // If the copy size is zero, we assume that it // is the next whole number after the max uint32 // value. cs = 0x10000 } pos += 1 // Once we have the copy offset and length defined, copy // that number of bytes from the base into the // destination. Since we are copying from the base and // not the delta, the position into the delta ("pos") // need not be updated. dest = append(dest, base[co:co+cs]...) } else if c != 0 { // If the most significant bit (MSB) is _not_ set, we // instead process a copy instruction, where "c" is the // number of successive bytes in the delta patch to add // to the output. // // Copy the bytes and increment the read pointer // forward. dest = append(dest, delta[pos:int(pos)+c]...) pos += int(c) } else { // Otherwise, "c" is 0, and is an invalid delta // instruction. // // Return immediately. 
			return nil, errors.New(
				"git/odb/pack: invalid delta data")
		}
	}

	if destSize != int64(len(dest)) {
		// If after patching the delta against the base, the destination
		// size is different than the expected destination size, we have
		// an invalid set of patch instructions.
		//
		// Return immediately.
		return nil, errors.New("git/odb/pack: invalid delta data")
	}
	return dest, nil
}

// patchDeltaHeader examines the header within delta at the given offset, and
// returns the size encoded within it, as well as the ending offset where begins
// the next header, or the patch instructions.
func patchDeltaHeader(delta []byte, pos int) (size int64, end int) {
	var shift uint
	var c int64

	for shift == 0 || c&0x80 != 0 {
		if len(delta) <= pos {
			panic("git/odb/pack: invalid delta header")
		}

		c = int64(delta[pos])
		pos++

		size |= (c & 0x7f) << shift
		shift += 7
	}

	return size, pos
}

git/odb/pack: prevent unnecessary runtime.memmove

Running 'go tool pprof' in CPU mode while Git LFS is running a migration
against a large repository shows that 4.73% of the CPU time is spent in a
function called 'runtime.memmove'.

This is the function called when append()-ing to a slice causes the slice to
grow, and memory occupied by the existing slice must be moved into a different
contiguous group.

```
$ go tool pprof $(which git-lfs) git-lfs-1504820145.pprof dot
File: git-lfs
Type: cpu
Time: Sep 7, 2017 at 5:35pm (EDT)
Duration: 35.17s, Total samples = 30.42s (86.49%)
Entering interactive mode (type "help" for commands, "o" for options)
(pprof) list runtime\.memmove
Total: 30.42s
ROUTINE ======================== runtime.memmove in ...
     1.44s      1.44s (flat, cum)  4.73% of Total
         .          .     30:// void runtime·memmove(void*, void*, uintptr)
         .          .     31:TEXT runtime·memmove(SB), NOSPLIT, $0-24
         .          .     32: ...
```

The implementation of 'runtime.memmove' is [fairly complex][1] and takes a
relatively long amount of time to execute.
Unfortunately, the 'patch' function in package 'git/odb/pack' is one of the smaller contributors: ``` (pprof) tree runtime\.memmove ----------------------------------------------------------+------------- 0.01s 100% | git/odb/pack.patch 0 0% 4.73% 0.01s 0.033% | runtime.growslice 0.01s 100% | runtime.gcAssistAlloc ----------------------------------------------------------+------------- ``` That said, the delta instructions do hint at the size of the patched result of applying 'delta' to 'base', which we can use to eagerly allocate a contiguous block of memory for. [1]: https://github.com/golang/go/blob/go1.9/src/runtime/memmove_amd64.s package pack import ( "github.com/git-lfs/git-lfs/errors" ) // ChainDelta represents a "delta" component of a delta-base chain. type ChainDelta struct { // Base is the base delta-base chain that this delta should be applied // to. It can be a ChainBase in the simple case, or it can itself be a // ChainDelta, which resolves against another ChainBase, when the // delta-base chain is of length greater than 2. base Chain // delta is the set of copy/add instructions to apply on top of the // base. delta []byte } // Unpack applies the delta operation to the previous delta-base chain, "base". // // If any of the delta-base instructions were invalid, an error will be // returned. func (d *ChainDelta) Unpack() ([]byte, error) { base, err := d.base.Unpack() if err != nil { return nil, err } return patch(base, d.delta) } // Type returns the type of the base of the delta-base chain. func (d *ChainDelta) Type() PackedObjectType { return d.base.Type() } // patch applies the delta instructions in "delta" to the base given as "base". // It returns the result of applying those patch instructions to base, but does // not modify base itself. // // If any of the delta instructions were malformed, or otherwise could not be // applied to the given base, an error will returned, along with an empty set of // data. 
func patch(base, delta []byte) ([]byte, error) {
	srcSize, pos := patchDeltaHeader(delta, 0)
	if srcSize != int64(len(base)) {
		// The header of the delta gives the size of the source contents
		// that it is a patch over.
		//
		// If this does not match with the srcSize, return an error
		// early so as to avoid a possible bounds error below.
		return nil, errors.New("git/odb/pack: invalid delta data")
	}

	// The remainder of the delta header contains the destination size, and
	// moves the "pos" offset to the correct position to begin the set of
	// delta instructions.
	destSize, pos := patchDeltaHeader(delta, pos)

	// The header tells us exactly how large the patched result will be, so
	// allocate the destination up front and let append() fill it without
	// having to re-grow (and memmove) the backing array.
	dest := make([]byte, 0, destSize)

	for pos < len(delta) {
		c := int(delta[pos])
		pos += 1

		if c&0x80 != 0 {
			// If the most significant bit (MSB, at position 0x80)
			// is set, this is a copy instruction. Step the position
			// back one byte so that the operand reads below are
			// relative to the instruction byte itself.
			pos -= 1

			// The low seven bits of "c" form a bitmask naming
			// which operand bytes follow: bits 0x01..0x08 select
			// bytes of the copy offset, and bits 0x10..0x40 select
			// bytes of the copy size. Gather each named byte,
			// rejecting a truncated delta with an error instead of
			// panicking on an out-of-range read.
			var operand [7]int
			for i := uint(0); i < 7; i++ {
				if c&(1<<i) == 0 {
					continue
				}
				pos += 1
				if pos >= len(delta) {
					return nil, errors.New("git/odb/pack: invalid delta data")
				}
				operand[i] = int(delta[pos])
			}

			co := operand[0] | operand[1]<<8 | operand[2]<<16 | operand[3]<<24
			cs := operand[4] | operand[5]<<8 | operand[6]<<16

			if cs == 0 {
				// A zero copy size encodes the maximum copy
				// length, 0x10000 (i.e., 2^16, as in Git's
				// patch-delta.c).
				cs = 0x10000
			}
			pos += 1

			// Validate the copy range before reading from the
			// base: a corrupt offset or size must surface as an
			// error rather than a bounds panic. (co can wrap
			// negative on 32-bit platforms when the 0x08 operand
			// byte is present.)
			if co < 0 || co+cs > len(base) {
				return nil, errors.New("git/odb/pack: invalid delta data")
			}

			// Once we have the copy offset and length defined,
			// copy that number of bytes from the base into the
			// destination.
			dest = append(dest, base[co:co+cs]...)
		} else if c != 0 {
			// If the most significant bit (MSB) is _not_ set, this
			// is an add (insert) instruction, where "c" is the
			// number of successive bytes in the delta patch to add
			// to the output.
			//
			// Guard against a truncated delta, then copy the bytes
			// and advance the read pointer past them.
			if pos+c > len(delta) {
				return nil, errors.New("git/odb/pack: invalid delta data")
			}
			dest = append(dest, delta[pos:pos+c]...)
			pos += c
		} else {
			// Otherwise, "c" is 0, which is a reserved and
			// therefore invalid delta instruction.
			//
			// Return immediately.
			return nil, errors.New(
				"git/odb/pack: invalid delta data")
		}
	}

	if destSize != int64(len(dest)) {
		// If after patching the delta against the base, the destination
		// size is different than the expected destination size, we have
		// an invalid set of patch instructions.
		//
		// Return immediately.
		return nil, errors.New("git/odb/pack: invalid delta data")
	}
	return dest, nil
}

// patchDeltaHeader examines the header within delta at the given offset, and
// returns the size encoded within it, as well as the ending offset where begins
// the next header, or the patch instructions.
//
// The size is a little-endian base-128 varint: the low seven bits of each byte
// carry data, and a set high bit means another byte follows. The function
// panics if the header runs past the end of delta.
func patchDeltaHeader(delta []byte, pos int) (size int64, end int) {
	var shift uint
	var c int64

	for shift == 0 || c&0x80 != 0 {
		if len(delta) <= pos {
			panic("git/odb/pack: invalid delta header")
		}

		c = int64(delta[pos])
		pos++

		size |= (c & 0x7f) << shift
		shift += 7
	}

	return size, pos
}
// +build selinux,linux package selinux import ( "bufio" "bytes" "crypto/rand" "encoding/binary" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "regexp" "strconv" "strings" "sync" "github.com/opencontainers/selinux/pkg/pwalk" "github.com/pkg/errors" "github.com/willf/bitset" "golang.org/x/sys/unix" ) const ( minSensLen = 2 contextFile = "/usr/share/containers/selinux/contexts" selinuxDir = "/etc/selinux/" selinuxConfig = selinuxDir + "config" selinuxfsMount = "/sys/fs/selinux" selinuxTypeTag = "SELINUXTYPE" selinuxTag = "SELINUX" xattrNameSelinux = "security.selinux" ) type selinuxState struct { enabledSet bool enabled bool selinuxfsOnce sync.Once selinuxfs string mcsList map[string]bool sync.Mutex } type level struct { sens uint cats *bitset.BitSet } type mlsRange struct { low *level high *level } type levelItem byte const ( sensitivity levelItem = 's' category levelItem = 'c' ) var ( assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) readOnlyFileLabel string state = selinuxState{ mcsList: make(map[string]bool), } // for attrPath() attrPathOnce sync.Once haveThreadSelf bool ) func (s *selinuxState) setEnable(enabled bool) bool { s.Lock() defer s.Unlock() s.enabledSet = true s.enabled = enabled return s.enabled } func (s *selinuxState) getEnabled() bool { s.Lock() enabled := s.enabled enabledSet := s.enabledSet s.Unlock() if enabledSet { return enabled } enabled = false if fs := getSelinuxMountPoint(); fs != "" { if con, _ := CurrentLabel(); con != "kernel" { enabled = true } } return s.setEnable(enabled) } // setDisabled disables SELinux support for the package func setDisabled() { state.setEnable(false) } func verifySELinuxfsMount(mnt string) bool { var buf unix.Statfs_t for { err := unix.Statfs(mnt, &buf) if err == nil { break } if err == unix.EAGAIN { continue } return false } if uint32(buf.Type) != uint32(unix.SELINUX_MAGIC) { return false } if (buf.Flags & unix.ST_RDONLY) != 0 { return false } return true } func findSELinuxfs() string { // fast path: 
check the default mount first if verifySELinuxfsMount(selinuxfsMount) { return selinuxfsMount } // check if selinuxfs is available before going the slow path fs, err := ioutil.ReadFile("/proc/filesystems") if err != nil { return "" } if !bytes.Contains(fs, []byte("\tselinuxfs\n")) { return "" } // slow path: try to find among the mounts f, err := os.Open("/proc/self/mountinfo") if err != nil { return "" } defer f.Close() scanner := bufio.NewScanner(f) for { mnt := findSELinuxfsMount(scanner) if mnt == "" { // error or not found return "" } if verifySELinuxfsMount(mnt) { return mnt } } } // findSELinuxfsMount returns a next selinuxfs mount point found, // if there is one, or an empty string in case of EOF or error. func findSELinuxfsMount(s *bufio.Scanner) string { for s.Scan() { txt := s.Bytes() // The first field after - is fs type. // Safe as spaces in mountpoints are encoded as \040 if !bytes.Contains(txt, []byte(" - selinuxfs ")) { continue } const mPos = 5 // mount point is 5th field fields := bytes.SplitN(txt, []byte(" "), mPos+1) if len(fields) < mPos+1 { continue } return string(fields[mPos-1]) } return "" } func (s *selinuxState) getSELinuxfs() string { s.selinuxfsOnce.Do(func() { s.selinuxfs = findSELinuxfs() }) return s.selinuxfs } // getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs // filesystem or an empty string if no mountpoint is found. Selinuxfs is // a proc-like pseudo-filesystem that exposes the SELinux policy API to // processes. The existence of an selinuxfs mount is used to determine // whether SELinux is currently enabled or not. func getSelinuxMountPoint() string { return state.getSELinuxfs() } // getEnabled returns whether SELinux is currently enabled. 
func getEnabled() bool { return state.getEnabled() } func readConfig(target string) string { var ( val, key string bufin *bufio.Reader ) in, err := os.Open(selinuxConfig) if err != nil { return "" } defer in.Close() bufin = bufio.NewReader(in) for done := false; !done; { var line string if line, err = bufin.ReadString('\n'); err != nil { if err != io.EOF { return "" } done = true } line = strings.TrimSpace(line) if len(line) == 0 { // Skip blank lines continue } if line[0] == ';' || line[0] == '#' { // Skip comments continue } if groups := assignRegex.FindStringSubmatch(line); groups != nil { key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) if key == target { return strings.Trim(val, "\"") } } } return "" } func getSELinuxPolicyRoot() string { return filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) } func isProcHandle(fh *os.File) error { var buf unix.Statfs_t err := unix.Fstatfs(int(fh.Fd()), &buf) if err != nil { return errors.Wrapf(err, "statfs(%q) failed", fh.Name()) } if buf.Type != unix.PROC_SUPER_MAGIC { return errors.Errorf("file %q is not on procfs", fh.Name()) } return nil } func readCon(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } in, err := os.Open(fpath) if err != nil { return "", err } defer in.Close() if err := isProcHandle(in); err != nil { return "", err } var retval string if _, err := fmt.Fscanf(in, "%s", &retval); err != nil { return "", err } return strings.Trim(retval, "\x00"), nil } // classIndex returns the int index for an object class in the loaded policy, // or -1 and an error func classIndex(class string) (int, error) { permpath := fmt.Sprintf("class/%s/index", class) indexpath := filepath.Join(getSelinuxMountPoint(), permpath) indexB, err := ioutil.ReadFile(indexpath) if err != nil { return -1, err } index, err := strconv.Atoi(string(indexB)) if err != nil { return -1, err } return index, nil } // setFileLabel sets the SELinux label for this path or returns an error. 
func setFileLabel(fpath string, label string) error { if fpath == "" { return ErrEmptyPath } if err := unix.Lsetxattr(fpath, xattrNameSelinux, []byte(label), 0); err != nil { return errors.Wrapf(err, "failed to set file label on %s", fpath) } return nil } // fileLabel returns the SELinux label for this path or returns an error. func fileLabel(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } label, err := lgetxattr(fpath, xattrNameSelinux) if err != nil { return "", err } // Trim the NUL byte at the end of the byte buffer, if present. if len(label) > 0 && label[len(label)-1] == '\x00' { label = label[:len(label)-1] } return string(label), nil } // setFSCreateLabel tells kernel the label to create all file system objects // created by this task. Setting label="" to return to default. func setFSCreateLabel(label string) error { return writeAttr("fscreate", label) } // fsCreateLabel returns the default label the kernel which the kernel is using // for file system objects created by this task. "" indicates default. func fsCreateLabel() (string, error) { return readAttr("fscreate") } // currentLabel returns the SELinux label of the current process thread, or an error. func currentLabel() (string, error) { return readAttr("current") } // pidLabel returns the SELinux label of the given pid, or an error. func pidLabel(pid int) (string, error) { return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) } // ExecLabel returns the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. 
func execLabel() (string, error) { return readAttr("exec") } func writeCon(fpath, val string) error { if fpath == "" { return ErrEmptyPath } if val == "" { if !getEnabled() { return nil } } out, err := os.OpenFile(fpath, os.O_WRONLY, 0) if err != nil { return err } defer out.Close() if err := isProcHandle(out); err != nil { return err } if val != "" { _, err = out.Write([]byte(val)) } else { _, err = out.Write(nil) } if err != nil { return errors.Wrapf(err, "failed to set %s on procfs", fpath) } return nil } func attrPath(attr string) string { // Linux >= 3.17 provides this const threadSelfPrefix = "/proc/thread-self/attr" attrPathOnce.Do(func() { st, err := os.Stat(threadSelfPrefix) if err == nil && st.Mode().IsDir() { haveThreadSelf = true } }) if haveThreadSelf { return path.Join(threadSelfPrefix, attr) } return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr) } func readAttr(attr string) (string, error) { return readCon(attrPath(attr)) } func writeAttr(attr, val string) error { return writeCon(attrPath(attr), val) } // canonicalizeContext takes a context string and writes it to the kernel // the function then returns the context that the kernel will use. Use this // function to check if two contexts are equivalent func canonicalizeContext(val string) (string, error) { return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val) } // computeCreateContext requests the type transition from source to target for // class from the kernel. func computeCreateContext(source string, target string, class string) (string, error) { classidx, err := classIndex(class) if err != nil { return "", err } return readWriteCon(filepath.Join(getSelinuxMountPoint(), "create"), fmt.Sprintf("%s %s %d", source, target, classidx)) } // catsToBitset stores categories in a bitset. 
func catsToBitset(cats string) (*bitset.BitSet, error) { bitset := &bitset.BitSet{} catlist := strings.Split(cats, ",") for _, r := range catlist { ranges := strings.SplitN(r, ".", 2) if len(ranges) > 1 { catstart, err := parseLevelItem(ranges[0], category) if err != nil { return nil, err } catend, err := parseLevelItem(ranges[1], category) if err != nil { return nil, err } for i := catstart; i <= catend; i++ { bitset.Set(i) } } else { cat, err := parseLevelItem(ranges[0], category) if err != nil { return nil, err } bitset.Set(cat) } } return bitset, nil } // parseLevelItem parses and verifies that a sensitivity or category are valid func parseLevelItem(s string, sep levelItem) (uint, error) { if len(s) < minSensLen || levelItem(s[0]) != sep { return 0, ErrLevelSyntax } val, err := strconv.ParseUint(s[1:], 10, 32) if err != nil { return 0, err } return uint(val), nil } // parseLevel fills a level from a string that contains // a sensitivity and categories func (l *level) parseLevel(levelStr string) error { lvl := strings.SplitN(levelStr, ":", 2) sens, err := parseLevelItem(lvl[0], sensitivity) if err != nil { return errors.Wrap(err, "failed to parse sensitivity") } l.sens = sens if len(lvl) > 1 { cats, err := catsToBitset(lvl[1]) if err != nil { return errors.Wrap(err, "failed to parse categories") } l.cats = cats } return nil } // rangeStrToMLSRange marshals a string representation of a range. func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { mlsRange := &mlsRange{} levelSlice := strings.SplitN(rangeStr, "-", 2) switch len(levelSlice) { // rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023 case 2: mlsRange.high = &level{} if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { return nil, errors.Wrapf(err, "failed to parse high level %q", levelSlice[1]) } fallthrough // rangeStr that is single level, e.g. 
s6:c0,c3,c5,c30.c1023 case 1: mlsRange.low = &level{} if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { return nil, errors.Wrapf(err, "failed to parse low level %q", levelSlice[0]) } } if mlsRange.high == nil { mlsRange.high = mlsRange.low } return mlsRange, nil } // bitsetToStr takes a category bitset and returns it in the // canonical selinux syntax func bitsetToStr(c *bitset.BitSet) string { var str string i, e := c.NextSet(0) len := 0 for e { if len == 0 { if str != "" { str += "," } str += "c" + strconv.Itoa(int(i)) } next, e := c.NextSet(i + 1) if e { // consecutive cats if next == i+1 { len++ i = next continue } } if len == 1 { str += ",c" + strconv.Itoa(int(i)) } else if len > 1 { str += ".c" + strconv.Itoa(int(i)) } if !e { break } len = 0 i = next } return str } func (l1 *level) equal(l2 *level) bool { if l2 == nil || l1 == nil { return l1 == l2 } if l1.sens != l2.sens { return false } return l1.cats.Equal(l2.cats) } // String returns an mlsRange as a string. func (m mlsRange) String() string { low := "s" + strconv.Itoa(int(m.low.sens)) if m.low.cats != nil && m.low.cats.Count() > 0 { low += ":" + bitsetToStr(m.low.cats) } if m.low.equal(m.high) { return low } high := "s" + strconv.Itoa(int(m.high.sens)) if m.high.cats != nil && m.high.cats.Count() > 0 { high += ":" + bitsetToStr(m.high.cats) } return low + "-" + high } func max(a, b uint) uint { if a > b { return a } return b } func min(a, b uint) uint { if a < b { return a } return b } // calculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound) // of a source and target range. // The glblub is calculated as the greater of the low sensitivities and // the lower of the high sensitivities and the and of each category bitset. 
func calculateGlbLub(sourceRange, targetRange string) (string, error) { s, err := rangeStrToMLSRange(sourceRange) if err != nil { return "", err } t, err := rangeStrToMLSRange(targetRange) if err != nil { return "", err } if s.high.sens < t.low.sens || t.high.sens < s.low.sens { /* these ranges have no common sensitivities */ return "", ErrIncomparable } outrange := &mlsRange{low: &level{}, high: &level{}} /* take the greatest of the low */ outrange.low.sens = max(s.low.sens, t.low.sens) /* take the least of the high */ outrange.high.sens = min(s.high.sens, t.high.sens) /* find the intersecting categories */ if s.low.cats != nil && t.low.cats != nil { outrange.low.cats = s.low.cats.Intersection(t.low.cats) } if s.high.cats != nil && t.high.cats != nil { outrange.high.cats = s.high.cats.Intersection(t.high.cats) } return outrange.String(), nil } func readWriteCon(fpath string, val string) (string, error) { if fpath == "" { return "", ErrEmptyPath } f, err := os.OpenFile(fpath, os.O_RDWR, 0) if err != nil { return "", err } defer f.Close() _, err = f.Write([]byte(val)) if err != nil { return "", err } var retval string if _, err := fmt.Fscanf(f, "%s", &retval); err != nil { return "", err } return strings.Trim(retval, "\x00"), nil } // setExecLabel sets the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. func setExecLabel(label string) error { return writeAttr("exec", label) } // setTaskLabel sets the SELinux label for the current thread, or an error. // This requires the dyntransition permission. 
func setTaskLabel(label string) error { return writeAttr("current", label) } // setSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created func setSocketLabel(label string) error { return writeAttr("sockcreate", label) } // socketLabel retrieves the current socket label setting func socketLabel() (string, error) { return readAttr("sockcreate") } // peerLabel retrieves the label of the client on the other side of a socket func peerLabel(fd uintptr) (string, error) { return unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) } // setKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created func setKeyLabel(label string) error { err := writeCon("/proc/self/attr/keycreate", label) if os.IsNotExist(errors.Cause(err)) { return nil } if label == "" && os.IsPermission(errors.Cause(err)) { return nil } return err } // keyLabel retrieves the current kernel keyring label setting func keyLabel() (string, error) { return readCon("/proc/self/attr/keycreate") } // get returns the Context as a string func (c Context) get() string { if c["level"] != "" { return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) } return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"]) } // newContext creates a new Context struct from the specified label func newContext(label string) (Context, error) { c := make(Context) if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) < 3 { return c, InvalidLabel } c["user"] = con[0] c["role"] = con[1] c["type"] = con[2] if len(con) > 3 { c["level"] = con[3] } } return c, nil } // clearLabels clears all reserved labels func clearLabels() { state.Lock() state.mcsList = make(map[string]bool) state.Unlock() } // reserveLabel reserves the MLS/MCS level component of the specified label func reserveLabel(label string) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) > 
3 { mcsAdd(con[3]) } } } func selinuxEnforcePath() string { return path.Join(getSelinuxMountPoint(), "enforce") } // enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func enforceMode() int { var enforce int enforceB, err := ioutil.ReadFile(selinuxEnforcePath()) if err != nil { return -1 } enforce, err = strconv.Atoi(string(enforceB)) if err != nil { return -1 } return enforce } // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0644) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, // Permissive or Disabled. Note this is is just the default at boot time. // EnforceMode tells you the systems current mode. func defaultEnforceMode() int { switch readConfig(selinuxTag) { case "enforcing": return Enforcing case "permissive": return Permissive } return Disabled } func mcsAdd(mcs string) error { if mcs == "" { return nil } state.Lock() defer state.Unlock() if state.mcsList[mcs] { return ErrMCSAlreadyExists } state.mcsList[mcs] = true return nil } func mcsDelete(mcs string) { if mcs == "" { return } state.Lock() defer state.Unlock() state.mcsList[mcs] = false } func intToMcs(id int, catRange uint32) string { var ( SETSIZE = int(catRange) TIER = SETSIZE ORD = id ) if id < 1 || id > 523776 { return "" } for ORD > TIER { ORD = ORD - TIER TIER-- } TIER = SETSIZE - TIER ORD = ORD + TIER return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) } func uniqMcs(catRange uint32) string { var ( n uint32 c1, c2 uint32 mcs string ) for { binary.Read(rand.Reader, binary.LittleEndian, &n) c1 = n % catRange binary.Read(rand.Reader, binary.LittleEndian, &n) c2 = n % catRange if c1 == c2 { continue } else { if c1 > c2 { c1, c2 = c2, c1 } } mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) if err := mcsAdd(mcs); err != nil { continue } break } return mcs } // 
releaseLabel un-reserves the MLS/MCS Level field of the specified label, // allowing it to be used by another process. func releaseLabel(label string) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) > 3 { mcsDelete(con[3]) } } } // roFileLabel returns the specified SELinux readonly file label func roFileLabel() string { return readOnlyFileLabel } func openContextFile() (*os.File, error) { if f, err := os.Open(contextFile); err == nil { return f, nil } lxcPath := filepath.Join(getSELinuxPolicyRoot(), "/contexts/lxc_contexts") return os.Open(lxcPath) } var labels = loadLabels() func loadLabels() map[string]string { var ( val, key string bufin *bufio.Reader ) labels := make(map[string]string) in, err := openContextFile() if err != nil { return labels } defer in.Close() bufin = bufio.NewReader(in) for done := false; !done; { var line string if line, err = bufin.ReadString('\n'); err != nil { if err == io.EOF { done = true } else { break } } line = strings.TrimSpace(line) if len(line) == 0 { // Skip blank lines continue } if line[0] == ';' || line[0] == '#' { // Skip comments continue } if groups := assignRegex.FindStringSubmatch(line); groups != nil { key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) labels[key] = strings.Trim(val, "\"") } } return labels } // kvmContainerLabels returns the default processLabel and mountLabel to be used // for kvm containers by the calling process. func kvmContainerLabels() (string, string) { processLabel := labels["kvm_process"] if processLabel == "" { processLabel = labels["process"] } return addMcs(processLabel, labels["file"]) } // initContainerLabels returns the default processLabel and file labels to be // used for containers running an init system like systemd by the calling process. 
func initContainerLabels() (string, string) { processLabel := labels["init_process"] if processLabel == "" { processLabel = labels["process"] } return addMcs(processLabel, labels["file"]) } // containerLabels returns an allocated processLabel and fileLabel to be used for // container labeling by the calling process. func containerLabels() (processLabel string, fileLabel string) { if !getEnabled() { return "", "" } processLabel = labels["process"] fileLabel = labels["file"] readOnlyFileLabel = labels["ro_file"] if processLabel == "" || fileLabel == "" { return "", fileLabel } if readOnlyFileLabel == "" { readOnlyFileLabel = fileLabel } return addMcs(processLabel, fileLabel) } func addMcs(processLabel, fileLabel string) (string, string) { scon, _ := NewContext(processLabel) if scon["level"] != "" { mcs := uniqMcs(CategoryRange) scon["level"] = mcs processLabel = scon.Get() scon, _ = NewContext(fileLabel) scon["level"] = mcs fileLabel = scon.Get() } return processLabel, fileLabel } // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0644) } // copyLevel returns a label with the MLS/MCS level from src label replaced on // the dest label. 
func copyLevel(src, dest string) (string, error) { if src == "" { return "", nil } if err := SecurityCheckContext(src); err != nil { return "", err } if err := SecurityCheckContext(dest); err != nil { return "", err } scon, err := NewContext(src) if err != nil { return "", err } tcon, err := NewContext(dest) if err != nil { return "", err } mcsDelete(tcon["level"]) mcsAdd(scon["level"]) tcon["level"] = scon["level"] return tcon.Get(), nil } // Prevent users from relabeling system files func badPrefix(fpath string) error { if fpath == "" { return ErrEmptyPath } badPrefixes := []string{"/usr"} for _, prefix := range badPrefixes { if strings.HasPrefix(fpath, prefix) { return errors.Errorf("relabeling content in %s is not allowed", prefix) } } return nil } // chcon changes the fpath file object to the SELinux label label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. func chcon(fpath string, label string, recurse bool) error { if fpath == "" { return ErrEmptyPath } if label == "" { return nil } if err := badPrefix(fpath); err != nil { return err } if !recurse { return SetFileLabel(fpath, label) } return pwalk.Walk(fpath, func(p string, info os.FileInfo, err error) error { e := SetFileLabel(p, label) // Walk a file tree can race with removal, so ignore ENOENT if os.IsNotExist(errors.Cause(e)) { return nil } return e }) } // dupSecOpt takes an SELinux process label and returns security options that // can be used to set the SELinux Type and Level for future container processes. 
func dupSecOpt(src string) ([]string, error) { if src == "" { return nil, nil } con, err := NewContext(src) if err != nil { return nil, err } if con["user"] == "" || con["role"] == "" || con["type"] == "" { return nil, nil } dup := []string{"user:" + con["user"], "role:" + con["role"], "type:" + con["type"], } if con["level"] != "" { dup = append(dup, "level:"+con["level"]) } return dup, nil } // disableSecOpt returns a security opt that can be used to disable SELinux // labeling support for future container processes. func disableSecOpt() []string { return []string{"disable"} } selinux: minor reformatting Signed-off-by: Sebastiaan van Stijn <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@gone.nl> // +build selinux,linux package selinux import ( "bufio" "bytes" "crypto/rand" "encoding/binary" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "regexp" "strconv" "strings" "sync" "github.com/opencontainers/selinux/pkg/pwalk" "github.com/pkg/errors" "github.com/willf/bitset" "golang.org/x/sys/unix" ) const ( minSensLen = 2 contextFile = "/usr/share/containers/selinux/contexts" selinuxDir = "/etc/selinux/" selinuxConfig = selinuxDir + "config" selinuxfsMount = "/sys/fs/selinux" selinuxTypeTag = "SELINUXTYPE" selinuxTag = "SELINUX" xattrNameSelinux = "security.selinux" ) type selinuxState struct { enabledSet bool enabled bool selinuxfsOnce sync.Once selinuxfs string mcsList map[string]bool sync.Mutex } type level struct { sens uint cats *bitset.BitSet } type mlsRange struct { low *level high *level } type levelItem byte const ( sensitivity levelItem = 's' category levelItem = 'c' ) var ( assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) readOnlyFileLabel string state = selinuxState{ mcsList: make(map[string]bool), } // for attrPath() attrPathOnce sync.Once haveThreadSelf bool ) func (s *selinuxState) setEnable(enabled bool) bool { s.Lock() defer s.Unlock() s.enabledSet = true s.enabled = enabled return s.enabled } func (s *selinuxState) getEnabled() bool { s.Lock() enabled 
:= s.enabled enabledSet := s.enabledSet s.Unlock() if enabledSet { return enabled } enabled = false if fs := getSelinuxMountPoint(); fs != "" { if con, _ := CurrentLabel(); con != "kernel" { enabled = true } } return s.setEnable(enabled) } // setDisabled disables SELinux support for the package func setDisabled() { state.setEnable(false) } func verifySELinuxfsMount(mnt string) bool { var buf unix.Statfs_t for { err := unix.Statfs(mnt, &buf) if err == nil { break } if err == unix.EAGAIN { continue } return false } if uint32(buf.Type) != uint32(unix.SELINUX_MAGIC) { return false } if (buf.Flags & unix.ST_RDONLY) != 0 { return false } return true } func findSELinuxfs() string { // fast path: check the default mount first if verifySELinuxfsMount(selinuxfsMount) { return selinuxfsMount } // check if selinuxfs is available before going the slow path fs, err := ioutil.ReadFile("/proc/filesystems") if err != nil { return "" } if !bytes.Contains(fs, []byte("\tselinuxfs\n")) { return "" } // slow path: try to find among the mounts f, err := os.Open("/proc/self/mountinfo") if err != nil { return "" } defer f.Close() scanner := bufio.NewScanner(f) for { mnt := findSELinuxfsMount(scanner) if mnt == "" { // error or not found return "" } if verifySELinuxfsMount(mnt) { return mnt } } } // findSELinuxfsMount returns a next selinuxfs mount point found, // if there is one, or an empty string in case of EOF or error. func findSELinuxfsMount(s *bufio.Scanner) string { for s.Scan() { txt := s.Bytes() // The first field after - is fs type. 
// Safe as spaces in mountpoints are encoded as \040 if !bytes.Contains(txt, []byte(" - selinuxfs ")) { continue } const mPos = 5 // mount point is 5th field fields := bytes.SplitN(txt, []byte(" "), mPos+1) if len(fields) < mPos+1 { continue } return string(fields[mPos-1]) } return "" } func (s *selinuxState) getSELinuxfs() string { s.selinuxfsOnce.Do(func() { s.selinuxfs = findSELinuxfs() }) return s.selinuxfs } // getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs // filesystem or an empty string if no mountpoint is found. Selinuxfs is // a proc-like pseudo-filesystem that exposes the SELinux policy API to // processes. The existence of an selinuxfs mount is used to determine // whether SELinux is currently enabled or not. func getSelinuxMountPoint() string { return state.getSELinuxfs() } // getEnabled returns whether SELinux is currently enabled. func getEnabled() bool { return state.getEnabled() } func readConfig(target string) string { var ( val, key string bufin *bufio.Reader ) in, err := os.Open(selinuxConfig) if err != nil { return "" } defer in.Close() bufin = bufio.NewReader(in) for done := false; !done; { var line string if line, err = bufin.ReadString('\n'); err != nil { if err != io.EOF { return "" } done = true } line = strings.TrimSpace(line) if len(line) == 0 { // Skip blank lines continue } if line[0] == ';' || line[0] == '#' { // Skip comments continue } if groups := assignRegex.FindStringSubmatch(line); groups != nil { key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) if key == target { return strings.Trim(val, "\"") } } } return "" } func getSELinuxPolicyRoot() string { return filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) } func isProcHandle(fh *os.File) error { var buf unix.Statfs_t err := unix.Fstatfs(int(fh.Fd()), &buf) if err != nil { return errors.Wrapf(err, "statfs(%q) failed", fh.Name()) } if buf.Type != unix.PROC_SUPER_MAGIC { return errors.Errorf("file %q is not on procfs", fh.Name()) } 
return nil } func readCon(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } in, err := os.Open(fpath) if err != nil { return "", err } defer in.Close() if err := isProcHandle(in); err != nil { return "", err } var retval string if _, err := fmt.Fscanf(in, "%s", &retval); err != nil { return "", err } return strings.Trim(retval, "\x00"), nil } // classIndex returns the int index for an object class in the loaded policy, // or -1 and an error func classIndex(class string) (int, error) { permpath := fmt.Sprintf("class/%s/index", class) indexpath := filepath.Join(getSelinuxMountPoint(), permpath) indexB, err := ioutil.ReadFile(indexpath) if err != nil { return -1, err } index, err := strconv.Atoi(string(indexB)) if err != nil { return -1, err } return index, nil } // setFileLabel sets the SELinux label for this path or returns an error. func setFileLabel(fpath string, label string) error { if fpath == "" { return ErrEmptyPath } if err := unix.Lsetxattr(fpath, xattrNameSelinux, []byte(label), 0); err != nil { return errors.Wrapf(err, "failed to set file label on %s", fpath) } return nil } // fileLabel returns the SELinux label for this path or returns an error. func fileLabel(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } label, err := lgetxattr(fpath, xattrNameSelinux) if err != nil { return "", err } // Trim the NUL byte at the end of the byte buffer, if present. if len(label) > 0 && label[len(label)-1] == '\x00' { label = label[:len(label)-1] } return string(label), nil } // setFSCreateLabel tells kernel the label to create all file system objects // created by this task. Setting label="" to return to default. func setFSCreateLabel(label string) error { return writeAttr("fscreate", label) } // fsCreateLabel returns the default label the kernel which the kernel is using // for file system objects created by this task. "" indicates default. 
func fsCreateLabel() (string, error) { return readAttr("fscreate") } // currentLabel returns the SELinux label of the current process thread, or an error. func currentLabel() (string, error) { return readAttr("current") } // pidLabel returns the SELinux label of the given pid, or an error. func pidLabel(pid int) (string, error) { return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) } // ExecLabel returns the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. func execLabel() (string, error) { return readAttr("exec") } func writeCon(fpath, val string) error { if fpath == "" { return ErrEmptyPath } if val == "" { if !getEnabled() { return nil } } out, err := os.OpenFile(fpath, os.O_WRONLY, 0) if err != nil { return err } defer out.Close() if err := isProcHandle(out); err != nil { return err } if val != "" { _, err = out.Write([]byte(val)) } else { _, err = out.Write(nil) } if err != nil { return errors.Wrapf(err, "failed to set %s on procfs", fpath) } return nil } func attrPath(attr string) string { // Linux >= 3.17 provides this const threadSelfPrefix = "/proc/thread-self/attr" attrPathOnce.Do(func() { st, err := os.Stat(threadSelfPrefix) if err == nil && st.Mode().IsDir() { haveThreadSelf = true } }) if haveThreadSelf { return path.Join(threadSelfPrefix, attr) } return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr) } func readAttr(attr string) (string, error) { return readCon(attrPath(attr)) } func writeAttr(attr, val string) error { return writeCon(attrPath(attr), val) } // canonicalizeContext takes a context string and writes it to the kernel // the function then returns the context that the kernel will use. 
Use this // function to check if two contexts are equivalent func canonicalizeContext(val string) (string, error) { return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val) } // computeCreateContext requests the type transition from source to target for // class from the kernel. func computeCreateContext(source string, target string, class string) (string, error) { classidx, err := classIndex(class) if err != nil { return "", err } return readWriteCon(filepath.Join(getSelinuxMountPoint(), "create"), fmt.Sprintf("%s %s %d", source, target, classidx)) } // catsToBitset stores categories in a bitset. func catsToBitset(cats string) (*bitset.BitSet, error) { bitset := &bitset.BitSet{} catlist := strings.Split(cats, ",") for _, r := range catlist { ranges := strings.SplitN(r, ".", 2) if len(ranges) > 1 { catstart, err := parseLevelItem(ranges[0], category) if err != nil { return nil, err } catend, err := parseLevelItem(ranges[1], category) if err != nil { return nil, err } for i := catstart; i <= catend; i++ { bitset.Set(i) } } else { cat, err := parseLevelItem(ranges[0], category) if err != nil { return nil, err } bitset.Set(cat) } } return bitset, nil } // parseLevelItem parses and verifies that a sensitivity or category are valid func parseLevelItem(s string, sep levelItem) (uint, error) { if len(s) < minSensLen || levelItem(s[0]) != sep { return 0, ErrLevelSyntax } val, err := strconv.ParseUint(s[1:], 10, 32) if err != nil { return 0, err } return uint(val), nil } // parseLevel fills a level from a string that contains // a sensitivity and categories func (l *level) parseLevel(levelStr string) error { lvl := strings.SplitN(levelStr, ":", 2) sens, err := parseLevelItem(lvl[0], sensitivity) if err != nil { return errors.Wrap(err, "failed to parse sensitivity") } l.sens = sens if len(lvl) > 1 { cats, err := catsToBitset(lvl[1]) if err != nil { return errors.Wrap(err, "failed to parse categories") } l.cats = cats } return nil } // rangeStrToMLSRange 
marshals a string representation of a range. func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { mlsRange := &mlsRange{} levelSlice := strings.SplitN(rangeStr, "-", 2) switch len(levelSlice) { // rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023 case 2: mlsRange.high = &level{} if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { return nil, errors.Wrapf(err, "failed to parse high level %q", levelSlice[1]) } fallthrough // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 case 1: mlsRange.low = &level{} if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { return nil, errors.Wrapf(err, "failed to parse low level %q", levelSlice[0]) } } if mlsRange.high == nil { mlsRange.high = mlsRange.low } return mlsRange, nil } // bitsetToStr takes a category bitset and returns it in the // canonical selinux syntax func bitsetToStr(c *bitset.BitSet) string { var str string i, e := c.NextSet(0) len := 0 for e { if len == 0 { if str != "" { str += "," } str += "c" + strconv.Itoa(int(i)) } next, e := c.NextSet(i + 1) if e { // consecutive cats if next == i+1 { len++ i = next continue } } if len == 1 { str += ",c" + strconv.Itoa(int(i)) } else if len > 1 { str += ".c" + strconv.Itoa(int(i)) } if !e { break } len = 0 i = next } return str } func (l1 *level) equal(l2 *level) bool { if l2 == nil || l1 == nil { return l1 == l2 } if l1.sens != l2.sens { return false } return l1.cats.Equal(l2.cats) } // String returns an mlsRange as a string. 
func (m mlsRange) String() string { low := "s" + strconv.Itoa(int(m.low.sens)) if m.low.cats != nil && m.low.cats.Count() > 0 { low += ":" + bitsetToStr(m.low.cats) } if m.low.equal(m.high) { return low } high := "s" + strconv.Itoa(int(m.high.sens)) if m.high.cats != nil && m.high.cats.Count() > 0 { high += ":" + bitsetToStr(m.high.cats) } return low + "-" + high } func max(a, b uint) uint { if a > b { return a } return b } func min(a, b uint) uint { if a < b { return a } return b } // calculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound) // of a source and target range. // The glblub is calculated as the greater of the low sensitivities and // the lower of the high sensitivities and the and of each category bitset. func calculateGlbLub(sourceRange, targetRange string) (string, error) { s, err := rangeStrToMLSRange(sourceRange) if err != nil { return "", err } t, err := rangeStrToMLSRange(targetRange) if err != nil { return "", err } if s.high.sens < t.low.sens || t.high.sens < s.low.sens { /* these ranges have no common sensitivities */ return "", ErrIncomparable } outrange := &mlsRange{low: &level{}, high: &level{}} /* take the greatest of the low */ outrange.low.sens = max(s.low.sens, t.low.sens) /* take the least of the high */ outrange.high.sens = min(s.high.sens, t.high.sens) /* find the intersecting categories */ if s.low.cats != nil && t.low.cats != nil { outrange.low.cats = s.low.cats.Intersection(t.low.cats) } if s.high.cats != nil && t.high.cats != nil { outrange.high.cats = s.high.cats.Intersection(t.high.cats) } return outrange.String(), nil } func readWriteCon(fpath string, val string) (string, error) { if fpath == "" { return "", ErrEmptyPath } f, err := os.OpenFile(fpath, os.O_RDWR, 0) if err != nil { return "", err } defer f.Close() _, err = f.Write([]byte(val)) if err != nil { return "", err } var retval string if _, err := fmt.Fscanf(f, "%s", &retval); err != nil { return "", err } return strings.Trim(retval, 
"\x00"), nil } // setExecLabel sets the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. func setExecLabel(label string) error { return writeAttr("exec", label) } // setTaskLabel sets the SELinux label for the current thread, or an error. // This requires the dyntransition permission. func setTaskLabel(label string) error { return writeAttr("current", label) } // setSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created func setSocketLabel(label string) error { return writeAttr("sockcreate", label) } // socketLabel retrieves the current socket label setting func socketLabel() (string, error) { return readAttr("sockcreate") } // peerLabel retrieves the label of the client on the other side of a socket func peerLabel(fd uintptr) (string, error) { return unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) } // setKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created func setKeyLabel(label string) error { err := writeCon("/proc/self/attr/keycreate", label) if os.IsNotExist(errors.Cause(err)) { return nil } if label == "" && os.IsPermission(errors.Cause(err)) { return nil } return err } // keyLabel retrieves the current kernel keyring label setting func keyLabel() (string, error) { return readCon("/proc/self/attr/keycreate") } // get returns the Context as a string func (c Context) get() string { if c["level"] != "" { return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) } return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"]) } // newContext creates a new Context struct from the specified label func newContext(label string) (Context, error) { c := make(Context) if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) < 3 { return c, InvalidLabel } c["user"] = con[0] c["role"] = con[1] c["type"] = con[2] if len(con) 
> 3 { c["level"] = con[3] } } return c, nil } // clearLabels clears all reserved labels func clearLabels() { state.Lock() state.mcsList = make(map[string]bool) state.Unlock() } // reserveLabel reserves the MLS/MCS level component of the specified label func reserveLabel(label string) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) > 3 { mcsAdd(con[3]) } } } func selinuxEnforcePath() string { return path.Join(getSelinuxMountPoint(), "enforce") } // enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func enforceMode() int { var enforce int enforceB, err := ioutil.ReadFile(selinuxEnforcePath()) if err != nil { return -1 } enforce, err = strconv.Atoi(string(enforceB)) if err != nil { return -1 } return enforce } // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0644) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, // Permissive or Disabled. Note this is is just the default at boot time. // EnforceMode tells you the systems current mode. 
func defaultEnforceMode() int { switch readConfig(selinuxTag) { case "enforcing": return Enforcing case "permissive": return Permissive } return Disabled } func mcsAdd(mcs string) error { if mcs == "" { return nil } state.Lock() defer state.Unlock() if state.mcsList[mcs] { return ErrMCSAlreadyExists } state.mcsList[mcs] = true return nil } func mcsDelete(mcs string) { if mcs == "" { return } state.Lock() defer state.Unlock() state.mcsList[mcs] = false } func intToMcs(id int, catRange uint32) string { var ( SETSIZE = int(catRange) TIER = SETSIZE ORD = id ) if id < 1 || id > 523776 { return "" } for ORD > TIER { ORD = ORD - TIER TIER-- } TIER = SETSIZE - TIER ORD = ORD + TIER return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) } func uniqMcs(catRange uint32) string { var ( n uint32 c1, c2 uint32 mcs string ) for { binary.Read(rand.Reader, binary.LittleEndian, &n) c1 = n % catRange binary.Read(rand.Reader, binary.LittleEndian, &n) c2 = n % catRange if c1 == c2 { continue } else { if c1 > c2 { c1, c2 = c2, c1 } } mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) if err := mcsAdd(mcs); err != nil { continue } break } return mcs } // releaseLabel un-reserves the MLS/MCS Level field of the specified label, // allowing it to be used by another process. 
func releaseLabel(label string) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) > 3 { mcsDelete(con[3]) } } } // roFileLabel returns the specified SELinux readonly file label func roFileLabel() string { return readOnlyFileLabel } func openContextFile() (*os.File, error) { if f, err := os.Open(contextFile); err == nil { return f, nil } lxcPath := filepath.Join(getSELinuxPolicyRoot(), "/contexts/lxc_contexts") return os.Open(lxcPath) } var labels = loadLabels() func loadLabels() map[string]string { var ( val, key string bufin *bufio.Reader ) labels := make(map[string]string) in, err := openContextFile() if err != nil { return labels } defer in.Close() bufin = bufio.NewReader(in) for done := false; !done; { var line string if line, err = bufin.ReadString('\n'); err != nil { if err == io.EOF { done = true } else { break } } line = strings.TrimSpace(line) if len(line) == 0 { // Skip blank lines continue } if line[0] == ';' || line[0] == '#' { // Skip comments continue } if groups := assignRegex.FindStringSubmatch(line); groups != nil { key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) labels[key] = strings.Trim(val, "\"") } } return labels } // kvmContainerLabels returns the default processLabel and mountLabel to be used // for kvm containers by the calling process. func kvmContainerLabels() (string, string) { processLabel := labels["kvm_process"] if processLabel == "" { processLabel = labels["process"] } return addMcs(processLabel, labels["file"]) } // initContainerLabels returns the default processLabel and file labels to be // used for containers running an init system like systemd by the calling process. func initContainerLabels() (string, string) { processLabel := labels["init_process"] if processLabel == "" { processLabel = labels["process"] } return addMcs(processLabel, labels["file"]) } // containerLabels returns an allocated processLabel and fileLabel to be used for // container labeling by the calling process. 
func containerLabels() (processLabel string, fileLabel string) { if !getEnabled() { return "", "" } processLabel = labels["process"] fileLabel = labels["file"] readOnlyFileLabel = labels["ro_file"] if processLabel == "" || fileLabel == "" { return "", fileLabel } if readOnlyFileLabel == "" { readOnlyFileLabel = fileLabel } return addMcs(processLabel, fileLabel) } func addMcs(processLabel, fileLabel string) (string, string) { scon, _ := NewContext(processLabel) if scon["level"] != "" { mcs := uniqMcs(CategoryRange) scon["level"] = mcs processLabel = scon.Get() scon, _ = NewContext(fileLabel) scon["level"] = mcs fileLabel = scon.Get() } return processLabel, fileLabel } // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0644) } // copyLevel returns a label with the MLS/MCS level from src label replaced on // the dest label. func copyLevel(src, dest string) (string, error) { if src == "" { return "", nil } if err := SecurityCheckContext(src); err != nil { return "", err } if err := SecurityCheckContext(dest); err != nil { return "", err } scon, err := NewContext(src) if err != nil { return "", err } tcon, err := NewContext(dest) if err != nil { return "", err } mcsDelete(tcon["level"]) mcsAdd(scon["level"]) tcon["level"] = scon["level"] return tcon.Get(), nil } // Prevent users from relabeling system files func badPrefix(fpath string) error { if fpath == "" { return ErrEmptyPath } badPrefixes := []string{"/usr"} for _, prefix := range badPrefixes { if strings.HasPrefix(fpath, prefix) { return errors.Errorf("relabeling content in %s is not allowed", prefix) } } return nil } // chcon changes the fpath file object to the SELinux label label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. 
func chcon(fpath string, label string, recurse bool) error { if fpath == "" { return ErrEmptyPath } if label == "" { return nil } if err := badPrefix(fpath); err != nil { return err } if !recurse { return SetFileLabel(fpath, label) } return pwalk.Walk(fpath, func(p string, info os.FileInfo, err error) error { e := SetFileLabel(p, label) // Walk a file tree can race with removal, so ignore ENOENT if os.IsNotExist(errors.Cause(e)) { return nil } return e }) } // dupSecOpt takes an SELinux process label and returns security options that // can be used to set the SELinux Type and Level for future container processes. func dupSecOpt(src string) ([]string, error) { if src == "" { return nil, nil } con, err := NewContext(src) if err != nil { return nil, err } if con["user"] == "" || con["role"] == "" || con["type"] == "" { return nil, nil } dup := []string{"user:" + con["user"], "role:" + con["role"], "type:" + con["type"], } if con["level"] != "" { dup = append(dup, "level:"+con["level"]) } return dup, nil } // disableSecOpt returns a security opt that can be used to disable SELinux // labeling support for future container processes. func disableSecOpt() []string { return []string{"disable"} }
// Copyright 2012, Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // vt tablet server: Serves queries and performs housekeeping jobs. package main import ( "compress/gzip" "expvar" "flag" "fmt" "io" "log" "net/http" _ "net/http/pprof" "os" "os/exec" "os/signal" "path" "path/filepath" "strings" "syscall" "time" "code.google.com/p/vitess/go/jscfg" "code.google.com/p/vitess/go/relog" rpc "code.google.com/p/vitess/go/rpcplus" "code.google.com/p/vitess/go/rpcwrap/auth" "code.google.com/p/vitess/go/rpcwrap/bsonrpc" "code.google.com/p/vitess/go/rpcwrap/jsonrpc" _ "code.google.com/p/vitess/go/snitch" "code.google.com/p/vitess/go/umgmt" "code.google.com/p/vitess/go/vt/dbconfigs" vtenv "code.google.com/p/vitess/go/vt/env" "code.google.com/p/vitess/go/vt/mysqlctl" "code.google.com/p/vitess/go/vt/servenv" tm "code.google.com/p/vitess/go/vt/tabletmanager" ts "code.google.com/p/vitess/go/vt/tabletserver" "code.google.com/p/vitess/go/zk" ) const ( DefaultLameDuckPeriod = 30.0 DefaultRebindDelay = 0.01 ) var ( port = flag.Int("port", 6509, "port for the server") lameDuckPeriod = flag.Float64("lame-duck-period", DefaultLameDuckPeriod, "how long to give in-flight transactions to finish") rebindDelay = flag.Float64("rebind-delay", DefaultRebindDelay, "artificial delay before rebinding a hijacked listener") tabletPath = flag.String("tablet-path", "", "path to zk node representing the tablet") qsConfigFile = flag.String("queryserver-config-file", "", "config file name for the query service") mycnfFile = flag.String("mycnf-file", "", "my.cnf file") authConfig = flag.String("auth-credentials", "", "name of file containing auth credentials") queryLog = flag.String("debug-querylog-file", "", "for testing: log all queries to this file") ) // Default values for the config // // The value for StreamBufferSize was chosen after trying out a few of // them. 
Too small buffers force too many packets to be sent. Too big // buffers force the clients to read them in multiple chunks and make // memory copies. so with the encoding overhead, this seems to work // great. (the overhead makes the final packets on the wire about // twice bigger than this). var qsConfig = ts.Config{ CachePoolCap: 1000, PoolSize: 16, StreamPoolSize: 750, TransactionCap: 20, TransactionTimeout: 30, MaxResultSize: 10000, QueryCacheSize: 5000, SchemaReloadTime: 30 * 60, QueryTimeout: 0, IdleTimeout: 30 * 60, StreamBufferSize: 32 * 1024, } func main() { dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags() flag.Parse() servenv.Init("vttablet") _, tabletidStr := path.Split(*tabletPath) tabletId, err := tm.ParseUid(tabletidStr) if err != nil { relog.Fatal("%s", err) } mycnf := readMycnf(tabletId) dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile) if err != nil { relog.Warning("%s", err) } initQueryService(dbcfgs) initUpdateStreamService(mycnf) initAgent(dbcfgs, mycnf, *dbConfigsFile, *dbCredentialsFile) // depends on both query and updateStream rpc.HandleHTTP() // NOTE(szopa): Changing credentials requires a server // restart. if *authConfig != "" { if err := auth.LoadCredentials(*authConfig); err != nil { relog.Error("could not load authentication credentials, not starting rpc servers: %v", err) } serveAuthRPC() } serveRPC() // make a list of paths we can serve HTTP traffic from. 
// we don't resolve them here to real paths, as they might not exits yet snapshotDir := mysqlctl.SnapshotDir(uint32(tabletId)) allowedPaths := []string{ path.Join(vtenv.VtDataRoot(), "data"), mysqlctl.TabletDir(uint32(tabletId)), snapshotDir, mycnf.DataDir, mycnf.InnodbDataHomeDir, mycnf.InnodbLogGroupHomeDir, } // NOTE: trailing slash in pattern means we handle all paths with this prefix http.Handle(mysqlctl.SnapshotURLPath+"/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handleSnapshot(w, r, snapshotDir, allowedPaths) })) // we delegate out startup to the micromanagement server so these actions // will occur after we have obtained our socket. umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod)) umgmt.SetRebindDelay(float32(*rebindDelay)) umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) relog.Info("started vttablet %v", *port) umgmtSocket := fmt.Sprintf("/tmp/vttablet-%08x-umgmt.sock", *port) if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil { relog.Error("umgmt.ListenAndServe err: %v", umgmtErr) } relog.Info("done") } func serveAuthRPC() { bsonrpc.ServeAuthRPC() jsonrpc.ServeAuthRPC() } func serveRPC() { jsonrpc.ServeHTTP() jsonrpc.ServeRPC() bsonrpc.ServeHTTP() bsonrpc.ServeRPC() } func readMycnf(tabletId uint32) *mysqlctl.Mycnf { if *mycnfFile == "" { *mycnfFile = mysqlctl.MycnfFile(tabletId) } mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile) if mycnfErr != nil { relog.Fatal("mycnf read failed: %v", mycnfErr) } return mycnf } func initAgent(dbcfgs dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, dbConfigsFile, dbCredentialsFile string) { zconn := zk.NewMetaConn(false) expvar.Publish("ZkMetaConn", zconn) umgmt.AddCloseCallback(func() { zconn.Close() }) bindAddr := fmt.Sprintf(":%v", *port) // Action agent listens to changes in 
zookeeper and makes // modifications to this tablet. agent := tm.NewActionAgent(zconn, *tabletPath, *mycnfFile, dbConfigsFile, dbCredentialsFile) agent.AddChangeCallback(func(oldTablet, newTablet tm.Tablet) { if newTablet.IsServingType() { if dbcfgs.App.Dbname == "" { dbcfgs.App.Dbname = newTablet.DbName() } // Transitioning from replica to master, first disconnect // existing connections. "false" indicateds that clients must // re-resolve their endpoint before reconnecting. if newTablet.Type == tm.TYPE_MASTER && oldTablet.Type != tm.TYPE_MASTER { ts.DisallowQueries(false) } ts.AllowQueries(dbcfgs.App) mysqlctl.EnableUpdateStreamService(string(newTablet.Type), dbcfgs) } else { ts.DisallowQueries(false) mysqlctl.DisableUpdateStreamService() } }) agent.Start(bindAddr, mycnf.MysqlAddr()) umgmt.AddCloseCallback(func() { agent.Stop() }) mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl) // The TabletManager service exports read-only management related // data. tm := tm.NewTabletManager(bindAddr, nil, mysqld) rpc.Register(tm) } func initQueryService(dbcfgs dbconfigs.DBConfigs) { if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } ts.SqlQueryLogger.ServeLogs("/debug/vt/querylog") if err := jscfg.ReadJson(*qsConfigFile, &qsConfig); err != nil { relog.Warning("%s", err) } ts.RegisterQueryService(qsConfig) usefulLameDuckPeriod := float64(qsConfig.QueryTimeout + 1) if usefulLameDuckPeriod > *lameDuckPeriod { *lameDuckPeriod = usefulLameDuckPeriod relog.Info("readjusted -lame-duck-period to %f", *lameDuckPeriod) } if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, 
err)
		}
	}

	umgmt.AddCloseCallback(func() { ts.DisallowQueries(true) })
}

// handleSnapshot rewrites a /snapshot URL to its on-disk location and
// serves the file, but only when the fully symlink-resolved path lives
// under one of the allowed roots.
func handleSnapshot(rw http.ResponseWriter, req *http.Request, snapshotDir string, allowedPaths []string) {
	// /snapshot must be rewritten to the actual location of the snapshot.
	relative, err := filepath.Rel(mysqlctl.SnapshotURLPath, req.URL.Path)
	if err != nil {
		relog.Error("bad snapshot relative path %v %v", req.URL.Path, err)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
		return
	}

	// Make sure that realPath is absolute and resolve any escaping from
	// snapshotDir through a symlink.
	realPath, err := filepath.Abs(path.Join(snapshotDir, relative))
	if err != nil {
		relog.Error("bad snapshot absolute path %v %v", req.URL.Path, err)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
		return
	}

	realPath, err = filepath.EvalSymlinks(realPath)
	if err != nil {
		relog.Error("bad snapshot symlink eval %v %v", req.URL.Path, err)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
		return
	}

	// Resolve all the possible roots and make sure we're serving
	// from one of them
	for _, allowedPath := range allowedPaths {
		// eval the symlinks of the allowed path
		allowedPath, err := filepath.EvalSymlinks(allowedPath)
		if err != nil {
			continue
		}
		// Only accept a match on a path-component boundary: a bare
		// HasPrefix would let a sibling such as /vt/data-evil slip
		// past an allowed root of /vt/data.
		if realPath == allowedPath || strings.HasPrefix(realPath, allowedPath+"/") {
			sendFile(rw, req, realPath)
			return
		}
	}
	relog.Error("bad snapshot real path %v %v", req.URL.Path, realPath)
	http.Error(rw, "400 bad request", http.StatusBadRequest)
}

// custom function to serve files
func sendFile(rw http.ResponseWriter, req *http.Request, path string) {
	relog.Info("serve %v %v", req.URL.Path, path)
	file, err := os.Open(path)
	if err != nil {
		http.NotFound(rw, req)
		return
	}
	defer file.Close()

	fileinfo, err := file.Stat()
	if err != nil {
		http.NotFound(rw, req)
		return
	}

	// for directories, or for files smaller than 1k, use library
	if fileinfo.Mode().IsDir() || fileinfo.Size() < 1024 {
		http.ServeFile(rw, req, path)
		return
	}

	// supports If-Modified-Since header
	if t, err := time.Parse(http.TimeFormat,
req.Header.Get("If-Modified-Since")); err == nil && fileinfo.ModTime().Before(t.Add(1*time.Second)) { rw.WriteHeader(http.StatusNotModified) return } // support Accept-Encoding header var writer io.Writer = rw var reader io.Reader = file if !strings.HasSuffix(path, ".gz") { ae := req.Header.Get("Accept-Encoding") if strings.Contains(ae, "fgzip") { relog.Info("Forking gzip to serve %v", path) cmd := exec.Command("gzip", "--fast", "-c", path) stdout, err := cmd.StdoutPipe() if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } if err = cmd.Start(); err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } rw.Header().Set("Content-Encoding", "gzip") defer func() { cmd.Wait() relog.Info("Gzip done for %v", path) }() reader = stdout } else if strings.Contains(ae, "gzip") { gz, err := gzip.NewWriterLevel(rw, gzip.BestSpeed) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } rw.Header().Set("Content-Encoding", "gzip") defer gz.Close() writer = gz } } // add content-length if we know it if writer == rw && reader == file { rw.Header().Set("Content-Length", fmt.Sprintf("%v", fileinfo.Size())) } // and just copy content out rw.Header().Set("Last-Modified", fileinfo.ModTime().UTC().Format(http.TimeFormat)) rw.WriteHeader(http.StatusOK) if _, err := io.Copy(writer, reader); err != nil { relog.Warning("transfer failed %v: %v", path, err) } } func initUpdateStreamService(mycnf *mysqlctl.Mycnf) { mysqlctl.RegisterUpdateStreamService(mycnf) umgmt.AddCloseCallback(func() { mysqlctl.DisableUpdateStreamService() }) } fix zombie gzip processes // Copyright 2012, Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // vt tablet server: Serves queries and performs housekeeping jobs. 
package main import ( "compress/gzip" "expvar" "flag" "fmt" "io" "log" "net/http" _ "net/http/pprof" "os" "os/exec" "os/signal" "path" "path/filepath" "strings" "syscall" "time" "code.google.com/p/vitess/go/jscfg" "code.google.com/p/vitess/go/relog" rpc "code.google.com/p/vitess/go/rpcplus" "code.google.com/p/vitess/go/rpcwrap/auth" "code.google.com/p/vitess/go/rpcwrap/bsonrpc" "code.google.com/p/vitess/go/rpcwrap/jsonrpc" _ "code.google.com/p/vitess/go/snitch" "code.google.com/p/vitess/go/umgmt" "code.google.com/p/vitess/go/vt/dbconfigs" vtenv "code.google.com/p/vitess/go/vt/env" "code.google.com/p/vitess/go/vt/mysqlctl" "code.google.com/p/vitess/go/vt/servenv" tm "code.google.com/p/vitess/go/vt/tabletmanager" ts "code.google.com/p/vitess/go/vt/tabletserver" "code.google.com/p/vitess/go/zk" ) const ( DefaultLameDuckPeriod = 30.0 DefaultRebindDelay = 0.01 ) var ( port = flag.Int("port", 6509, "port for the server") lameDuckPeriod = flag.Float64("lame-duck-period", DefaultLameDuckPeriod, "how long to give in-flight transactions to finish") rebindDelay = flag.Float64("rebind-delay", DefaultRebindDelay, "artificial delay before rebinding a hijacked listener") tabletPath = flag.String("tablet-path", "", "path to zk node representing the tablet") qsConfigFile = flag.String("queryserver-config-file", "", "config file name for the query service") mycnfFile = flag.String("mycnf-file", "", "my.cnf file") authConfig = flag.String("auth-credentials", "", "name of file containing auth credentials") queryLog = flag.String("debug-querylog-file", "", "for testing: log all queries to this file") ) // Default values for the config // // The value for StreamBufferSize was chosen after trying out a few of // them. Too small buffers force too many packets to be sent. Too big // buffers force the clients to read them in multiple chunks and make // memory copies. so with the encoding overhead, this seems to work // great. 
(the overhead makes the final packets on the wire about // twice bigger than this). var qsConfig = ts.Config{ CachePoolCap: 1000, PoolSize: 16, StreamPoolSize: 750, TransactionCap: 20, TransactionTimeout: 30, MaxResultSize: 10000, QueryCacheSize: 5000, SchemaReloadTime: 30 * 60, QueryTimeout: 0, IdleTimeout: 30 * 60, StreamBufferSize: 32 * 1024, } func main() { dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags() flag.Parse() servenv.Init("vttablet") _, tabletidStr := path.Split(*tabletPath) tabletId, err := tm.ParseUid(tabletidStr) if err != nil { relog.Fatal("%s", err) } mycnf := readMycnf(tabletId) dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile) if err != nil { relog.Warning("%s", err) } initQueryService(dbcfgs) initUpdateStreamService(mycnf) initAgent(dbcfgs, mycnf, *dbConfigsFile, *dbCredentialsFile) // depends on both query and updateStream rpc.HandleHTTP() // NOTE(szopa): Changing credentials requires a server // restart. if *authConfig != "" { if err := auth.LoadCredentials(*authConfig); err != nil { relog.Error("could not load authentication credentials, not starting rpc servers: %v", err) } serveAuthRPC() } serveRPC() // make a list of paths we can serve HTTP traffic from. // we don't resolve them here to real paths, as they might not exits yet snapshotDir := mysqlctl.SnapshotDir(uint32(tabletId)) allowedPaths := []string{ path.Join(vtenv.VtDataRoot(), "data"), mysqlctl.TabletDir(uint32(tabletId)), snapshotDir, mycnf.DataDir, mycnf.InnodbDataHomeDir, mycnf.InnodbLogGroupHomeDir, } // NOTE: trailing slash in pattern means we handle all paths with this prefix http.Handle(mysqlctl.SnapshotURLPath+"/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handleSnapshot(w, r, snapshotDir, allowedPaths) })) // we delegate out startup to the micromanagement server so these actions // will occur after we have obtained our socket. 
umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod)) umgmt.SetRebindDelay(float32(*rebindDelay)) umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) relog.Info("started vttablet %v", *port) umgmtSocket := fmt.Sprintf("/tmp/vttablet-%08x-umgmt.sock", *port) if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil { relog.Error("umgmt.ListenAndServe err: %v", umgmtErr) } relog.Info("done") } func serveAuthRPC() { bsonrpc.ServeAuthRPC() jsonrpc.ServeAuthRPC() } func serveRPC() { jsonrpc.ServeHTTP() jsonrpc.ServeRPC() bsonrpc.ServeHTTP() bsonrpc.ServeRPC() } func readMycnf(tabletId uint32) *mysqlctl.Mycnf { if *mycnfFile == "" { *mycnfFile = mysqlctl.MycnfFile(tabletId) } mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile) if mycnfErr != nil { relog.Fatal("mycnf read failed: %v", mycnfErr) } return mycnf } func initAgent(dbcfgs dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, dbConfigsFile, dbCredentialsFile string) { zconn := zk.NewMetaConn(false) expvar.Publish("ZkMetaConn", zconn) umgmt.AddCloseCallback(func() { zconn.Close() }) bindAddr := fmt.Sprintf(":%v", *port) // Action agent listens to changes in zookeeper and makes // modifications to this tablet. agent := tm.NewActionAgent(zconn, *tabletPath, *mycnfFile, dbConfigsFile, dbCredentialsFile) agent.AddChangeCallback(func(oldTablet, newTablet tm.Tablet) { if newTablet.IsServingType() { if dbcfgs.App.Dbname == "" { dbcfgs.App.Dbname = newTablet.DbName() } // Transitioning from replica to master, first disconnect // existing connections. "false" indicateds that clients must // re-resolve their endpoint before reconnecting. 
if newTablet.Type == tm.TYPE_MASTER && oldTablet.Type != tm.TYPE_MASTER { ts.DisallowQueries(false) } ts.AllowQueries(dbcfgs.App) mysqlctl.EnableUpdateStreamService(string(newTablet.Type), dbcfgs) } else { ts.DisallowQueries(false) mysqlctl.DisableUpdateStreamService() } }) agent.Start(bindAddr, mycnf.MysqlAddr()) umgmt.AddCloseCallback(func() { agent.Stop() }) mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl) // The TabletManager service exports read-only management related // data. tm := tm.NewTabletManager(bindAddr, nil, mysqld) rpc.Register(tm) } func initQueryService(dbcfgs dbconfigs.DBConfigs) { if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } ts.SqlQueryLogger.ServeLogs("/debug/vt/querylog") if err := jscfg.ReadJson(*qsConfigFile, &qsConfig); err != nil { relog.Warning("%s", err) } ts.RegisterQueryService(qsConfig) usefulLameDuckPeriod := float64(qsConfig.QueryTimeout + 1) if usefulLameDuckPeriod > *lameDuckPeriod { *lameDuckPeriod = usefulLameDuckPeriod relog.Info("readjusted -lame-duck-period to %f", *lameDuckPeriod) } if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } umgmt.AddCloseCallback(func() { ts.DisallowQueries(true) }) } func handleSnapshot(rw http.ResponseWriter, req *http.Request, snapshotDir string, allowedPaths []string) { // /snapshot must be rewritten to the actual location of the snapshot. 
relative, err := filepath.Rel(mysqlctl.SnapshotURLPath, req.URL.Path) if err != nil { relog.Error("bad snapshot relative path %v %v", req.URL.Path, err) http.Error(rw, "400 bad request", http.StatusBadRequest) return } // Make sure that realPath is absolute and resolve any escaping from // snapshotDir through a symlink. realPath, err := filepath.Abs(path.Join(snapshotDir, relative)) if err != nil { relog.Error("bad snapshot absolute path %v %v", req.URL.Path, err) http.Error(rw, "400 bad request", http.StatusBadRequest) return } realPath, err = filepath.EvalSymlinks(realPath) if err != nil { relog.Error("bad snapshot symlink eval %v %v", req.URL.Path, err) http.Error(rw, "400 bad request", http.StatusBadRequest) return } // Resolve all the possible roots and make sure we're serving // from one of them for _, allowedPath := range allowedPaths { // eval the symlinks of the allowed path allowedPath, err := filepath.EvalSymlinks(allowedPath) if err != nil { continue } if strings.HasPrefix(realPath, allowedPath) { sendFile(rw, req, realPath) return } } relog.Error("bad snapshot real path %v %v", req.URL.Path, realPath) http.Error(rw, "400 bad request", http.StatusBadRequest) } // custom function to serve files func sendFile(rw http.ResponseWriter, req *http.Request, path string) { relog.Info("serve %v %v", req.URL.Path, path) file, err := os.Open(path) if err != nil { http.NotFound(rw, req) return } defer file.Close() fileinfo, err := file.Stat() if err != nil { http.NotFound(rw, req) return } // for directories, or for files smaller than 1k, use library if fileinfo.Mode().IsDir() || fileinfo.Size() < 1024 { http.ServeFile(rw, req, path) return } // supports If-Modified-Since header if t, err := time.Parse(http.TimeFormat, req.Header.Get("If-Modified-Since")); err == nil && fileinfo.ModTime().Before(t.Add(1*time.Second)) { rw.WriteHeader(http.StatusNotModified) return } // support Accept-Encoding header var writer io.Writer = rw var reader io.Reader = file if 
!strings.HasSuffix(path, ".gz") { ae := req.Header.Get("Accept-Encoding") if strings.Contains(ae, "fgzip") { relog.Info("Forking gzip to serve %v", path) cmd := exec.Command("gzip", "--fast", "-c", path) stdout, err := cmd.StdoutPipe() if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } defer stdout.Close() if err = cmd.Start(); err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } rw.Header().Set("Content-Encoding", "gzip") defer func() { // An early abort leaves a process dangling. cmd.Process.Kill() if err := cmd.Wait(); err != nil { relog.Warning("gzip err for %v: %v", path, err) } else { relog.Info("gzip done for %v", path) } }() reader = stdout } else if strings.Contains(ae, "gzip") { gz, err := gzip.NewWriterLevel(rw, gzip.BestSpeed) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } rw.Header().Set("Content-Encoding", "gzip") defer gz.Close() writer = gz } } // add content-length if we know it if writer == rw && reader == file { rw.Header().Set("Content-Length", fmt.Sprintf("%v", fileinfo.Size())) } // and just copy content out rw.Header().Set("Last-Modified", fileinfo.ModTime().UTC().Format(http.TimeFormat)) rw.WriteHeader(http.StatusOK) if _, err := io.Copy(writer, reader); err != nil { relog.Warning("transfer failed %v: %v", path, err) } } func initUpdateStreamService(mycnf *mysqlctl.Mycnf) { mysqlctl.RegisterUpdateStreamService(mycnf) umgmt.AddCloseCallback(func() { mysqlctl.DisableUpdateStreamService() }) }
// Copyright 2012, Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // vt tablet server: Serves queries and performs housekeeping jobs. package main import ( "compress/gzip" "expvar" "flag" "fmt" "io" "log" "net/http" _ "net/http/pprof" "os" "os/signal" "path" "path/filepath" "strings" "syscall" "code.google.com/p/vitess/go/jscfg" "code.google.com/p/vitess/go/relog" rpc "code.google.com/p/vitess/go/rpcplus" "code.google.com/p/vitess/go/rpcwrap/auth" "code.google.com/p/vitess/go/rpcwrap/bsonrpc" "code.google.com/p/vitess/go/rpcwrap/jsonrpc" _ "code.google.com/p/vitess/go/snitch" "code.google.com/p/vitess/go/umgmt" "code.google.com/p/vitess/go/vt/dbconfigs" "code.google.com/p/vitess/go/vt/mysqlctl" "code.google.com/p/vitess/go/vt/servenv" tm "code.google.com/p/vitess/go/vt/tabletmanager" ts "code.google.com/p/vitess/go/vt/tabletserver" "code.google.com/p/vitess/go/zk" ) const ( DefaultLameDuckPeriod = 30.0 DefaultRebindDelay = 0.01 ) var ( port = flag.Int("port", 6509, "port for the server") lameDuckPeriod = flag.Float64("lame-duck-period", DefaultLameDuckPeriod, "how long to give in-flight transactions to finish") rebindDelay = flag.Float64("rebind-delay", DefaultRebindDelay, "artificial delay before rebinding a hijacked listener") tabletPath = flag.String("tablet-path", "", "path to zk node representing the tablet") qsConfigFile = flag.String("queryserver-config-file", "", "config file name for the query service") mycnfFile = flag.String("mycnf-file", "", "my.cnf file") authConfig = flag.String("auth-credentials", "", "name of file containing auth credentials") queryLog = flag.String("debug-querylog-file", "", "for testing: log all queries to this file") ) // Default values for the config // // The value for StreamBufferSize was chosen after trying out a few of // them. Too small buffers force too many packets to be sent. 
Too big // buffers force the clients to read them in multiple chunks and make // memory copies. so with the encoding overhead, this seems to work // great. (the overhead makes the final packets on the wire about // twice bigger than this). var qsConfig = ts.Config{ CachePoolCap: 1000, PoolSize: 16, StreamPoolSize: 750, TransactionCap: 20, TransactionTimeout: 30, MaxResultSize: 10000, QueryCacheSize: 5000, SchemaReloadTime: 30 * 60, QueryTimeout: 0, IdleTimeout: 30 * 60, StreamBufferSize: 32 * 1024, } // this is a http.ResponseWriter adapter / proxy layer // to support gzipping on the fly. Both listed interfaces have mostly different // methods, so the anonymous member variables work great. // Only 'Write' needs to be special-cased to the gzip Writer. // See: http://nf.id.au/roll-your-own-gzip-encoded-http-handler type gzipResponseWriter struct { io.Writer http.ResponseWriter } func (w gzipResponseWriter) Write(b []byte) (int, error) { return w.Writer.Write(b) } func main() { dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags() flag.Parse() servenv.Init("vttablet") _, tabletidStr := path.Split(*tabletPath) tabletId, err := tm.ParseUid(tabletidStr) if err != nil { relog.Fatal("%s", err) } mycnf := readMycnf(tabletId) dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile) if err != nil { relog.Warning("%s", err) } initQueryService(dbcfgs) initUpdateStreamService(mycnf) initAgent(dbcfgs, mycnf, *dbConfigsFile, *dbCredentialsFile) // depends on both query and updateStream rpc.HandleHTTP() // NOTE(szopa): Changing credentials requires a server // restart. 
if *authConfig != "" { if err := auth.LoadCredentials(*authConfig); err != nil { relog.Error("could not load authentication credentials, not starting rpc servers: %v", err) } serveAuthRPC() } serveRPC() // NOTE: trailing slash in pattern means we handle all paths with this prefix http.Handle(mysqlctl.SnapshotURLPath+"/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // let's support gzip Accept-Encoding for files whose name // doesn't end in .gz (no double-zipping!) if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && !strings.HasSuffix(r.URL.Path, ".gz") { gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Encoding", "gzip") w = gzipResponseWriter{Writer: gz, ResponseWriter: w} defer gz.Close() } handleSnapshot(w, r, mysqlctl.TabletDir(uint32(tabletId)), mysqlctl.SnapshotDir(uint32(tabletId))) })) // we delegate out startup to the micromanagement server so these actions // will occur after we have obtained our socket. 
umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod)) umgmt.SetRebindDelay(float32(*rebindDelay)) umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) relog.Info("started vttablet %v", *port) umgmtSocket := fmt.Sprintf("/tmp/vttablet-%08x-umgmt.sock", *port) if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil { relog.Error("umgmt.ListenAndServe err: %v", umgmtErr) } relog.Info("done") } func serveAuthRPC() { bsonrpc.ServeAuthRPC() jsonrpc.ServeAuthRPC() } func serveRPC() { jsonrpc.ServeHTTP() jsonrpc.ServeRPC() bsonrpc.ServeHTTP() bsonrpc.ServeRPC() } func readMycnf(tabletId uint32) *mysqlctl.Mycnf { if *mycnfFile == "" { *mycnfFile = mysqlctl.MycnfFile(tabletId) } mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile) if mycnfErr != nil { relog.Fatal("mycnf read failed: %v", mycnfErr) } return mycnf } func initAgent(dbcfgs dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, dbConfigsFile, dbCredentialsFile string) { zconn := zk.NewMetaConn(false) expvar.Publish("ZkMetaConn", zconn) umgmt.AddCloseCallback(func() { zconn.Close() }) bindAddr := fmt.Sprintf(":%v", *port) // Action agent listens to changes in zookeeper and makes // modifications to this tablet. agent := tm.NewActionAgent(zconn, *tabletPath, *mycnfFile, dbConfigsFile, dbCredentialsFile) agent.AddChangeCallback(func(oldTablet, newTablet tm.Tablet) { if newTablet.IsServingType() { if dbcfgs.App.Dbname == "" { dbcfgs.App.Dbname = newTablet.DbName() } // Transitioning from replica to master, first disconnect // existing connections. "false" indicateds that clients must // re-resolve their endpoint before reconnecting. 
if newTablet.Type == tm.TYPE_MASTER && oldTablet.Type != tm.TYPE_MASTER { ts.DisallowQueries(false) } ts.AllowQueries(dbcfgs.App) mysqlctl.EnableUpdateStreamService(string(newTablet.Type), dbcfgs) } else { ts.DisallowQueries(false) mysqlctl.DisableUpdateStreamService() } }) agent.Start(bindAddr, mycnf.MysqlAddr()) umgmt.AddCloseCallback(func() { agent.Stop() }) mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl) // The TabletManager service exports read-only management related // data. tm := tm.NewTabletManager(bindAddr, nil, mysqld) rpc.Register(tm) } func initQueryService(dbcfgs dbconfigs.DBConfigs) { if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } if err := jscfg.ReadJson(*qsConfigFile, &qsConfig); err != nil { relog.Warning("%s", err) } ts.RegisterQueryService(qsConfig) usefulLameDuckPeriod := float64(qsConfig.QueryTimeout + 1) if usefulLameDuckPeriod > *lameDuckPeriod { *lameDuckPeriod = usefulLameDuckPeriod relog.Info("readjusted -lame-duck-period to %f", *lameDuckPeriod) } if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } umgmt.AddCloseCallback(func() { ts.DisallowQueries(true) }) } func handleSnapshot(rw http.ResponseWriter, req *http.Request, tabletDir, snapshotDir string) { // /snapshot must be rewritten to the actual location of the snapshot. 
relative, err := filepath.Rel(mysqlctl.SnapshotURLPath, req.URL.Path)
	if err != nil {
		relog.Error("bad request %v %v", req.URL.Path, err)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
		return
	}

	// Make sure that realPath is absolute and isn't escaping from
	// snapshotDir through a symlink.
	realPath, err := filepath.Abs(path.Join(snapshotDir, relative))
	if err != nil {
		relog.Error("bad request %v", req.URL.Path)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
		return
	}

	realPath, err = filepath.EvalSymlinks(realPath)
	if err != nil {
		relog.Error("bad request %v", req.URL.Path)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
		return
	}

	// Make sure that we are not serving something like
	// /snapshot/../../../etc/passwd.
	// by making sure we only serve files from:
	// - the tablet directory (for symlinked data files)
	// - the snapshot directory
	// The match is anchored on a path-component boundary: a bare
	// HasPrefix would let a sibling like <tabletDir>-evil through.
	inTabletDir := realPath == tabletDir || strings.HasPrefix(realPath, tabletDir+"/")
	inSnapshotDir := realPath == snapshotDir || strings.HasPrefix(realPath, snapshotDir+"/")
	if inTabletDir || inSnapshotDir {
		relog.Info("serve %v %v", req.URL.Path, realPath)
		http.ServeFile(rw, req, realPath)
	} else {
		relog.Error("bad request %v", req.URL.Path)
		http.Error(rw, "400 bad request", http.StatusBadRequest)
	}
}

// initUpdateStreamService registers the update stream service and makes
// sure it is disabled again at shutdown.
func initUpdateStreamService(mycnf *mysqlctl.Mycnf) {
	mysqlctl.RegisterUpdateStreamService(mycnf)

	umgmt.AddCloseCallback(func() {
		mysqlctl.DisableUpdateStreamService()
	})
}

Better security audit of the allowed paths for HTTP serving. LGTM Mike.

// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// vt tablet server: Serves queries and performs housekeeping jobs.
package main import ( "compress/gzip" "expvar" "flag" "fmt" "io" "log" "net/http" _ "net/http/pprof" "os" "os/signal" "path" "path/filepath" "strings" "syscall" "code.google.com/p/vitess/go/jscfg" "code.google.com/p/vitess/go/relog" rpc "code.google.com/p/vitess/go/rpcplus" "code.google.com/p/vitess/go/rpcwrap/auth" "code.google.com/p/vitess/go/rpcwrap/bsonrpc" "code.google.com/p/vitess/go/rpcwrap/jsonrpc" _ "code.google.com/p/vitess/go/snitch" "code.google.com/p/vitess/go/umgmt" "code.google.com/p/vitess/go/vt/dbconfigs" "code.google.com/p/vitess/go/vt/mysqlctl" "code.google.com/p/vitess/go/vt/servenv" tm "code.google.com/p/vitess/go/vt/tabletmanager" ts "code.google.com/p/vitess/go/vt/tabletserver" "code.google.com/p/vitess/go/zk" ) const ( DefaultLameDuckPeriod = 30.0 DefaultRebindDelay = 0.01 ) var ( port = flag.Int("port", 6509, "port for the server") lameDuckPeriod = flag.Float64("lame-duck-period", DefaultLameDuckPeriod, "how long to give in-flight transactions to finish") rebindDelay = flag.Float64("rebind-delay", DefaultRebindDelay, "artificial delay before rebinding a hijacked listener") tabletPath = flag.String("tablet-path", "", "path to zk node representing the tablet") qsConfigFile = flag.String("queryserver-config-file", "", "config file name for the query service") mycnfFile = flag.String("mycnf-file", "", "my.cnf file") authConfig = flag.String("auth-credentials", "", "name of file containing auth credentials") queryLog = flag.String("debug-querylog-file", "", "for testing: log all queries to this file") ) // Default values for the config // // The value for StreamBufferSize was chosen after trying out a few of // them. Too small buffers force too many packets to be sent. Too big // buffers force the clients to read them in multiple chunks and make // memory copies. so with the encoding overhead, this seems to work // great. (the overhead makes the final packets on the wire about // twice bigger than this). 
var qsConfig = ts.Config{ CachePoolCap: 1000, PoolSize: 16, StreamPoolSize: 750, TransactionCap: 20, TransactionTimeout: 30, MaxResultSize: 10000, QueryCacheSize: 5000, SchemaReloadTime: 30 * 60, QueryTimeout: 0, IdleTimeout: 30 * 60, StreamBufferSize: 32 * 1024, } // this is a http.ResponseWriter adapter / proxy layer // to support gzipping on the fly. Both listed interfaces have mostly different // methods, so the anonymous member variables work great. // Only 'Write' needs to be special-cased to the gzip Writer. // See: http://nf.id.au/roll-your-own-gzip-encoded-http-handler type gzipResponseWriter struct { io.Writer http.ResponseWriter } func (w gzipResponseWriter) Write(b []byte) (int, error) { return w.Writer.Write(b) } func main() { dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags() flag.Parse() servenv.Init("vttablet") _, tabletidStr := path.Split(*tabletPath) tabletId, err := tm.ParseUid(tabletidStr) if err != nil { relog.Fatal("%s", err) } mycnf := readMycnf(tabletId) dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile) if err != nil { relog.Warning("%s", err) } initQueryService(dbcfgs) initUpdateStreamService(mycnf) initAgent(dbcfgs, mycnf, *dbConfigsFile, *dbCredentialsFile) // depends on both query and updateStream rpc.HandleHTTP() // NOTE(szopa): Changing credentials requires a server // restart. if *authConfig != "" { if err := auth.LoadCredentials(*authConfig); err != nil { relog.Error("could not load authentication credentials, not starting rpc servers: %v", err) } serveAuthRPC() } serveRPC() // make a list of paths we can serve HTTP traffic from. 
// we don't resolve them here to real paths, as they might not exits yet snapshotDir := mysqlctl.SnapshotDir(uint32(tabletId)) allowedPaths := []string{ mysqlctl.TabletDir(uint32(tabletId)), snapshotDir, mycnf.DataDir, mycnf.InnodbDataHomeDir, mycnf.InnodbLogGroupHomeDir, } // NOTE: trailing slash in pattern means we handle all paths with this prefix http.Handle(mysqlctl.SnapshotURLPath+"/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // let's support gzip Accept-Encoding for files whose name // doesn't end in .gz (no double-zipping!) if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && !strings.HasSuffix(r.URL.Path, ".gz") { gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Encoding", "gzip") w = gzipResponseWriter{Writer: gz, ResponseWriter: w} defer gz.Close() } handleSnapshot(w, r, snapshotDir, allowedPaths) })) // we delegate out startup to the micromanagement server so these actions // will occur after we have obtained our socket. 
umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod)) umgmt.SetRebindDelay(float32(*rebindDelay)) umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) relog.Info("started vttablet %v", *port) umgmtSocket := fmt.Sprintf("/tmp/vttablet-%08x-umgmt.sock", *port) if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil { relog.Error("umgmt.ListenAndServe err: %v", umgmtErr) } relog.Info("done") } func serveAuthRPC() { bsonrpc.ServeAuthRPC() jsonrpc.ServeAuthRPC() } func serveRPC() { jsonrpc.ServeHTTP() jsonrpc.ServeRPC() bsonrpc.ServeHTTP() bsonrpc.ServeRPC() } func readMycnf(tabletId uint32) *mysqlctl.Mycnf { if *mycnfFile == "" { *mycnfFile = mysqlctl.MycnfFile(tabletId) } mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile) if mycnfErr != nil { relog.Fatal("mycnf read failed: %v", mycnfErr) } return mycnf } func initAgent(dbcfgs dbconfigs.DBConfigs, mycnf *mysqlctl.Mycnf, dbConfigsFile, dbCredentialsFile string) { zconn := zk.NewMetaConn(false) expvar.Publish("ZkMetaConn", zconn) umgmt.AddCloseCallback(func() { zconn.Close() }) bindAddr := fmt.Sprintf(":%v", *port) // Action agent listens to changes in zookeeper and makes // modifications to this tablet. agent := tm.NewActionAgent(zconn, *tabletPath, *mycnfFile, dbConfigsFile, dbCredentialsFile) agent.AddChangeCallback(func(oldTablet, newTablet tm.Tablet) { if newTablet.IsServingType() { if dbcfgs.App.Dbname == "" { dbcfgs.App.Dbname = newTablet.DbName() } // Transitioning from replica to master, first disconnect // existing connections. "false" indicateds that clients must // re-resolve their endpoint before reconnecting. 
if newTablet.Type == tm.TYPE_MASTER && oldTablet.Type != tm.TYPE_MASTER { ts.DisallowQueries(false) } ts.AllowQueries(dbcfgs.App) mysqlctl.EnableUpdateStreamService(string(newTablet.Type), dbcfgs) } else { ts.DisallowQueries(false) mysqlctl.DisableUpdateStreamService() } }) agent.Start(bindAddr, mycnf.MysqlAddr()) umgmt.AddCloseCallback(func() { agent.Stop() }) mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl) // The TabletManager service exports read-only management related // data. tm := tm.NewTabletManager(bindAddr, nil, mysqld) rpc.Register(tm) } func initQueryService(dbcfgs dbconfigs.DBConfigs) { if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } if err := jscfg.ReadJson(*qsConfigFile, &qsConfig); err != nil { relog.Warning("%s", err) } ts.RegisterQueryService(qsConfig) usefulLameDuckPeriod := float64(qsConfig.QueryTimeout + 1) if usefulLameDuckPeriod > *lameDuckPeriod { *lameDuckPeriod = usefulLameDuckPeriod relog.Info("readjusted -lame-duck-period to %f", *lameDuckPeriod) } if *queryLog != "" { if f, err := os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil { ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG) } else { relog.Fatal("Error opening file %v: %v", *queryLog, err) } } umgmt.AddCloseCallback(func() { ts.DisallowQueries(true) }) } func handleSnapshot(rw http.ResponseWriter, req *http.Request, snapshotDir string, allowedPaths []string) { // /snapshot must be rewritten to the actual location of the snapshot. 
relative, err := filepath.Rel(mysqlctl.SnapshotURLPath, req.URL.Path) if err != nil { relog.Error("bad request %v %v", req.URL.Path, err) http.Error(rw, "400 bad request", http.StatusBadRequest) return } // Make sure that realPath is absolute and resolve any escaping from // snapshotDir through a symlink. realPath, err := filepath.Abs(path.Join(snapshotDir, relative)) if err != nil { relog.Error("bad request %v", req.URL.Path) http.Error(rw, "400 bad request", http.StatusBadRequest) return } realPath, err = filepath.EvalSymlinks(realPath) if err != nil { relog.Error("bad request %v", req.URL.Path) http.Error(rw, "400 bad request", http.StatusBadRequest) return } // Resolve all the possible roots and make sure we're serving // from one of them for _, allowedPath := range allowedPaths { // eval the symlinks of the allowed path allowedPath, err := filepath.EvalSymlinks(allowedPath) if err != nil { continue } if strings.HasPrefix(realPath, allowedPath) { relog.Info("serve %v %v", req.URL.Path, realPath) http.ServeFile(rw, req, realPath) return } } relog.Error("bad request %v", req.URL.Path) http.Error(rw, "400 bad request", http.StatusBadRequest) } func initUpdateStreamService(mycnf *mysqlctl.Mycnf) { mysqlctl.RegisterUpdateStreamService(mycnf) umgmt.AddCloseCallback(func() { mysqlctl.DisableUpdateStreamService() }) }
// vttestserver is a native Go implementation of `run_local_server.py`. // It allows users to spawn a self-contained Vitess server for local testing/CI package main import ( "encoding/json" "flag" "fmt" "os" "strconv" "strings" "github.com/golang/protobuf/proto" "vitess.io/vitess/go/vt/log" vttestpb "vitess.io/vitess/go/vt/proto/vttest" "vitess.io/vitess/go/vt/vttest" ) type topoFlags struct { cells string keyspaces string shards string replicas int rdonly int } func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) { topo := &vttestpb.VTTestTopology{} topo.Cells = strings.Split(t.cells, ",") keyspaces := strings.Split(t.keyspaces, ",") shardCounts := strings.Split(t.shards, ",") if len(keyspaces) != len(shardCounts) { return nil, fmt.Errorf("--keyspaces must be same length as --shards") } for i := range keyspaces { name := keyspaces[i] numshards, err := strconv.ParseInt(shardCounts[i], 10, 32) if err != nil { return nil, err } ks := &vttestpb.Keyspace{ Name: name, ReplicaCount: int32(t.replicas), RdonlyCount: int32(t.rdonly), } for _, shardname := range vttest.GetShardNames(int(numshards)) { ks.Shards = append(ks.Shards, &vttestpb.Shard{ Name: shardname, }) } topo.Keyspaces = append(topo.Keyspaces, ks) } return topo, nil } func parseFlags() (config vttest.Config, env vttest.Environment, err error) { var seed vttest.SeedConfig var topo topoFlags var basePort int var protoTopo string var doSeed bool var mycnf string flag.IntVar(&basePort, "port", 0, "Port to use for vtcombo. If this is 0, a random port will be chosen.") flag.StringVar(&protoTopo, "proto_topo", "", "Define the fake cluster topology as a compact text format encoded"+ " vttest proto. See vttest.proto for more information.") flag.StringVar(&config.SchemaDir, "schema_dir", "", "Directory for initial schema files. Within this dir,"+ " there should be a subdir for each keyspace. 
Within"+ " each keyspace dir, each file is executed as SQL"+ " after the database is created on each shard."+ " If the directory contains a vschema.json file, it"+ " will be used as the vschema for the V3 API.") flag.StringVar(&config.DefaultSchemaDir, "default_schema_dir", "", "Default directory for initial schema files. If no schema is found"+ " in schema_dir, default to this location.") flag.BoolVar(&config.OnlyMySQL, "mysql_only", false, "If this flag is set only mysql is initialized."+ " The rest of the vitess components are not started."+ " Also, the output specifies the mysql unix socket"+ " instead of the vtgate port.") flag.BoolVar(&doSeed, "initialize_with_random_data", false, "If this flag is each table-shard will be initialized"+ " with random data. See also the 'rng_seed' and 'min_shard_size'"+ " and 'max_shard_size' flags.") flag.IntVar(&seed.RngSeed, "rng_seed", 123, "The random number generator seed to use when initializing"+ " with random data (see also --initialize_with_random_data)."+ " Multiple runs with the same seed will result with the same"+ " initial data.") flag.IntVar(&seed.MinSize, "min_table_shard_size", 1000, "The minimum number of initial rows in a table shard. Ignored if"+ "--initialize_with_random_data is false. The actual number is chosen"+ " randomly.") flag.IntVar(&seed.MaxSize, "max_table_shard_size", 10000, "The maximum number of initial rows in a table shard. Ignored if"+ "--initialize_with_random_data is false. The actual number is chosen"+ " randomly") flag.Float64Var(&seed.NullProbability, "null_probability", 0.1, "The probability to initialize a field with 'NULL' "+ " if --initialize_with_random_data is true. 
Only applies to fields"+ " that can contain NULL values.") flag.StringVar(&config.WebDir, "web_dir", "", "location of the vtctld web server files.") flag.StringVar(&config.WebDir2, "web_dir2", "", "location of the vtctld2 web server files.") flag.StringVar(&mycnf, "extra_my_cnf", "", "extra files to add to the config, separated by ':'") flag.StringVar(&topo.cells, "cells", "test", "Comma separated list of cells") flag.StringVar(&topo.keyspaces, "keyspaces", "test_keyspace", "Comma separated list of keyspaces") flag.StringVar(&topo.shards, "num_shards", "2", "Comma separated shard count (one per keyspace)") flag.IntVar(&topo.replicas, "replica_count", 2, "Replica tablets per shard (includes master)") flag.IntVar(&topo.rdonly, "rdonly_count", 1, "Rdonly tablets per shard") flag.StringVar(&config.Charset, "charset", "utf8", "MySQL charset") flag.StringVar(&config.SnapshotFile, "snapshot_file", "", "A MySQL DB snapshot file") flag.Parse() if basePort != 0 { env, err = vttest.NewLocalTestEnv("", basePort) if err != nil { return } } if protoTopo == "" { config.Topology, err = topo.buildTopology() if err != nil { return } } else { var topology vttestpb.VTTestTopology err = proto.UnmarshalText(protoTopo, &topology) if err != nil { return } if len(topology.Cells) == 0 { topology.Cells = append(topology.Cells, "test") } config.Topology = &topology } if doSeed { config.Seed = &seed } if mycnf != "" { config.ExtraMyCnf = strings.Split(mycnf, ":") } return } func main() { config, env, err := parseFlags() if err != nil { log.Fatal(err) } log.Infof("Starting local cluster...") log.Infof("config: %#v", config) cluster := vttest.LocalCluster{ Config: config, Env: env, } err = cluster.Setup() defer cluster.TearDown() if err != nil { log.Fatal(err) } kvconf := cluster.JSONConfig() if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil { log.Fatal(err) } log.Info("Local cluster started. 
Waiting for stdin input...") _, err = fmt.Scanln() if err != nil { log.Fatal(err) } log.Info("Shutting down cleanly") } Support specifying mysql_bind_host for vttestserver Signed-off-by: Jon Tirsen <a563f38b2a987e2aee263634a499e77cd89fe2fc@squareup.com> // vttestserver is a native Go implementation of `run_local_server.py`. // It allows users to spawn a self-contained Vitess server for local testing/CI package main import ( "encoding/json" "flag" "fmt" "os" "strconv" "strings" "github.com/golang/protobuf/proto" "vitess.io/vitess/go/vt/log" vttestpb "vitess.io/vitess/go/vt/proto/vttest" "vitess.io/vitess/go/vt/vttest" ) type topoFlags struct { cells string keyspaces string shards string replicas int rdonly int } func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) { topo := &vttestpb.VTTestTopology{} topo.Cells = strings.Split(t.cells, ",") keyspaces := strings.Split(t.keyspaces, ",") shardCounts := strings.Split(t.shards, ",") if len(keyspaces) != len(shardCounts) { return nil, fmt.Errorf("--keyspaces must be same length as --shards") } for i := range keyspaces { name := keyspaces[i] numshards, err := strconv.ParseInt(shardCounts[i], 10, 32) if err != nil { return nil, err } ks := &vttestpb.Keyspace{ Name: name, ReplicaCount: int32(t.replicas), RdonlyCount: int32(t.rdonly), } for _, shardname := range vttest.GetShardNames(int(numshards)) { ks.Shards = append(ks.Shards, &vttestpb.Shard{ Name: shardname, }) } topo.Keyspaces = append(topo.Keyspaces, ks) } return topo, nil } func parseFlags() (config vttest.Config, env vttest.Environment, err error) { var seed vttest.SeedConfig var topo topoFlags var basePort int var protoTopo string var doSeed bool var mycnf string flag.IntVar(&basePort, "port", 0, "Port to use for vtcombo. If this is 0, a random port will be chosen.") flag.StringVar(&protoTopo, "proto_topo", "", "Define the fake cluster topology as a compact text format encoded"+ " vttest proto. 
See vttest.proto for more information.") flag.StringVar(&config.SchemaDir, "schema_dir", "", "Directory for initial schema files. Within this dir,"+ " there should be a subdir for each keyspace. Within"+ " each keyspace dir, each file is executed as SQL"+ " after the database is created on each shard."+ " If the directory contains a vschema.json file, it"+ " will be used as the vschema for the V3 API.") flag.StringVar(&config.DefaultSchemaDir, "default_schema_dir", "", "Default directory for initial schema files. If no schema is found"+ " in schema_dir, default to this location.") flag.BoolVar(&config.OnlyMySQL, "mysql_only", false, "If this flag is set only mysql is initialized."+ " The rest of the vitess components are not started."+ " Also, the output specifies the mysql unix socket"+ " instead of the vtgate port.") flag.BoolVar(&doSeed, "initialize_with_random_data", false, "If this flag is each table-shard will be initialized"+ " with random data. See also the 'rng_seed' and 'min_shard_size'"+ " and 'max_shard_size' flags.") flag.IntVar(&seed.RngSeed, "rng_seed", 123, "The random number generator seed to use when initializing"+ " with random data (see also --initialize_with_random_data)."+ " Multiple runs with the same seed will result with the same"+ " initial data.") flag.IntVar(&seed.MinSize, "min_table_shard_size", 1000, "The minimum number of initial rows in a table shard. Ignored if"+ "--initialize_with_random_data is false. The actual number is chosen"+ " randomly.") flag.IntVar(&seed.MaxSize, "max_table_shard_size", 10000, "The maximum number of initial rows in a table shard. Ignored if"+ "--initialize_with_random_data is false. The actual number is chosen"+ " randomly") flag.Float64Var(&seed.NullProbability, "null_probability", 0.1, "The probability to initialize a field with 'NULL' "+ " if --initialize_with_random_data is true. 
Only applies to fields"+ " that can contain NULL values.") flag.StringVar(&config.WebDir, "web_dir", "", "location of the vtctld web server files.") flag.StringVar(&config.WebDir2, "web_dir2", "", "location of the vtctld2 web server files.") flag.StringVar(&config.MySQLBindHost, "mysql_bind_host", "localhost", "which host to bind vtgate mysql listener to") flag.StringVar(&mycnf, "extra_my_cnf", "", "extra files to add to the config, separated by ':'") flag.StringVar(&topo.cells, "cells", "test", "Comma separated list of cells") flag.StringVar(&topo.keyspaces, "keyspaces", "test_keyspace", "Comma separated list of keyspaces") flag.StringVar(&topo.shards, "num_shards", "2", "Comma separated shard count (one per keyspace)") flag.IntVar(&topo.replicas, "replica_count", 2, "Replica tablets per shard (includes master)") flag.IntVar(&topo.rdonly, "rdonly_count", 1, "Rdonly tablets per shard") flag.StringVar(&config.Charset, "charset", "utf8", "MySQL charset") flag.StringVar(&config.SnapshotFile, "snapshot_file", "", "A MySQL DB snapshot file") flag.Parse() if basePort != 0 { env, err = vttest.NewLocalTestEnv("", basePort) if err != nil { return } } if protoTopo == "" { config.Topology, err = topo.buildTopology() if err != nil { return } } else { var topology vttestpb.VTTestTopology err = proto.UnmarshalText(protoTopo, &topology) if err != nil { return } if len(topology.Cells) == 0 { topology.Cells = append(topology.Cells, "test") } config.Topology = &topology } if doSeed { config.Seed = &seed } if mycnf != "" { config.ExtraMyCnf = strings.Split(mycnf, ":") } return } func main() { config, env, err := parseFlags() if err != nil { log.Fatal(err) } log.Infof("Starting local cluster...") log.Infof("config: %#v", config) cluster := vttest.LocalCluster{ Config: config, Env: env, } err = cluster.Setup() defer cluster.TearDown() if err != nil { log.Fatal(err) } kvconf := cluster.JSONConfig() if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil { log.Fatal(err) } 
log.Info("Local cluster started. Waiting for stdin input...") _, err = fmt.Scanln() if err != nil { log.Fatal(err) } log.Info("Shutting down cleanly") }
// Copyright 2017 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // protoc invokes the protobuf compiler and captures the resulting .pb.go file. package main import ( "bytes" "errors" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "path/filepath" "strings" ) type genFileInfo struct { base string // The basename of the path path string // The full path to the final file expected bool // Whether the file is expected by the rules created bool // Whether the file was created by protoc from *genFileInfo // The actual file protoc produced if not Path unique bool // True if this base name is unique in expected results ambiguious bool // True if there were more than one possible outputs that matched this file } func run(args []string) error { // process the args options := multiFlag{} descriptors := multiFlag{} expected := multiFlag{} imports := multiFlag{} flags := flag.NewFlagSet("protoc", flag.ExitOnError) protoc := flags.String("protoc", "", "The path to the real protoc.") outPath := flags.String("out_path", "", "The base output path to write to.") plugin := flags.String("plugin", "", "The go plugin to use.") importpath := flags.String("importpath", "", "The importpath for the generated sources.") compilerPath:= flags.String("compiler_path", "", "The value for PATH.") flags.Var(&options, "option", "The plugin options.") flags.Var(&descriptors, "descriptor_set", "The descriptor set to read.") flags.Var(&expected, "expected", "The 
expected output files.") flags.Var(&imports, "import", "Map a proto file to an import path.") if err := flags.Parse(args); err != nil { return err } // Output to a temporary folder and then move the contents into place below. // This is to work around long file paths on Windows. tmpDir, err := ioutil.TempDir("", "go_proto") if err != nil { return err } tmpDir = abs(tmpDir) // required to work with long paths on Windows absOutPath := abs(*outPath) // required to work with long paths on Windows defer os.RemoveAll(tmpDir) pluginBase := filepath.Base(*plugin) pluginName := strings.TrimSuffix( strings.TrimPrefix(filepath.Base(*plugin), "protoc-gen-"), ".exe") for _, m := range imports { options = append(options, fmt.Sprintf("M%v", m)) } protoc_args := []string{ fmt.Sprintf("--%v_out=%v:%v", pluginName, strings.Join(options, ","), tmpDir), "--plugin", fmt.Sprintf("%v=%v", strings.TrimSuffix(pluginBase, ".exe"), *plugin), "--descriptor_set_in", strings.Join(descriptors, string(os.PathListSeparator)), } protoc_args = append(protoc_args, flags.Args()...) cmd := exec.Command(*protoc, protoc_args...) cmd.Env = append(os.Environ(), fmt.Sprintf("PATH=%s", *compilerPath)) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return fmt.Errorf("error running protoc: %v", err) } // Build our file map, and test for existance files := map[string]*genFileInfo{} byBase := map[string]*genFileInfo{} for _, path := range expected { info := &genFileInfo{ path: path, base: filepath.Base(path), expected: true, unique: true, } files[info.path] = info if byBase[info.base] != nil { info.unique = false byBase[info.base].unique = false } else { byBase[info.base] = info } } // Walk the generated files filepath.Walk(tmpDir, func(path string, f os.FileInfo, err error) error { relPath, err := filepath.Rel(tmpDir, path) if err != nil { return err } if relPath == "." 
{ return nil } if f.IsDir() { if err := os.Mkdir(filepath.Join(absOutPath, relPath), f.Mode()); !os.IsExist(err) { return err } return nil } if !strings.HasSuffix(path, ".go") { return nil } info := &genFileInfo{ path: path, base: filepath.Base(path), created: true, } if foundInfo, ok := files[relPath]; ok { foundInfo.created = true foundInfo.from = info return nil } files[relPath] = info copyTo := byBase[info.base] switch { case copyTo == nil: // Unwanted output case !copyTo.unique: // not unique, no copy allowed case copyTo.from != nil: copyTo.ambiguious = true info.ambiguious = true default: copyTo.from = info copyTo.created = true info.expected = true } return nil }) buf := &bytes.Buffer{} for _, f := range files { switch { case f.expected && !f.created: fmt.Fprintf(buf, "Failed to create %v.\n", f.path) case f.expected && f.ambiguious: fmt.Fprintf(buf, "Ambiguious output %v.\n", f.path) case f.from != nil: data, err := ioutil.ReadFile(f.from.path) if err != nil { return err } if err := ioutil.WriteFile(abs(f.path), data, 0644); err != nil { return err } case !f.expected: //fmt.Fprintf(buf, "Unexpected output %v.\n", f.path) } if buf.Len() > 0 { fmt.Fprintf(buf, "Check that the go_package option is %q.", *importpath) return errors.New(buf.String()) } } return nil } func main() { if err := run(os.Args[1:]); err != nil { log.Fatal(err) } } protoc: create trivial files for missing outputs (#1394) The grpc_gateway plugin only creates output files for .proto files that includes service definitions. We still need to declare output .go files, since we don't know which .proto files have services during analysis, but we don't need to report errors. Related #1388 // Copyright 2017 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // protoc invokes the protobuf compiler and captures the resulting .pb.go file. package main import ( "bytes" "errors" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "path/filepath" "strings" ) type genFileInfo struct { base string // The basename of the path path string // The full path to the final file expected bool // Whether the file is expected by the rules created bool // Whether the file was created by protoc from *genFileInfo // The actual file protoc produced if not Path unique bool // True if this base name is unique in expected results ambiguious bool // True if there were more than one possible outputs that matched this file } func run(args []string) error { // process the args options := multiFlag{} descriptors := multiFlag{} expected := multiFlag{} imports := multiFlag{} flags := flag.NewFlagSet("protoc", flag.ExitOnError) protoc := flags.String("protoc", "", "The path to the real protoc.") outPath := flags.String("out_path", "", "The base output path to write to.") plugin := flags.String("plugin", "", "The go plugin to use.") importpath := flags.String("importpath", "", "The importpath for the generated sources.") compilerPath := flags.String("compiler_path", "", "The value for PATH.") flags.Var(&options, "option", "The plugin options.") flags.Var(&descriptors, "descriptor_set", "The descriptor set to read.") flags.Var(&expected, "expected", "The expected output files.") flags.Var(&imports, "import", "Map a proto file to an import path.") if err := flags.Parse(args); err != nil { return err } // Output to a temporary folder and then move the 
contents into place below. // This is to work around long file paths on Windows. tmpDir, err := ioutil.TempDir("", "go_proto") if err != nil { return err } tmpDir = abs(tmpDir) // required to work with long paths on Windows absOutPath := abs(*outPath) // required to work with long paths on Windows defer os.RemoveAll(tmpDir) pluginBase := filepath.Base(*plugin) pluginName := strings.TrimSuffix( strings.TrimPrefix(filepath.Base(*plugin), "protoc-gen-"), ".exe") for _, m := range imports { options = append(options, fmt.Sprintf("M%v", m)) } protoc_args := []string{ fmt.Sprintf("--%v_out=%v:%v", pluginName, strings.Join(options, ","), tmpDir), "--plugin", fmt.Sprintf("%v=%v", strings.TrimSuffix(pluginBase, ".exe"), *plugin), "--descriptor_set_in", strings.Join(descriptors, string(os.PathListSeparator)), } protoc_args = append(protoc_args, flags.Args()...) cmd := exec.Command(*protoc, protoc_args...) cmd.Env = append(os.Environ(), fmt.Sprintf("PATH=%s", *compilerPath)) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return fmt.Errorf("error running protoc: %v", err) } // Build our file map, and test for existance files := map[string]*genFileInfo{} byBase := map[string]*genFileInfo{} for _, path := range expected { info := &genFileInfo{ path: path, base: filepath.Base(path), expected: true, unique: true, } files[info.path] = info if byBase[info.base] != nil { info.unique = false byBase[info.base].unique = false } else { byBase[info.base] = info } } // Walk the generated files filepath.Walk(tmpDir, func(path string, f os.FileInfo, err error) error { relPath, err := filepath.Rel(tmpDir, path) if err != nil { return err } if relPath == "." 
{ return nil } if f.IsDir() { if err := os.Mkdir(filepath.Join(absOutPath, relPath), f.Mode()); !os.IsExist(err) { return err } return nil } if !strings.HasSuffix(path, ".go") { return nil } info := &genFileInfo{ path: path, base: filepath.Base(path), created: true, } if foundInfo, ok := files[relPath]; ok { foundInfo.created = true foundInfo.from = info return nil } files[relPath] = info copyTo := byBase[info.base] switch { case copyTo == nil: // Unwanted output case !copyTo.unique: // not unique, no copy allowed case copyTo.from != nil: copyTo.ambiguious = true info.ambiguious = true default: copyTo.from = info copyTo.created = true info.expected = true } return nil }) buf := &bytes.Buffer{} for _, f := range files { switch { case f.expected && !f.created: // Some plugins only create output files if the proto source files have // have relevant definitions (e.g., services for grpc_gateway). Create // trivial files that the compiler will ignore for missing outputs. data := []byte("// +build ignore\n\npackage ignore") if err := ioutil.WriteFile(abs(f.path), data, 0644); err != nil { return err } case f.expected && f.ambiguious: fmt.Fprintf(buf, "Ambiguious output %v.\n", f.path) case f.from != nil: data, err := ioutil.ReadFile(f.from.path) if err != nil { return err } if err := ioutil.WriteFile(abs(f.path), data, 0644); err != nil { return err } case !f.expected: //fmt.Fprintf(buf, "Unexpected output %v.\n", f.path) } if buf.Len() > 0 { fmt.Fprintf(buf, "Check that the go_package option is %q.", *importpath) return errors.New(buf.String()) } } return nil } func main() { if err := run(os.Args[1:]); err != nil { log.Fatal(err) } }
package impl import ( m "../" "database/sql" "errors" "fmt" "github.com/go-sql-driver/mysql" ) const ( select_trading = "SELECT id,company_id,title_type,subject," + "work_from,work_to,total," + "quotation_date,quotation_number," + "bill_date,bill_number," + "tax_rate,assignee,product," + "created_time, modified_time FROM trading" ) type tradingDAO struct { connection *Connection logger m.Logger } func NewTradingDAO(connection *Connection, logger m.Logger) *tradingDAO { return &tradingDAO{ connection: connection, logger: logger, } } func (d *tradingDAO) GetListByUser(userId string) ([]*m.Trading, error) { db := d.connection.Connect() st, err := db.Prepare(select_trading + " WHERE assignee=? AND deleted <> 1 ORDER BY id ASC") if err != nil { return nil, err } defer st.Close() rows, err := st.Query(userId) if err != nil { return nil, err } defer rows.Close() var list []*m.Trading for rows.Next() { item := d.scanTrading(rows) list = append(list, &item) } return list, nil } func (d *tradingDAO) GetById(id, userId string) (*m.Trading, error) { db := d.connection.Connect() st, err := db.Prepare(select_trading + " WHERE id=? AND assignee=? 
AND deleted <> 1 LIMIT 1") if err != nil { return nil, err } defer st.Close() rows, err := st.Query(id, userId) if err != nil { return nil, err } defer rows.Close() if !rows.Next() { return nil, nil } item := d.scanTrading(rows) return &item, nil } func (d *tradingDAO) Create(companyId, subject string, titleType int, workFrom, workTo, total, quotationDate, billDate int64, taxRate float32, assignee, product string) (*m.Trading, error) { tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("INSERT INTO trading(" + "id,company_id,subject,title_type," + "work_from,work_to,total," + "quotation_date,quotation_number," + "bill_date,bill_number," + "tax_rate,assignee,product," + "created_time,modified_time,deleted)" + "VALUES(?,?,?,?," + "?,?,?," + "?,''," + "?,''," + "?,?,?," + "unix_timestamp(now()),unix_timestamp(now()),0)") if err != nil { return nil, err } defer st.Close() id, err := insertWithUUID(32, func(id string) error { _, err = st.Exec(id, companyId, subject, titleType, workFrom, workTo, total, quotationDate, billDate, taxRate, assignee, product) return err }) if err != nil { return nil, err } tr.Commit() return &m.Trading{ Id: id, CompanyId: companyId, Subject: subject, TitleType: titleType, WorkFrom: workFrom, WorkTo: workTo, QuotationDate: quotationDate, BillDate: billDate, TaxRate: taxRate, AssigneeId: assignee, Product: product, }, nil } func (d *tradingDAO) Update(trading m.Trading) (*m.Trading, error) { //id, companyId, subject string, titleType int, workFrom, workTo, total, quotationDate, billDate int64, taxRate float32, assignee, product string tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("UPDATE trading SET " + "company_id=?,title_type=?,subject=?," + "work_from=?,work_to=?,total=?,quotation_date=?,bill_date=?," + "tax_rate=?,assignee=?,product=?," + "modified_time=unix_timestamp(now()) " + "WHERE id=? 
AND deleted <> 1") if err != nil { return nil, err } defer st.Close() _, err = st.Exec(trading.CompanyId, trading.TitleType, trading.Subject, trading.WorkFrom, trading.WorkTo, trading.Total, trading.QuotationDate, trading.BillDate, trading.TaxRate, trading.AssigneeId, trading.Product, trading.Id) if err != nil { return nil, err } tr.Commit() return &trading, nil } func (d *tradingDAO) GetItemsById(tradingId string) ([]*m.TradingItem, error) { db := d.connection.Connect() st, err := db.Prepare("SELECT id,sort_order,subject,unit_price,amount," + "degree,tax_type,memo FROM trading_item " + "WHERE trading_id=? AND deleted <> 1 ORDER BY sort_order ASC") if err != nil { return nil, err } defer st.Close() rows, err := st.Query(tradingId) if err != nil { return nil, err } defer rows.Close() var list []*m.TradingItem var id, subject, degree, memo string var sortOrder, unitPrice, amount, taxType int for rows.Next() { rows.Scan(&id, &sortOrder, &subject, &unitPrice, &amount, &degree, &taxType, &memo) list = append(list, &m.TradingItem{ Id: id, TradingId: tradingId, SortOrder: sortOrder, Subject: subject, UnitPrice: unitPrice, Amount: amount, Degree: degree, TaxType: taxType, Memo: memo, }) } return list, nil } func (d *tradingDAO) CreateItem(tradingId, subject, degree, memo string, sortOrder, unitPrice, amount, taxType int) (*m.TradingItem, error) { tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("INSERT INTO trading_item(" + "id,trading_id,sort_order,subject," + "unit_price,amount,degree," + "tax_type,memo," + "created_time,modified_time,deleted)" + "VALUES(?,?,?,?," + "?,?,?," + "?,?," + "unix_timestamp(now()),unix_timestamp(now()),0)") if err != nil { return nil, err } defer st.Close() // generate ID var id string for i := 0; i < 10; i++ { id = generateId(32) _, err = st.Exec(id, tradingId, sortOrder, subject, unitPrice, amount, degree, taxType, memo) if err == nil { break } id = "" if err2, ok := 
err.(*mysql.MySQLError); ok { if err2.Number != 1062 { return nil, err2 } } else { return nil, err } } if len(id) == 0 { return nil, errors.New("Failed to create") } tr.Commit() return &m.TradingItem{ Id: id, TradingId: tradingId, SortOrder: sortOrder, Subject: subject, UnitPrice: unitPrice, Amount: amount, Degree: degree, TaxType: taxType, Memo: memo, }, nil } func (d *tradingDAO) UpdateItem(id, tradingId, subject, degree, memo string, sortOrder, unitPrice, amount, taxType int) (*m.TradingItem, error) { tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("UPDATE trading_item SET " + "sort_order=?,subject=?," + "unit_price=?,amount=?,degree=?," + "tax_type=?,memo=?," + "modified_time=unix_timestamp(now()) " + "WHERE id=? AND trading_id=? AND deleted <> 1") if err != nil { return nil, err } defer st.Close() // execute _, err = st.Exec(sortOrder, subject, unitPrice, amount, degree, taxType, memo, id, tradingId) if err != nil { return nil, err } tr.Commit() return &m.TradingItem{ Id: id, TradingId: tradingId, SortOrder: sortOrder, Subject: subject, UnitPrice: unitPrice, Amount: amount, Degree: degree, TaxType: taxType, Memo: memo, }, nil } func (d *tradingDAO) SoftDeleteItem(id, tradingId string) error { tr, err := d.connection.Begin() if err != nil { return err } defer tr.Rollback() st, err := tr.Prepare("UPDATE trading_item SET " + "deleted=1 " + "WHERE id=? AND trading_id=? 
AND deleted <> 1") if err != nil { return err } defer st.Close() // execute _, err = st.Exec(id, tradingId) if err != nil { return err } tr.Commit() return nil } func (d *tradingDAO) generateNextId(tr *sql.Tx, date string) (string, error) { num, err := d.getId(tr, date) if err != nil { return "", err } if num == -1 { err = d.insertId(tr, date) if err != nil { return "", err } return fmt.Sprintf("%s001", date), nil } else { num += 1 err = d.updateId(tr, date, num) if err != nil { return "", err } return fmt.Sprintf("%s%03d", date, num), nil } } func (d *tradingDAO) getId(tr *sql.Tx, date string) (int, error) { st, err := tr.Prepare("SELECT num FROM trading_id WHERE date=?") if err != nil { return -1, err } defer st.Close() rows, err := st.Query(date) if err != nil { return -1, err } defer rows.Close() if !rows.Next() { return -1, nil } var num int rows.Scan(&num) return num, nil } func (d *tradingDAO) insertId(tr *sql.Tx, date string) error { st, err := tr.Prepare("INSERT INTO trading_id" + "(date,num) VALUES(?,1)") if err != nil { return err } defer st.Close() _, err = st.Exec(date) return err } func (d *tradingDAO) updateId(tr *sql.Tx, date string, num int) error { st, err := tr.Prepare("UPDATe trading_id " + "SET num=? 
WHERE date=?") if err != nil { return err } defer st.Close() _, err = st.Exec(num, date) return err } func (d *tradingDAO) scanTrading(rows *sql.Rows) m.Trading { var id, companyId, subject, product string var titleType int var taxRate float32 var assignee, quotationNumber, billNumber string var workFrom, workTo, total, quotationDate, billDate, created, modified int64 err := rows.Scan(&id, &companyId, &titleType, &subject, &workFrom, &workTo, &total, &quotationDate, &quotationNumber, &billDate, &billNumber, &taxRate, &assignee, &product, &created, &modified) if err != nil { d.logger.Errorf("Failed to scan trading :%s", err) } return m.Trading{ Id: id, CompanyId: companyId, TitleType: titleType, Subject: subject, WorkFrom: workFrom, WorkTo: workTo, Total: total, QuotationDate: quotationDate, QuotationNumber: quotationNumber, BillDate: billDate, BillNumber: billNumber, TaxRate: taxRate, AssigneeId: assignee, Product: product, CreatedTime: created, ModifiedTime: modified, } } Adds quotation/bill number package impl import ( m "../" "database/sql" "errors" "fmt" "github.com/go-sql-driver/mysql" ) const ( select_trading = "SELECT id,company_id,title_type,subject," + "work_from,work_to,total," + "quotation_date,quotation_number," + "bill_date,bill_number," + "tax_rate,assignee,product," + "created_time, modified_time FROM trading" ) type tradingDAO struct { connection *Connection logger m.Logger } func NewTradingDAO(connection *Connection, logger m.Logger) *tradingDAO { return &tradingDAO{ connection: connection, logger: logger, } } func (d *tradingDAO) GetListByUser(userId string) ([]*m.Trading, error) { db := d.connection.Connect() st, err := db.Prepare(select_trading + " WHERE assignee=? 
AND deleted <> 1 ORDER BY id ASC") if err != nil { return nil, err } defer st.Close() rows, err := st.Query(userId) if err != nil { return nil, err } defer rows.Close() var list []*m.Trading for rows.Next() { item := d.scanTrading(rows) list = append(list, &item) } return list, nil } func (d *tradingDAO) GetById(id, userId string) (*m.Trading, error) { db := d.connection.Connect() st, err := db.Prepare(select_trading + " WHERE id=? AND assignee=? AND deleted <> 1 LIMIT 1") if err != nil { return nil, err } defer st.Close() rows, err := st.Query(id, userId) if err != nil { return nil, err } defer rows.Close() if !rows.Next() { return nil, nil } item := d.scanTrading(rows) return &item, nil } func (d *tradingDAO) Create(companyId, subject string, titleType int, workFrom, workTo, total, quotationDate, billDate int64, taxRate float32, assignee, product string) (*m.Trading, error) { tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("INSERT INTO trading(" + "id,company_id,subject,title_type," + "work_from,work_to,total," + "quotation_date,quotation_number," + "bill_date,bill_number," + "tax_rate,assignee,product," + "created_time,modified_time,deleted)" + "VALUES(?,?,?,?," + "?,?,?," + "?,''," + "?,''," + "?,?,?," + "unix_timestamp(now()),unix_timestamp(now()),0)") if err != nil { return nil, err } defer st.Close() id, err := insertWithUUID(32, func(id string) error { _, err = st.Exec(id, companyId, subject, titleType, workFrom, workTo, total, quotationDate, billDate, taxRate, assignee, product) return err }) if err != nil { return nil, err } tr.Commit() return &m.Trading{ Id: id, CompanyId: companyId, Subject: subject, TitleType: titleType, WorkFrom: workFrom, WorkTo: workTo, QuotationDate: quotationDate, BillDate: billDate, TaxRate: taxRate, AssigneeId: assignee, Product: product, }, nil } func (d *tradingDAO) Update(trading m.Trading) (*m.Trading, error) { tr, err := d.connection.Begin() if err != nil { return 
nil, err } defer tr.Rollback() st, err := tr.Prepare("UPDATE trading SET " + "company_id=?,title_type=?,subject=?," + "work_from=?,work_to=?,total=?," + "quotation_date=?,quotation_number=?," + "bill_date=?,bill_number=?," + "tax_rate=?,assignee=?,product=?," + "modified_time=unix_timestamp(now()) " + "WHERE id=? AND deleted <> 1") if err != nil { return nil, err } defer st.Close() _, err = st.Exec(trading.CompanyId, trading.TitleType, trading.Subject, trading.WorkFrom, trading.WorkTo, trading.Total, trading.QuotationDate, trading.QuotationNumber, trading.BillDate, trading.BillNumber, trading.TaxRate, trading.AssigneeId, trading.Product, trading.Id) if err != nil { return nil, err } tr.Commit() return &trading, nil } func (d *tradingDAO) GetItemsById(tradingId string) ([]*m.TradingItem, error) { db := d.connection.Connect() st, err := db.Prepare("SELECT id,sort_order,subject,unit_price,amount," + "degree,tax_type,memo FROM trading_item " + "WHERE trading_id=? AND deleted <> 1 ORDER BY sort_order ASC") if err != nil { return nil, err } defer st.Close() rows, err := st.Query(tradingId) if err != nil { return nil, err } defer rows.Close() var list []*m.TradingItem var id, subject, degree, memo string var sortOrder, unitPrice, amount, taxType int for rows.Next() { rows.Scan(&id, &sortOrder, &subject, &unitPrice, &amount, &degree, &taxType, &memo) list = append(list, &m.TradingItem{ Id: id, TradingId: tradingId, SortOrder: sortOrder, Subject: subject, UnitPrice: unitPrice, Amount: amount, Degree: degree, TaxType: taxType, Memo: memo, }) } return list, nil } func (d *tradingDAO) CreateItem(tradingId, subject, degree, memo string, sortOrder, unitPrice, amount, taxType int) (*m.TradingItem, error) { tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("INSERT INTO trading_item(" + "id,trading_id,sort_order,subject," + "unit_price,amount,degree," + "tax_type,memo," + "created_time,modified_time,deleted)" + 
"VALUES(?,?,?,?," + "?,?,?," + "?,?," + "unix_timestamp(now()),unix_timestamp(now()),0)") if err != nil { return nil, err } defer st.Close() // generate ID var id string for i := 0; i < 10; i++ { id = generateId(32) _, err = st.Exec(id, tradingId, sortOrder, subject, unitPrice, amount, degree, taxType, memo) if err == nil { break } id = "" if err2, ok := err.(*mysql.MySQLError); ok { if err2.Number != 1062 { return nil, err2 } } else { return nil, err } } if len(id) == 0 { return nil, errors.New("Failed to create") } tr.Commit() return &m.TradingItem{ Id: id, TradingId: tradingId, SortOrder: sortOrder, Subject: subject, UnitPrice: unitPrice, Amount: amount, Degree: degree, TaxType: taxType, Memo: memo, }, nil } func (d *tradingDAO) UpdateItem(id, tradingId, subject, degree, memo string, sortOrder, unitPrice, amount, taxType int) (*m.TradingItem, error) { tr, err := d.connection.Begin() if err != nil { return nil, err } defer tr.Rollback() st, err := tr.Prepare("UPDATE trading_item SET " + "sort_order=?,subject=?," + "unit_price=?,amount=?,degree=?," + "tax_type=?,memo=?," + "modified_time=unix_timestamp(now()) " + "WHERE id=? AND trading_id=? AND deleted <> 1") if err != nil { return nil, err } defer st.Close() // execute _, err = st.Exec(sortOrder, subject, unitPrice, amount, degree, taxType, memo, id, tradingId) if err != nil { return nil, err } tr.Commit() return &m.TradingItem{ Id: id, TradingId: tradingId, SortOrder: sortOrder, Subject: subject, UnitPrice: unitPrice, Amount: amount, Degree: degree, TaxType: taxType, Memo: memo, }, nil } func (d *tradingDAO) SoftDeleteItem(id, tradingId string) error { tr, err := d.connection.Begin() if err != nil { return err } defer tr.Rollback() st, err := tr.Prepare("UPDATE trading_item SET " + "deleted=1 " + "WHERE id=? AND trading_id=? 
AND deleted <> 1") if err != nil { return err } defer st.Close() // execute _, err = st.Exec(id, tradingId) if err != nil { return err } tr.Commit() return nil } func (d *tradingDAO) generateNextId(tr *sql.Tx, date string) (string, error) { num, err := d.getId(tr, date) if err != nil { return "", err } if num == -1 { err = d.insertId(tr, date) if err != nil { return "", err } return fmt.Sprintf("%s001", date), nil } else { num += 1 err = d.updateId(tr, date, num) if err != nil { return "", err } return fmt.Sprintf("%s%03d", date, num), nil } } func (d *tradingDAO) getId(tr *sql.Tx, date string) (int, error) { st, err := tr.Prepare("SELECT num FROM trading_id WHERE date=?") if err != nil { return -1, err } defer st.Close() rows, err := st.Query(date) if err != nil { return -1, err } defer rows.Close() if !rows.Next() { return -1, nil } var num int rows.Scan(&num) return num, nil } func (d *tradingDAO) insertId(tr *sql.Tx, date string) error { st, err := tr.Prepare("INSERT INTO trading_id" + "(date,num) VALUES(?,1)") if err != nil { return err } defer st.Close() _, err = st.Exec(date) return err } func (d *tradingDAO) updateId(tr *sql.Tx, date string, num int) error { st, err := tr.Prepare("UPDATe trading_id " + "SET num=? 
WHERE date=?") if err != nil { return err } defer st.Close() _, err = st.Exec(num, date) return err } func (d *tradingDAO) scanTrading(rows *sql.Rows) m.Trading { var id, companyId, subject, product string var titleType int var taxRate float32 var assignee, quotationNumber, billNumber string var workFrom, workTo, total, quotationDate, billDate, created, modified int64 err := rows.Scan(&id, &companyId, &titleType, &subject, &workFrom, &workTo, &total, &quotationDate, &quotationNumber, &billDate, &billNumber, &taxRate, &assignee, &product, &created, &modified) if err != nil { d.logger.Errorf("Failed to scan trading :%s", err) } return m.Trading{ Id: id, CompanyId: companyId, TitleType: titleType, Subject: subject, WorkFrom: workFrom, WorkTo: workTo, Total: total, QuotationDate: quotationDate, QuotationNumber: quotationNumber, BillDate: billDate, BillNumber: billNumber, TaxRate: taxRate, AssigneeId: assignee, Product: product, CreatedTime: created, ModifiedTime: modified, } }
package main

import (
	"bufio"
	"fmt"
	"net"
	"os"
	"syscall"
)

// handleClient reads one byte of the client's request and replies with a
// minimal HTTP/1.0 200 status line. The Flush error is returned so write
// failures are not silently dropped.
func handleClient(conn net.Conn) error {
	r := bufio.NewReader(conn)
	if _, err := r.ReadByte(); err != nil {
		return err
	}
	w := bufio.NewWriter(conn)
	if _, err := w.WriteString("HTTP/1.0 200 OK\n"); err != nil {
		return err
	}
	return w.Flush()
}

// Run binds to host:port and serves each accepted connection in a forked
// child process. It only returns on fatal errors (non-zero value).
//
// NOTE(review): raw SYS_FORK via syscall.Syscall is generally unsafe in a
// multithreaded Go runtime — confirm this is intentional for this demo.
func Run(host string, port int) int {
	fmt.Printf("--> binding to %s:%d\n", host, port)

	addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:%d", host, port))
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return -1
	}
	ln, err := net.ListenTCP("tcp", addr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return -1
	}

	for {
		conn, err := ln.Accept()
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
			continue
		}
		fmt.Printf(".")

		ret, _, errno := syscall.Syscall(syscall.SYS_FORK, 0, 0, 0)
		if errno != 0 {
			return int(errno)
		}
		if ret != 0 {
			// Parent: close its copy of the descriptor so the client receives
			// FIN once the child finishes.
			conn.Close()
			continue
		}

		// Child: serve the connection, then exit. Without the exit the child
		// would fall back into the Accept loop and compete with the parent.
		if err := handleClient(conn); err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
		}
		conn.Close() // child process close
		os.Exit(0)
	}
}
package loader import ( "fmt" "io" "io/ioutil" "log" "os" "os/exec" "path" "strconv" "strings" "time" "github.com/brnstz/bus/internal/conf" "github.com/brnstz/bus/internal/etc" "github.com/brnstz/bus/internal/models" ) var ( days = []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"} datefmt = "20060102" loaderBreak = time.Hour * 24 views = []string{"here_trip", "service", "service_exception"} logp = 1000 ) // rskey is the unique key for a route_shape type rskey struct { routeID string directionID int headsign string } type latlon struct { lat float64 lon float64 } type Loader struct { // the dir from which we load google transit files dir string // mapping from trip id to a trip object trips map[string]*models.Trip // mapping from stop_id to a slice of trip_ids stopTrips map[string][]string // mapping of trip_id to service object tripService map[string]*models.Service // mapping of service_id to map of unique route_id serviceRoute map[string]map[string]bool routeIDs map[string]bool // routeAgency contains the agency for each route after we loadRoutes() routeAgency map[string]string // mapping trip_id to route_id tripRoute map[string]string // shapeRoute maps shape_id to route_id (for purposes of adding agency_id // to shapes table) shapeRoute map[string]string // maxTripSeq maxTripSeq map[string]int // mapping of stop ids to lat/lon pairs stopLocation map[string]*latlon // routeShapeCount keeps a running tab of the biggest shape for this // route/dir/headsign combo /* routeShapeCount map[rskey]int routeShapeID map[rskey] */ } func newLoader(dir string) *Loader { l := Loader{ dir: dir, trips: map[string]*models.Trip{}, stopTrips: map[string][]string{}, tripRoute: map[string]string{}, tripService: map[string]*models.Service{}, serviceRoute: map[string]map[string]bool{}, routeAgency: map[string]string{}, shapeRoute: map[string]string{}, maxTripSeq: map[string]int{}, stopLocation: map[string]*latlon{}, } // Checking the length of the 
0th entry ensures we ignore the case where // BUS_ROUTE_FILTER was an empty string (resulting in []string{""}). // Possibly we want to check this with the conf package, but doing this for // now. if len(conf.Loader.RouteFilter) > 0 && len(conf.Loader.RouteFilter[0]) > 0 { l.routeIDs = map[string]bool{} for _, v := range conf.Loader.RouteFilter { l.routeIDs[v] = true } } return &l } func (l *Loader) load() { l.loadRoutes() l.loadTrips() l.loadStopLocations() l.loadStopTimes() l.loadUniqueStop() l.loadCalendars() l.loadCalendarDates() l.loadShapes() l.updateRouteShapes() } // skipRoute returns true if we should skip this route given our routeFilter // config func (l *Loader) skipRoute(routeID string) bool { if l.routeIDs != nil && l.routeIDs[routeID] == false { return true } else { return false } } func (l *Loader) loadRoutes() { var i int f, fh := getcsv(l.dir, "routes.txt") defer fh.Close() header, err := f.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } routeIdx := find(header, "route_id") routeTypeIdx := find(header, "route_type") routeColorIdx := find(header, "route_color") routeTextColorIdx := find(header, "route_text_color") routeAgencyIdx := find(header, "agency_id") routeShortNameIdx := maybeFind(header, "route_short_name") routeLongNameIdx := maybeFind(header, "route_long_name") for i = 0; ; i++ { rec, err := f.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } route := rec[routeIdx] if l.skipRoute(route) { continue } routeType, err := strconv.Atoi(rec[routeTypeIdx]) if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } routeColor := rec[routeColorIdx] routeTextColor := rec[routeTextColorIdx] agencyID := rec[routeAgencyIdx] var shortName, longName string if routeShortNameIdx < 0 { shortName = "" } else { shortName = rec[routeShortNameIdx] } if routeLongNameIdx < 0 { longName = "" } else { longName = rec[routeLongNameIdx] } r, err := models.NewRoute( route, routeType, 
routeColor, routeTextColor, agencyID, shortName, longName, ) if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } err = r.Save() if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } l.routeAgency[route] = agencyID } log.Printf("loaded %v routes", i) } func (l *Loader) loadTrips() { var i int f, fh := getcsv(l.dir, "trips.txt") defer fh.Close() header, err := f.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } tripIdx := find(header, "trip_id") dirIdx := find(header, "direction_id") headIdx := find(header, "trip_headsign") serviceIdx := find(header, "service_id") routeIdx := find(header, "route_id") shapeIdx := find(header, "shape_id") for i = 0; ; i++ { rec, err := f.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } direction, err := strconv.Atoi(rec[dirIdx]) if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } id := rec[tripIdx] service := rec[serviceIdx] route := rec[routeIdx] shape := rec[shapeIdx] agency := l.routeAgency[route] if l.skipRoute(route) { continue } trip, err := models.NewTrip( id, route, agency, service, shape, rec[headIdx], direction, ) if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } l.trips[trip.TripID] = trip serviceObj := &models.Service{ ID: service, RouteID: route, } l.tripService[trip.TripID] = serviceObj if l.serviceRoute[service] == nil { l.serviceRoute[service] = map[string]bool{} } l.serviceRoute[service][route] = true err = trip.Save() if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } l.tripRoute[id] = route l.shapeRoute[shape] = route if i%logp == 0 { log.Printf("loaded %v trips", i) } } log.Printf("loaded %v trips", i) } func (l *Loader) loadStopTimes() { var i int var err error // Read the unsorted file first so we can get headers index values stopTimesUnsorted, unsortedFh := getcsv(l.dir, "stop_times.txt") defer unsortedFh.Close() header, err := stopTimesUnsorted.Read() if err != nil 
{ log.Fatalf("unable to read header: %v", err) } stopIdx := find(header, "stop_id") tripIdx := find(header, "trip_id") arrivalIdx := find(header, "arrival_time") depatureIdx := find(header, "departure_time") sequenceIdx := find(header, "stop_sequence") // Create a file in the same dir that we've guaranteed is sorted // by trip_id and stop_sequence. It probably already is, but let's // be sure noFirstLine, err := ioutil.TempFile(l.dir, "") if err != nil { log.Fatal("can't create first line file", err) } defer noFirstLine.Close() defer os.Remove(noFirstLine.Name()) sorted, err := ioutil.TempFile(l.dir, "") if err != nil { log.Fatal("can't create sorted file", err) } defer sorted.Close() defer os.Remove(sorted.Name()) // Remove first line of file (we don't want the header to be sorted // in the middle of the file) cmd := exec.Command("tail", "-n", "+2", path.Join(l.dir, "stop_times.txt")) cmd.Stdout = noFirstLine err = cmd.Run() if err != nil { log.Fatal("can't create file with no first line", err) } err = noFirstLine.Close() if err != nil { log.Fatal("can't close no first line file", err) } // Sort primarily by trip then by sequence id // sort -s -t, -k1,1 -k5,5n cmd = exec.Command("sort", "-s", "-t,", fmt.Sprintf("-k%d,%d", tripIdx+1, tripIdx+1), fmt.Sprintf("-k%d,%dn", sequenceIdx+1, sequenceIdx+1), noFirstLine.Name(), ) cmd.Stdout = sorted err = cmd.Run() if err != nil { log.Fatal("can't sort file", err, cmd) } err = sorted.Close() if err != nil { log.Fatal("can't close sorted file") } // Open the sorted file and process stopTimes, fh := getcsv(l.dir, path.Base(sorted.Name())) defer fh.Close() // read once to get max sequence ids for i := 0; ; i++ { rec, err := stopTimes.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } trip := rec[tripIdx] sequenceStr := rec[sequenceIdx] sequence, err := strconv.Atoi(sequenceStr) if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } if sequence > 
l.maxTripSeq[trip] { l.maxTripSeq[trip] = sequence } } err = fh.Close() if err != nil { log.Fatal("can't close", err) } stopTimes, fh = getcsv(l.dir, path.Base(sorted.Name())) defer fh.Close() // read again for actual processing var rec []string var sst *models.ScheduledStopTime var stop string for i = 0; ; i++ { rec, err = stopTimes.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } stop = rec[stopIdx] trip := rec[tripIdx] arrivalStr := rec[arrivalIdx] departureStr := rec[depatureIdx] agencyID := l.routeAgency[l.tripRoute[trip]] sequenceStr := rec[sequenceIdx] sequence, err := strconv.Atoi(sequenceStr) if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } l.stopTrips[stop] = append(l.stopTrips[stop], trip) service, exists := l.tripService[trip] if !exists { continue } lastStop := false maxSeq := l.maxTripSeq[trip] if maxSeq == sequence { lastStop = true } // Save the sst from the previous iteration if sst != nil { ll := l.stopLocation[stop] if ll == nil { log.Fatal("can't get lat lon", stop) } if sst.TripID == trip { sst.NextStopID.Scan(stop) sst.NextStopLat.Scan(ll.lat) sst.NextStopLon.Scan(ll.lon) } else { sst.NextStopID.Scan(nil) sst.NextStopLat.Scan(0.0) sst.NextStopLon.Scan(0.0) } err = sst.Save() if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } } sst, err = models.NewScheduledStopTime( service.RouteID, stop, service.ID, arrivalStr, departureStr, agencyID, trip, sequence, lastStop, ) if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } if i%logp == 0 { log.Printf("loaded %v stop times", i) } } // Make sure we get the last stop if sst != nil { // This will always be nil sst.NextStopID.Scan(nil) err = sst.Save() if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } } log.Printf("loaded %v stop times", i) } func (l *Loader) loadStopLocations() { var i int stops, fh := getcsv(l.dir, "stops.txt") defer fh.Close() header, err := 
stops.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } stopIdx := find(header, "stop_id") stopLatIdx := find(header, "stop_lat") stopLonIdx := find(header, "stop_lon") for i = 0; ; i++ { rec, err := stops.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLat, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLatIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLon, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLonIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } ll := &latlon{ lat: stopLat, lon: stopLon, } l.stopLocation[rec[stopIdx]] = ll } log.Printf("loaded %v stops locations", i) } func (l *Loader) loadUniqueStop() { var i int stops, fh := getcsv(l.dir, "stops.txt") defer fh.Close() header, err := stops.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } stopIdx := find(header, "stop_id") stopNameIdx := find(header, "stop_name") stopLatIdx := find(header, "stop_lat") stopLonIdx := find(header, "stop_lon") for i = 0; ; i++ { rec, err := stops.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLat, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLatIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLon, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLonIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } trips, exists := l.stopTrips[rec[stopIdx]] if exists { for _, trip := range trips { if l.skipRoute(l.tripRoute[trip]) { continue } obj := models.Stop{ StopID: rec[stopIdx], Name: rec[stopNameIdx], RouteID: l.tripRoute[trip], DirectionID: l.trips[trip].DirectionID, Headsign: l.trips[trip].Headsign, AgencyID: l.routeAgency[l.tripRoute[trip]], } obj.Lat.Scan(stopLat) obj.Lon.Scan(stopLon) err = obj.Save() if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } } } if 
i%logp == 0 { log.Printf("loaded %v stops", i) } } log.Printf("loaded %v stops", i) } func (l *Loader) loadCalendarDates() { cal, fh := getcsv(l.dir, "calendar_dates.txt") defer fh.Close() header, err := cal.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } serviceIdx := find(header, "service_id") exceptionDateIdx := find(header, "date") exceptionTypeIdx := find(header, "exception_type") for i := 0; ; i++ { rec, err := cal.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of calendar_dates.txt", err, i) } serviceId := rec[serviceIdx] exceptionDate, err := time.Parse(datefmt, rec[exceptionDateIdx]) if err != nil { log.Fatalf("can't parse exception date %v %v", err, rec[exceptionDateIdx]) } exceptionType, err := strconv.Atoi(rec[exceptionTypeIdx]) if err != nil { log.Fatalf("can't parse exception type integer %v %v", err, rec[exceptionTypeIdx]) } if !(exceptionType == models.ServiceAdded || exceptionType == models.ServiceRemoved) { log.Fatalf("invalid value for exception_type %v", exceptionType) } for route, _ := range l.serviceRoute[serviceId] { s := models.ServiceRouteException{ AgencyID: l.routeAgency[route], ServiceID: serviceId, RouteID: route, ExceptionDate: exceptionDate, ExceptionType: exceptionType, } err = s.Save() if err != nil { log.Fatalf("%v on line %v of calendar_dates.txt with %v", err, i, s) } } } } func (l *Loader) loadCalendars() { var i int _, err := os.Stat(path.Join(l.dir, "calendar.txt")) if err != nil { log.Printf("error getting calendar, assuming doesnt exist %+v", err) return } cal, fh := getcsv(l.dir, "calendar.txt") defer fh.Close() header, err := cal.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } idxs := map[string]int{} for _, day := range days { idxs[day] = find(header, day) } serviceIdx := find(header, "service_id") startDateIdx := find(header, "start_date") endDateIdx := find(header, "end_date") for i = 0; ; i++ { rec, err := cal.Read() if err == io.EOF { break } if 
err != nil { log.Fatalf("%v on line %v of calendar.txt", err, i) } serviceId := rec[serviceIdx] startDate, err := time.Parse(datefmt, rec[startDateIdx]) if err != nil { log.Fatalf("can't parse start date %v %v", err, rec[startDateIdx]) } endDate, err := time.Parse(datefmt, rec[endDateIdx]) if err != nil { log.Fatalf("can't parse end date %v %v", err, rec[endDateIdx]) } for day, dayIdx := range idxs { dayVal := rec[dayIdx] if dayVal != "1" { continue } for route, _ := range l.serviceRoute[serviceId] { srd := models.ServiceRouteDay{ ServiceID: serviceId, RouteID: route, AgencyID: l.routeAgency[route], Day: day, StartDate: startDate, EndDate: endDate, } err = srd.Save() if err != nil { log.Fatalf("%v on line %v of calendar.txt with %v", err, i, srd) } } } if i%logp == 0 { log.Printf("loaded %v calendars", i) } } log.Printf("loaded %v calendars", i) } func (l *Loader) loadShapes() { var i int shapes, fh := getcsv(l.dir, "shapes.txt") defer fh.Close() header, err := shapes.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } idIDX := find(header, "shape_id") latIDX := find(header, "shape_pt_lat") lonIDX := find(header, "shape_pt_lon") seqIDX := find(header, "shape_pt_sequence") for i = 0; ; i++ { rec, err := shapes.Read() if err == io.EOF { break } lat, err := strconv.ParseFloat( strings.TrimSpace(rec[latIDX]), 64, ) if err != nil { log.Fatalf("%v on line %v of shapes.txt", err, i) } lon, err := strconv.ParseFloat( strings.TrimSpace(rec[lonIDX]), 64, ) if err != nil { log.Fatalf("%v on line %v of shapes.txt", err, i) } seq, err := strconv.ParseInt( strings.TrimSpace(rec[seqIDX]), 10, 32, ) id := rec[idIDX] route := l.shapeRoute[id] if len(route) < 1 || l.skipRoute(route) { continue } agency := l.routeAgency[l.shapeRoute[id]] shape, err := models.NewShape( id, agency, int(seq), lat, lon, ) err = shape.Save(etc.DBConn) if err != nil { log.Fatalf("%v on line %v of shapes.txt", err, i) } if i%logp == 0 { log.Printf("loaded %v shapes", i) } } 
log.Printf("loaded %v shapes", i) } // updateRouteShapes updates the route_shape table by identifying // the "biggest" shapes typical for a route func (l *Loader) updateRouteShapes() { var err error tx, err := etc.DBConn.Beginx() if err != nil { log.Fatal(err) } defer func() { if err == nil { err = tx.Commit() if err != nil { log.Println("can't commit route shapes", err) } } else { tx.Rollback() if err != nil { log.Println("can't rollback route shapes", err) } } }() // delete existing routes within a transaction (won't take effect // unless committed) err = models.DeleteRouteShapes(tx) if err != nil { log.Fatal(err) } // Get shapes ordered from smallest to largest routeShapes, err := models.GetRouteShapes(tx) if err != nil { log.Fatal(err) } log.Printf("got %d route shapes", len(routeShapes)) for _, rs := range routeShapes { // upsert each route so we end up with the most common err = rs.Save(tx) if err != nil { log.Fatal(err) } } // delete existing routes within a transaction (won't take effect // unless committed) err = models.DeleteFakeShapes(tx) if err != nil { log.Fatal(err) } // Get shapes ordered from smallest to largest fakeShapes, err := models.GetFakeRouteShapes(tx) if err != nil { log.Fatal(err) } log.Printf("got %d fake shapes", len(fakeShapes)) for _, fs := range fakeShapes { // upsert each route so we end up with the most common err = fs.Save(tx) if err != nil { log.Fatal(err) } } } // LoadOnce loads the files in conf.Loader.GTFSURLs, possibly filtering by the // routes specified in conf.Loader.RouteFilter. If no filter is defined, // it loads all data in the specified URLs. 
func LoadOnce() { for _, url := range conf.Loader.GTFSURLs { if len(url) < 1 { continue } log.Printf("starting %v", url) dir, err := ioutil.TempDir(conf.Loader.TmpDir, "") if err != nil { log.Fatal(err) } err = download(url, dir) if err != nil { log.Fatal(err) } err = prepare(url, dir) if err != nil { log.Fatal(err) } func() { log.Printf("loading: %v in %v", url, dir) defer os.RemoveAll(dir) t1 := time.Now() l := newLoader(dir) l.load() t2 := time.Now() log.Printf("took %v for %v", t2.Sub(t1), url) }() } // Update materialized views. Use a transaction for each one, because // we reset each view's primary ID sequence in a separate statement. for _, view := range views { func() { var err error tx, err := etc.DBConn.Beginx() if err != nil { log.Fatal("can't create tx to update view", view, err) } defer func() { if err == nil { commitErr := tx.Commit() if commitErr != nil { log.Fatal("error committing update to view", view, err) } } else { rollbackErr := tx.Rollback() if rollbackErr != nil { log.Fatal("error rolling back update to view", view, err) } } }() statements := []string{ fmt.Sprintf("ALTER SEQUENCE %s_seq RESTART WITH 1", view), fmt.Sprintf("REFRESH MATERIALIZED VIEW CONCURRENTLY %s", view), } for _, statement := range statements { log.Println(statement) _, err = tx.Exec(statement) if err != nil { log.Println("can't exec", statement, err) return } log.Println("complete") } }() } } // LoadForever continuously runs LoadOnce, breaking for 24 hours between loads func LoadForever() { for { LoadOnce() log.Printf("finished loading, sleeping for %v", loaderBreak) time.Sleep(loaderBreak) } } remove progress debug lines package loader import ( "fmt" "io" "io/ioutil" "log" "os" "os/exec" "path" "strconv" "strings" "time" "github.com/brnstz/bus/internal/conf" "github.com/brnstz/bus/internal/etc" "github.com/brnstz/bus/internal/models" ) var ( days = []string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"} datefmt = "20060102" loaderBreak = 
time.Hour * 24 views = []string{"here_trip", "service", "service_exception"} logp = 1000 ) // rskey is the unique key for a route_shape type rskey struct { routeID string directionID int headsign string } type latlon struct { lat float64 lon float64 } type Loader struct { // the dir from which we load google transit files dir string // mapping from trip id to a trip object trips map[string]*models.Trip // mapping from stop_id to a slice of trip_ids stopTrips map[string][]string // mapping of trip_id to service object tripService map[string]*models.Service // mapping of service_id to map of unique route_id serviceRoute map[string]map[string]bool routeIDs map[string]bool // routeAgency contains the agency for each route after we loadRoutes() routeAgency map[string]string // mapping trip_id to route_id tripRoute map[string]string // shapeRoute maps shape_id to route_id (for purposes of adding agency_id // to shapes table) shapeRoute map[string]string // maxTripSeq maxTripSeq map[string]int // mapping of stop ids to lat/lon pairs stopLocation map[string]*latlon // routeShapeCount keeps a running tab of the biggest shape for this // route/dir/headsign combo /* routeShapeCount map[rskey]int routeShapeID map[rskey] */ } func newLoader(dir string) *Loader { l := Loader{ dir: dir, trips: map[string]*models.Trip{}, stopTrips: map[string][]string{}, tripRoute: map[string]string{}, tripService: map[string]*models.Service{}, serviceRoute: map[string]map[string]bool{}, routeAgency: map[string]string{}, shapeRoute: map[string]string{}, maxTripSeq: map[string]int{}, stopLocation: map[string]*latlon{}, } // Checking the length of the 0th entry ensures we ignore the case where // BUS_ROUTE_FILTER was an empty string (resulting in []string{""}). // Possibly we want to check this with the conf package, but doing this for // now. 
if len(conf.Loader.RouteFilter) > 0 && len(conf.Loader.RouteFilter[0]) > 0 { l.routeIDs = map[string]bool{} for _, v := range conf.Loader.RouteFilter { l.routeIDs[v] = true } } return &l } func (l *Loader) load() { l.loadRoutes() l.loadTrips() l.loadStopLocations() l.loadStopTimes() l.loadUniqueStop() l.loadCalendars() l.loadCalendarDates() l.loadShapes() l.updateRouteShapes() } // skipRoute returns true if we should skip this route given our routeFilter // config func (l *Loader) skipRoute(routeID string) bool { if l.routeIDs != nil && l.routeIDs[routeID] == false { return true } else { return false } } func (l *Loader) loadRoutes() { var i int f, fh := getcsv(l.dir, "routes.txt") defer fh.Close() header, err := f.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } routeIdx := find(header, "route_id") routeTypeIdx := find(header, "route_type") routeColorIdx := find(header, "route_color") routeTextColorIdx := find(header, "route_text_color") routeAgencyIdx := find(header, "agency_id") routeShortNameIdx := maybeFind(header, "route_short_name") routeLongNameIdx := maybeFind(header, "route_long_name") for i = 0; ; i++ { rec, err := f.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } route := rec[routeIdx] if l.skipRoute(route) { continue } routeType, err := strconv.Atoi(rec[routeTypeIdx]) if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } routeColor := rec[routeColorIdx] routeTextColor := rec[routeTextColorIdx] agencyID := rec[routeAgencyIdx] var shortName, longName string if routeShortNameIdx < 0 { shortName = "" } else { shortName = rec[routeShortNameIdx] } if routeLongNameIdx < 0 { longName = "" } else { longName = rec[routeLongNameIdx] } r, err := models.NewRoute( route, routeType, routeColor, routeTextColor, agencyID, shortName, longName, ) if err != nil { log.Fatalf("%v on line %v of routes.txt", err, i) } err = r.Save() if err != nil { log.Fatalf("%v on line %v of 
routes.txt", err, i) } l.routeAgency[route] = agencyID } } func (l *Loader) loadTrips() { var i int f, fh := getcsv(l.dir, "trips.txt") defer fh.Close() header, err := f.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } tripIdx := find(header, "trip_id") dirIdx := find(header, "direction_id") headIdx := find(header, "trip_headsign") serviceIdx := find(header, "service_id") routeIdx := find(header, "route_id") shapeIdx := find(header, "shape_id") for i = 0; ; i++ { rec, err := f.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } direction, err := strconv.Atoi(rec[dirIdx]) if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } id := rec[tripIdx] service := rec[serviceIdx] route := rec[routeIdx] shape := rec[shapeIdx] agency := l.routeAgency[route] if l.skipRoute(route) { continue } trip, err := models.NewTrip( id, route, agency, service, shape, rec[headIdx], direction, ) if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } l.trips[trip.TripID] = trip serviceObj := &models.Service{ ID: service, RouteID: route, } l.tripService[trip.TripID] = serviceObj if l.serviceRoute[service] == nil { l.serviceRoute[service] = map[string]bool{} } l.serviceRoute[service][route] = true err = trip.Save() if err != nil { log.Fatalf("%v on line %v of trips.txt", err, i) } l.tripRoute[id] = route l.shapeRoute[shape] = route } } func (l *Loader) loadStopTimes() { var i int var err error // Read the unsorted file first so we can get headers index values stopTimesUnsorted, unsortedFh := getcsv(l.dir, "stop_times.txt") defer unsortedFh.Close() header, err := stopTimesUnsorted.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } stopIdx := find(header, "stop_id") tripIdx := find(header, "trip_id") arrivalIdx := find(header, "arrival_time") depatureIdx := find(header, "departure_time") sequenceIdx := find(header, "stop_sequence") // Create a file in the same dir that we've guaranteed 
is sorted // by trip_id and stop_sequence. It probably already is, but let's // be sure noFirstLine, err := ioutil.TempFile(l.dir, "") if err != nil { log.Fatal("can't create first line file", err) } defer noFirstLine.Close() defer os.Remove(noFirstLine.Name()) sorted, err := ioutil.TempFile(l.dir, "") if err != nil { log.Fatal("can't create sorted file", err) } defer sorted.Close() defer os.Remove(sorted.Name()) // Remove first line of file (we don't want the header to be sorted // in the middle of the file) cmd := exec.Command("tail", "-n", "+2", path.Join(l.dir, "stop_times.txt")) cmd.Stdout = noFirstLine err = cmd.Run() if err != nil { log.Fatal("can't create file with no first line", err) } err = noFirstLine.Close() if err != nil { log.Fatal("can't close no first line file", err) } // Sort primarily by trip then by sequence id // sort -s -t, -k1,1 -k5,5n cmd = exec.Command("sort", "-s", "-t,", fmt.Sprintf("-k%d,%d", tripIdx+1, tripIdx+1), fmt.Sprintf("-k%d,%dn", sequenceIdx+1, sequenceIdx+1), noFirstLine.Name(), ) cmd.Stdout = sorted err = cmd.Run() if err != nil { log.Fatal("can't sort file", err, cmd) } err = sorted.Close() if err != nil { log.Fatal("can't close sorted file") } // Open the sorted file and process stopTimes, fh := getcsv(l.dir, path.Base(sorted.Name())) defer fh.Close() // read once to get max sequence ids for i := 0; ; i++ { rec, err := stopTimes.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } trip := rec[tripIdx] sequenceStr := rec[sequenceIdx] sequence, err := strconv.Atoi(sequenceStr) if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } if sequence > l.maxTripSeq[trip] { l.maxTripSeq[trip] = sequence } } err = fh.Close() if err != nil { log.Fatal("can't close", err) } stopTimes, fh = getcsv(l.dir, path.Base(sorted.Name())) defer fh.Close() // read again for actual processing var rec []string var sst *models.ScheduledStopTime var stop string for i = 0; ; i++ { 
rec, err = stopTimes.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } stop = rec[stopIdx] trip := rec[tripIdx] arrivalStr := rec[arrivalIdx] departureStr := rec[depatureIdx] agencyID := l.routeAgency[l.tripRoute[trip]] sequenceStr := rec[sequenceIdx] sequence, err := strconv.Atoi(sequenceStr) if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } l.stopTrips[stop] = append(l.stopTrips[stop], trip) service, exists := l.tripService[trip] if !exists { continue } lastStop := false maxSeq := l.maxTripSeq[trip] if maxSeq == sequence { lastStop = true } // Save the sst from the previous iteration if sst != nil { ll := l.stopLocation[stop] if ll == nil { log.Fatal("can't get lat lon", stop) } if sst.TripID == trip { sst.NextStopID.Scan(stop) sst.NextStopLat.Scan(ll.lat) sst.NextStopLon.Scan(ll.lon) } else { sst.NextStopID.Scan(nil) sst.NextStopLat.Scan(0.0) sst.NextStopLon.Scan(0.0) } err = sst.Save() if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } } sst, err = models.NewScheduledStopTime( service.RouteID, stop, service.ID, arrivalStr, departureStr, agencyID, trip, sequence, lastStop, ) if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } } // Make sure we get the last stop if sst != nil { // This will always be nil sst.NextStopID.Scan(nil) err = sst.Save() if err != nil { log.Fatalf("%v on line %v of stop_times.txt", err, i) } } } func (l *Loader) loadStopLocations() { var i int stops, fh := getcsv(l.dir, "stops.txt") defer fh.Close() header, err := stops.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } stopIdx := find(header, "stop_id") stopLatIdx := find(header, "stop_lat") stopLonIdx := find(header, "stop_lon") for i = 0; ; i++ { rec, err := stops.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLat, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLatIdx]), 64, ) if err != 
nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLon, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLonIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } ll := &latlon{ lat: stopLat, lon: stopLon, } l.stopLocation[rec[stopIdx]] = ll } } func (l *Loader) loadUniqueStop() { var i int stops, fh := getcsv(l.dir, "stops.txt") defer fh.Close() header, err := stops.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } stopIdx := find(header, "stop_id") stopNameIdx := find(header, "stop_name") stopLatIdx := find(header, "stop_lat") stopLonIdx := find(header, "stop_lon") for i = 0; ; i++ { rec, err := stops.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLat, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLatIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } stopLon, err := strconv.ParseFloat( strings.TrimSpace(rec[stopLonIdx]), 64, ) if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } trips, exists := l.stopTrips[rec[stopIdx]] if exists { for _, trip := range trips { if l.skipRoute(l.tripRoute[trip]) { continue } obj := models.Stop{ StopID: rec[stopIdx], Name: rec[stopNameIdx], RouteID: l.tripRoute[trip], DirectionID: l.trips[trip].DirectionID, Headsign: l.trips[trip].Headsign, AgencyID: l.routeAgency[l.tripRoute[trip]], } obj.Lat.Scan(stopLat) obj.Lon.Scan(stopLon) err = obj.Save() if err != nil { log.Fatalf("%v on line %v of stops.txt", err, i) } } } } } func (l *Loader) loadCalendarDates() { cal, fh := getcsv(l.dir, "calendar_dates.txt") defer fh.Close() header, err := cal.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } serviceIdx := find(header, "service_id") exceptionDateIdx := find(header, "date") exceptionTypeIdx := find(header, "exception_type") for i := 0; ; i++ { rec, err := cal.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of 
calendar_dates.txt", err, i) } serviceId := rec[serviceIdx] exceptionDate, err := time.Parse(datefmt, rec[exceptionDateIdx]) if err != nil { log.Fatalf("can't parse exception date %v %v", err, rec[exceptionDateIdx]) } exceptionType, err := strconv.Atoi(rec[exceptionTypeIdx]) if err != nil { log.Fatalf("can't parse exception type integer %v %v", err, rec[exceptionTypeIdx]) } if !(exceptionType == models.ServiceAdded || exceptionType == models.ServiceRemoved) { log.Fatalf("invalid value for exception_type %v", exceptionType) } for route, _ := range l.serviceRoute[serviceId] { s := models.ServiceRouteException{ AgencyID: l.routeAgency[route], ServiceID: serviceId, RouteID: route, ExceptionDate: exceptionDate, ExceptionType: exceptionType, } err = s.Save() if err != nil { log.Fatalf("%v on line %v of calendar_dates.txt with %v", err, i, s) } } } } func (l *Loader) loadCalendars() { var i int _, err := os.Stat(path.Join(l.dir, "calendar.txt")) if err != nil { log.Printf("error getting calendar, assuming doesnt exist %+v", err) return } cal, fh := getcsv(l.dir, "calendar.txt") defer fh.Close() header, err := cal.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } idxs := map[string]int{} for _, day := range days { idxs[day] = find(header, day) } serviceIdx := find(header, "service_id") startDateIdx := find(header, "start_date") endDateIdx := find(header, "end_date") for i = 0; ; i++ { rec, err := cal.Read() if err == io.EOF { break } if err != nil { log.Fatalf("%v on line %v of calendar.txt", err, i) } serviceId := rec[serviceIdx] startDate, err := time.Parse(datefmt, rec[startDateIdx]) if err != nil { log.Fatalf("can't parse start date %v %v", err, rec[startDateIdx]) } endDate, err := time.Parse(datefmt, rec[endDateIdx]) if err != nil { log.Fatalf("can't parse end date %v %v", err, rec[endDateIdx]) } for day, dayIdx := range idxs { dayVal := rec[dayIdx] if dayVal != "1" { continue } for route, _ := range l.serviceRoute[serviceId] { srd := 
models.ServiceRouteDay{ ServiceID: serviceId, RouteID: route, AgencyID: l.routeAgency[route], Day: day, StartDate: startDate, EndDate: endDate, } err = srd.Save() if err != nil { log.Fatalf("%v on line %v of calendar.txt with %v", err, i, srd) } } } } } func (l *Loader) loadShapes() { var i int shapes, fh := getcsv(l.dir, "shapes.txt") defer fh.Close() header, err := shapes.Read() if err != nil { log.Fatalf("unable to read header: %v", err) } idIDX := find(header, "shape_id") latIDX := find(header, "shape_pt_lat") lonIDX := find(header, "shape_pt_lon") seqIDX := find(header, "shape_pt_sequence") for i = 0; ; i++ { rec, err := shapes.Read() if err == io.EOF { break } lat, err := strconv.ParseFloat( strings.TrimSpace(rec[latIDX]), 64, ) if err != nil { log.Fatalf("%v on line %v of shapes.txt", err, i) } lon, err := strconv.ParseFloat( strings.TrimSpace(rec[lonIDX]), 64, ) if err != nil { log.Fatalf("%v on line %v of shapes.txt", err, i) } seq, err := strconv.ParseInt( strings.TrimSpace(rec[seqIDX]), 10, 32, ) id := rec[idIDX] route := l.shapeRoute[id] if len(route) < 1 || l.skipRoute(route) { continue } agency := l.routeAgency[l.shapeRoute[id]] shape, err := models.NewShape( id, agency, int(seq), lat, lon, ) err = shape.Save(etc.DBConn) if err != nil { log.Fatalf("%v on line %v of shapes.txt", err, i) } } } // updateRouteShapes updates the route_shape table by identifying // the "biggest" shapes typical for a route func (l *Loader) updateRouteShapes() { var err error tx, err := etc.DBConn.Beginx() if err != nil { log.Fatal(err) } defer func() { if err == nil { err = tx.Commit() if err != nil { log.Println("can't commit route shapes", err) } } else { tx.Rollback() if err != nil { log.Println("can't rollback route shapes", err) } } }() // delete existing routes within a transaction (won't take effect // unless committed) err = models.DeleteRouteShapes(tx) if err != nil { log.Fatal(err) } // Get shapes ordered from smallest to largest routeShapes, err := 
models.GetRouteShapes(tx)
	if err != nil {
		log.Fatal(err)
	}
	for _, rs := range routeShapes {
		// upsert each route so we end up with the most common
		err = rs.Save(tx)
		if err != nil {
			log.Fatal(err)
		}
	}

	// delete existing fake shapes within the same transaction (won't take
	// effect unless committed)
	err = models.DeleteFakeShapes(tx)
	if err != nil {
		log.Fatal(err)
	}

	// Get fake shapes ordered from smallest to largest
	fakeShapes, err := models.GetFakeRouteShapes(tx)
	if err != nil {
		log.Fatal(err)
	}
	for _, fs := range fakeShapes {
		// upsert each route so we end up with the most common
		err = fs.Save(tx)
		if err != nil {
			log.Fatal(err)
		}
	}
}

// LoadOnce loads the files in conf.Loader.GTFSURLs, possibly filtering by the
// routes specified in conf.Loader.RouteFilter. If no filter is defined,
// it loads all data in the specified URLs.
func LoadOnce() {
	for _, url := range conf.Loader.GTFSURLs {
		// Skip blank entries in the URL list.
		if len(url) < 1 {
			continue
		}
		log.Printf("starting %v", url)

		// Download and unpack each feed into its own temp dir.
		dir, err := ioutil.TempDir(conf.Loader.TmpDir, "")
		if err != nil {
			log.Fatal(err)
		}
		err = download(url, dir)
		if err != nil {
			log.Fatal(err)
		}
		err = prepare(url, dir)
		if err != nil {
			log.Fatal(err)
		}

		// Run the load inside a closure so the deferred RemoveAll fires at
		// the end of each iteration rather than at function return.
		func() {
			log.Printf("loading: %v in %v", url, dir)
			defer os.RemoveAll(dir)
			t1 := time.Now()
			l := newLoader(dir)
			l.load()
			t2 := time.Now()
			log.Printf("took %v for %v", t2.Sub(t1), url)
		}()
	}

	// Update materialized views. Use a transaction for each one, because
	// we reset each view's primary ID sequence in a separate statement.
for _, view := range views { func() { var err error tx, err := etc.DBConn.Beginx() if err != nil { log.Fatal("can't create tx to update view", view, err) } defer func() { if err == nil { commitErr := tx.Commit() if commitErr != nil { log.Fatal("error committing update to view", view, err) } } else { rollbackErr := tx.Rollback() if rollbackErr != nil { log.Fatal("error rolling back update to view", view, err) } } }() statements := []string{ fmt.Sprintf("ALTER SEQUENCE %s_seq RESTART WITH 1", view), fmt.Sprintf("REFRESH MATERIALIZED VIEW CONCURRENTLY %s", view), } for _, statement := range statements { log.Println(statement) _, err = tx.Exec(statement) if err != nil { log.Println("can't exec", statement, err) return } log.Println("complete") } }() } } // LoadForever continuously runs LoadOnce, breaking for 24 hours between loads func LoadForever() { for { LoadOnce() log.Printf("finished loading, sleeping for %v", loaderBreak) time.Sleep(loaderBreak) } }
/* golem - lightweight Go WebSocket-framework Copyright (C) 2013 Niklas Voss This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ package golem // Lobby request information holding name of the lobby and the connection, which requested. type lobbyReq struct { // Name of the lobby the request goes to. name string // Reference to the connection, which requested. conn *Connection } // Lobby messages contain information about to which lobby it is being send and the data being send. type lobbyMsg struct { // Name of the lobby the message goes to. to string // Data being send to specified lobby. data []byte } // Wrapper for normal lobbies to add a member counter. type managedLobby struct { // Reference to lobby. lobby *Lobby // Member-count to allow removing of empty lobbies. count uint } // Handles any count of lobbies by keys. Currently only strings are supported as keys (lobby names). // As soon as generics are supported any key should be able to be used. The methods are used similar to // single lobby instance but preceded by the key. type LobbyManager struct { // Map of connections mapped to lobbies joined; necessary for leave all/clean up functionality. members map[*Connection]map[string]bool // Map of all managed lobbies with their names as keys. lobbies map[string]*managedLobby // Channel of join requests. join chan *lobbyReq // Channel of leave requests. 
leave chan *lobbyReq // Channel of leave all requests, essentially cleaning up every trace of the specified connection. leaveAll chan *Connection // Channel of messages associated with this lobby manager send chan *lobbyMsg // Stop signal channel stop chan bool } // Creates a new LobbyManager-Instance. func NewLobbyManager() *LobbyManager { // Create instance. lm := LobbyManager{ members: make(map[*Connection]map[string]bool), lobbies: make(map[string]*managedLobby), join: make(chan *lobbyReq), leave: make(chan *lobbyReq), leaveAll: make(chan *Connection), send: make(chan *lobbyMsg), stop: make(chan bool), } // Start message loop in new routine. go lm.run() // Return reference to this lobby manager. return &lm } // Helper function to leave a lobby by name. If specified lobby has // no members after leaving, it will be cleaned up. func (lm *LobbyManager) leaveLobbyByName(name string, conn *Connection) { if m, ok := lm.lobbies[name]; ok { // Continue if getting the lobby was ok. if _, ok := lm.members[conn]; ok { // Continue if connection has map of joined lobbies. if _, ok := lm.members[conn][name]; ok { // Continue if connection actually joined specified lobby. m.lobby.leave <- conn m.count-- delete(lm.members[conn], name) if m.count == 0 { // Get rid of lobby if it is empty m.lobby.Stop() delete(lm.lobbies, name) } } } } } // Run should always be executed in a new goroutine, because it contains the // message loop. func (lm *LobbyManager) run() { for { select { // Join case req := <-lm.join: m, ok := lm.lobbies[req.name] if !ok { // If lobby was not found for join request, create it! m = &managedLobby{ lobby: NewLobby(), count: 1, // start with count 1 for first user } lm.lobbies[req.name] = m } else { // If lobby exists increase count and join. m.count++ } m.lobby.join <- req.conn if _, ok := lm.members[req.conn]; !ok { // If lobby association map for connection does not exist, create it! 
lm.members[req.conn] = make(map[string]bool) } lm.members[req.conn][req.name] = true // Flag this lobby on members lobby map. // Leave case req := <-lm.leave: lm.leaveLobbyByName(req.name, req.conn) // Leave all case conn := <-lm.leaveAll: if cm, ok := lm.members[conn]; ok { for name := range cm { // Iterate over all lobbies this connection joined and leave them. lm.leaveLobbyByName(name, conn) } delete(lm.members, conn) // Remove map of joined lobbies } // Send case msg := <-lm.send: if m, ok := lm.lobbies[msg.to]; ok { // If lobby exists, get it and send data to it. m.lobby.send <- msg.data } // Stop case <-lm.stop: for k, m := range lm.lobbies { // Stop all lobbies! m.lobby.Stop() delete(lm.lobbies, k) } return } } } // func (lm *LobbyManager) Join(name string, conn *Connection) { lm.join <- &lobbyReq{ name: name, conn: conn, } } func (lm *LobbyManager) Leave(name string, conn *Connection) { lm.leave <- &lobbyReq{ name: name, conn: conn, } } func (lm *LobbyManager) LeaveAll(conn *Connection) { lm.leaveAll <- conn } func (lm *LobbyManager) Send(to string, data []byte) { lm.send <- &lobbyMsg{ to: to, data: data, } } func (lm *LobbyManager) Emit(to string, what string, data interface{}) { if b, ok := pack(what, data); ok { lm.send <- &lobbyMsg{ to: to, data: b, } } } func (lm *LobbyManager) Stop() { lm.stop <- true } Updated documentation of Lobby Manager. /* golem - lightweight Go WebSocket-framework Copyright (C) 2013 Niklas Voss This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ package golem // Lobby request information holding name of the lobby and the connection, which requested. type lobbyReq struct { // Name of the lobby the request goes to. name string // Reference to the connection, which requested. conn *Connection } // Lobby messages contain information about to which lobby it is being send and the data being send. type lobbyMsg struct { // Name of the lobby the message goes to. to string // Data being send to specified lobby. data []byte } // Wrapper for normal lobbies to add a member counter. type managedLobby struct { // Reference to lobby. lobby *Lobby // Member-count to allow removing of empty lobbies. count uint } // Handles any count of lobbies by keys. Currently only strings are supported as keys (lobby names). // As soon as generics are supported any key should be able to be used. The methods are used similar to // single lobby instance but preceded by the key. type LobbyManager struct { // Map of connections mapped to lobbies joined; necessary for leave all/clean up functionality. members map[*Connection]map[string]bool // Map of all managed lobbies with their names as keys. lobbies map[string]*managedLobby // Channel of join requests. join chan *lobbyReq // Channel of leave requests. leave chan *lobbyReq // Channel of leave all requests, essentially cleaning up every trace of the specified connection. leaveAll chan *Connection // Channel of messages associated with this lobby manager send chan *lobbyMsg // Stop signal channel stop chan bool } // Creates a new LobbyManager-Instance. func NewLobbyManager() *LobbyManager { // Create instance. 
lm := LobbyManager{ members: make(map[*Connection]map[string]bool), lobbies: make(map[string]*managedLobby), join: make(chan *lobbyReq), leave: make(chan *lobbyReq), leaveAll: make(chan *Connection), send: make(chan *lobbyMsg), stop: make(chan bool), } // Start message loop in new routine. go lm.run() // Return reference to this lobby manager. return &lm } // Helper function to leave a lobby by name. If specified lobby has // no members after leaving, it will be cleaned up. func (lm *LobbyManager) leaveLobbyByName(name string, conn *Connection) { if m, ok := lm.lobbies[name]; ok { // Continue if getting the lobby was ok. if _, ok := lm.members[conn]; ok { // Continue if connection has map of joined lobbies. if _, ok := lm.members[conn][name]; ok { // Continue if connection actually joined specified lobby. m.lobby.leave <- conn m.count-- delete(lm.members[conn], name) if m.count == 0 { // Get rid of lobby if it is empty m.lobby.Stop() delete(lm.lobbies, name) } } } } } // Run should always be executed in a new goroutine, because it contains the // message loop. func (lm *LobbyManager) run() { for { select { // Join case req := <-lm.join: m, ok := lm.lobbies[req.name] if !ok { // If lobby was not found for join request, create it! m = &managedLobby{ lobby: NewLobby(), count: 1, // start with count 1 for first user } lm.lobbies[req.name] = m } else { // If lobby exists increase count and join. m.count++ } m.lobby.join <- req.conn if _, ok := lm.members[req.conn]; !ok { // If lobby association map for connection does not exist, create it! lm.members[req.conn] = make(map[string]bool) } lm.members[req.conn][req.name] = true // Flag this lobby on members lobby map. // Leave case req := <-lm.leave: lm.leaveLobbyByName(req.name, req.conn) // Leave all case conn := <-lm.leaveAll: if cm, ok := lm.members[conn]; ok { for name := range cm { // Iterate over all lobbies this connection joined and leave them. 
lm.leaveLobbyByName(name, conn)
				}
				delete(lm.members, conn) // Remove map of joined lobbies
			}
		// Send
		case msg := <-lm.send:
			if m, ok := lm.lobbies[msg.to]; ok { // If lobby exists, get it and send data to it.
				m.lobby.send <- msg.data
			}
		// Stop
		case <-lm.stop:
			for k, m := range lm.lobbies { // Stop all lobbies!
				m.lobby.Stop()
				delete(lm.lobbies, k)
			}
			return
		}
	}
}

// Join adds the connection to the lobby with the specified name. The run
// loop creates the lobby on demand if it does not exist yet.
func (lm *LobbyManager) Join(name string, conn *Connection) {
	lm.join <- &lobbyReq{
		name: name,
		conn: conn,
	}
}

// Leave removes the connection from the lobby with the specified name; the
// run loop cleans the lobby up once its member count reaches zero.
func (lm *LobbyManager) Leave(name string, conn *Connection) {
	lm.leave <- &lobbyReq{
		name: name,
		conn: conn,
	}
}

// LeaveAll removes the connection from all lobbies of this manager. This is
// important for clean up purposes to keep the member count accurate, and
// should therefore always be called when a connection is closed.
func (lm *LobbyManager) LeaveAll(conn *Connection) {
	lm.leaveAll <- conn
}

// Send delivers an array of bytes to all members of the lobby with the
// specified name. Unknown lobby names are silently ignored by the run loop.
func (lm *LobbyManager) Send(to string, data []byte) {
	lm.send <- &lobbyMsg{
		to:   to,
		data: data,
	}
}

// Emit sends a message that can be fetched using the golem client library.
// The provided data interface will be automatically marshalled into JSON.
func (lm *LobbyManager) Emit(to string, what string, data interface{}) {
	if b, ok := pack(what, data); ok {
		lm.send <- &lobbyMsg{
			to:   to,
			data: b,
		}
	}
}

// Stop terminates the manager's message loop, stopping and removing every
// managed lobby.
func (lm *LobbyManager) Stop() {
	lm.stop <- true
}
// Copyright 2016 by caixw, All rights reserved. // Use of this source code is governed by a MIT // license that can be found in the LICENSE file. // Package locale 提供了一个本地化翻译服务。 package locale import ( "errors" "io" "golang.org/x/text/language" "golang.org/x/text/message" "github.com/caixw/apidoc/locale/syslocale" "github.com/caixw/apidoc/vars" ) // 保证有个初始化的值,部分包的测试功能依赖此变量 var localePrinter = message.NewPrinter(language.MustParse(vars.DefaultLocale)) // Init 初始化 locale 包并。 // 无论是否返回错误信息,都会初始一种语言作为其交互语言。 func Init() error { tag, err := getTag() localePrinter = NewPrinter(tag) return err } func getTag() (language.Tag, error) { found := false for id, messages := range locales { // 保证 locales 已经初始化,即要在 init() 函数之后调用 tag := language.MustParse(id) for key, val := range messages { if err := message.SetString(tag, key, val); err != nil { panic(err) } } if id == vars.DefaultLocale { found = true } } if !found { return language.Und, errors.New("vars.DefaultLocale 的值并不存在") } tag, err := syslocale.Get() if err != nil { // 此条必定成功,因为与 vars.DefaultLocale 相同的值已经在上面的 for 特环中执行过。 return language.MustParse(vars.DefaultLocale), err } return tag, nil } // NewPrinter 根据 tag 生成一个新的语言输出环境 func NewPrinter(tag language.Tag) *message.Printer { return message.NewPrinter(tag) } // Print 类似 fmt.Print,与特定的语言绑定。 func Print(v ...interface{}) (int, error) { return localePrinter.Print(v...) } // Println 类似 fmt.Println,与特定的语言绑定。 func Println(v ...interface{}) (int, error) { return localePrinter.Println(v...) } // Printf 类似 fmt.Printf,与特定的语言绑定。 func Printf(key string, v ...interface{}) (int, error) { return localePrinter.Printf(key, v...) } // Sprint 类似 fmt.Sprint,与特定的语言绑定。 func Sprint(v ...interface{}) string { return localePrinter.Sprint(v...) } // Sprintln 类似 fmt.Sprintln,与特定的语言绑定。 func Sprintln(v ...interface{}) string { return localePrinter.Sprintln(v...) } // Sprintf 类似 fmt.Sprintf,与特定的语言绑定。 func Sprintf(key message.Reference, v ...interface{}) string { return localePrinter.Sprintf(key, v...) 
} // Fprint 类似 fmt.Fprint,与特定的语言绑定。 func Fprint(w io.Writer, v ...interface{}) (int, error) { return localePrinter.Fprint(w, v...) } // Fprintln 类似 fmt.Fprintln,与特定的语言绑定。 func Fprintln(w io.Writer, v ...interface{}) (int, error) { return localePrinter.Fprintln(w, v...) } // Fprintf 类似 fmt.Fprintf,与特定的语言绑定。 func Fprintf(w io.Writer, key message.Reference, v ...interface{}) (int, error) { return localePrinter.Fprintf(w, key, v...) } 调整 locale 的行为 // Copyright 2016 by caixw, All rights reserved. // Use of this source code is governed by a MIT // license that can be found in the LICENSE file. // Package locale 提供了一个本地化翻译服务。 package locale import ( "io" "golang.org/x/text/language" "golang.org/x/text/message" "github.com/caixw/apidoc/locale/syslocale" "github.com/caixw/apidoc/vars" ) // 保证有个初始化的值,部分包的测试功能依赖此变量 var localePrinter = message.NewPrinter(language.MustParse(vars.DefaultLocale)) // Init 初始化 locale 包并。 // 无论是否返回错误信息,都会初始一种语言作为其交互语言。 func Init() error { tag, err := getTag() localePrinter = NewPrinter(tag) return err } func getTag() (language.Tag, error) { found := false for id, messages := range locales { // 保证 locales 已经初始化,即要在 init() 函数之后调用 tag, err := language.Parse(id) if err != nil { return language.Und, err } for key, val := range messages { if err := message.SetString(tag, key, val); err != nil { return language.Und, err } } if id == vars.DefaultLocale { found = true } } if !found { panic("vars.DefaultLocale 的值并不存在") // 这算是代码级别的错误,直接 panic } tag, err := syslocale.Get() if err != nil { // 此条必定成功,因为与 vars.DefaultLocale 相同的值已经在上面的 for 特环中执行过。 return language.MustParse(vars.DefaultLocale), err } return tag, nil } // NewPrinter 根据 tag 生成一个新的语言输出环境 func NewPrinter(tag language.Tag) *message.Printer { return message.NewPrinter(tag) } // Print 类似 fmt.Print,与特定的语言绑定。 func Print(v ...interface{}) (int, error) { return localePrinter.Print(v...) } // Println 类似 fmt.Println,与特定的语言绑定。 func Println(v ...interface{}) (int, error) { return localePrinter.Println(v...) 
}

// Printf is analogous to fmt.Printf, bound to the package's current locale printer.
func Printf(key string, v ...interface{}) (int, error) {
	return localePrinter.Printf(key, v...)
}

// Sprint is analogous to fmt.Sprint, bound to the package's current locale printer.
func Sprint(v ...interface{}) string {
	return localePrinter.Sprint(v...)
}

// Sprintln is analogous to fmt.Sprintln, bound to the package's current locale printer.
func Sprintln(v ...interface{}) string {
	return localePrinter.Sprintln(v...)
}

// Sprintf is analogous to fmt.Sprintf, bound to the package's current locale printer.
func Sprintf(key message.Reference, v ...interface{}) string {
	return localePrinter.Sprintf(key, v...)
}

// Fprint is analogous to fmt.Fprint, bound to the package's current locale printer.
func Fprint(w io.Writer, v ...interface{}) (int, error) {
	return localePrinter.Fprint(w, v...)
}

// Fprintln is analogous to fmt.Fprintln, bound to the package's current locale printer.
func Fprintln(w io.Writer, v ...interface{}) (int, error) {
	return localePrinter.Fprintln(w, v...)
}

// Fprintf is analogous to fmt.Fprintf, bound to the package's current locale printer.
func Fprintf(w io.Writer, key message.Reference, v ...interface{}) (int, error) {
	return localePrinter.Fprintf(w, key, v...)
}
package gocassos import ( "bytes" "time" "github.com/gocql/gocql" ) func (o *Object) Remove() { o.cfg.in_progress.Add(1) NVM.Printf("REMOVE: Removing %s", o.id) go o.async_remove() return } func (o *Object) async_remove() { defer o.cfg.in_progress.Done() if err := o.cfg.Conn.Query(`DELETE FROM objects WHERE objectname = ? AND updated = ? AND nodetag = ?`, o.Objectname, o.Updated, o.Nodetag).Consistency(gocql.One).Exec(); err != nil { WTF.Printf("REMOVE: Failure removing %s - %s", o.id, err) return } //NVM.Printf("REMOVE: Removing chunks for %s", o.id) for chunk := int64(0); chunk < o.NumChunks; chunk++ { // NVM.Printf("REMOVE: Removing chunk %d/%d for %s", chunk, o.NumChunks, o.id) if err := o.cfg.Conn.Query(`DELETE FROM object_chunks WHERE objectname = ? AND updated = ? AND nodetag = ? AND chunk_num = ? `, o.Objectname, o.Updated, o.Nodetag, chunk).Consistency(gocql.One).Exec(); err != nil { WTF.Printf("REMOVE: Error removing chunk %d/%d on %s - %s", chunk, o.NumChunks, o.id, err) break } } NVM.Printf("REMOVE: Done removing chunks for %s", o.id) return } func (o *Object) CleanupDupes() { o.cfg.in_progress.Add(1) go o.async_cleanup_dupes() return } func (o *Object) async_cleanup_dupes() { defer o.cfg.in_progress.Done() time.Sleep(time.Duration(o.cfg.ScrubGraceTime) * time.Second) BTW.Printf("SCRUB: Cleaning up duplicates for %s", o.Objectname) iter := o.cfg.Conn.Query(`SELECT objectname, updated, nodetag, num_chunks FROM objects WHERE objectname = ? 
`, o.Objectname).Consistency(gocql.One).Iter() var latest, obj, tmp *Object latest = o var objectname string var updated, num_chunks int64 var nodetag gocql.UUID for iter.Scan(&objectname, &updated, &nodetag, &num_chunks) { obj = new(Object) obj.cfg = o.cfg obj.Objectname = objectname obj.Updated = updated obj.Nodetag = nodetag obj.NumChunks = num_chunks obj.set_id() switch { case obj.Updated > latest.Updated: // newer timestamp tmp = latest latest = obj obj = tmp case obj.Updated == latest.Updated: //same timestamp; compare nodetags switch bytes.Compare(obj.Nodetag.Bytes(), latest.Nodetag.Bytes()) { case 0: // same object obj = nil case 1: // obj nodetag greater than latest tmp = latest latest = obj obj = tmp } } // made this far, obj must be removed if obj != nil { BTW.Printf("SCRUB: Dropping duplicated object %s", obj.id) obj.Remove() } } latest.set_id() if obj != nil { BTW.Printf("SCRUB: Keeping object %s", latest.id) } return } Check for forbidden updates on removal as well package gocassos import ( "bytes" "time" "github.com/gocql/gocql" ) func (o *Object) Remove() error { if !o.cfg.AllowUpdates { FYI.Printf("[%s] PUSH: Refusing to delete object %s (%0.3fs lookup)", o.ClientId, o.id, o.LookupTime.Seconds()) return ErrRefused } o.cfg.in_progress.Add(1) NVM.Printf("REMOVE: Removing %s", o.id) go o.async_remove() return nil } func (o *Object) async_remove() { defer o.cfg.in_progress.Done() if err := o.cfg.Conn.Query(`DELETE FROM objects WHERE objectname = ? AND updated = ? AND nodetag = ?`, o.Objectname, o.Updated, o.Nodetag).Consistency(gocql.One).Exec(); err != nil { WTF.Printf("REMOVE: Failure removing %s - %s", o.id, err) return } //NVM.Printf("REMOVE: Removing chunks for %s", o.id) for chunk := int64(0); chunk < o.NumChunks; chunk++ { // NVM.Printf("REMOVE: Removing chunk %d/%d for %s", chunk, o.NumChunks, o.id) if err := o.cfg.Conn.Query(`DELETE FROM object_chunks WHERE objectname = ? AND updated = ? AND nodetag = ? AND chunk_num = ? 
`, o.Objectname, o.Updated, o.Nodetag, chunk).Consistency(gocql.One).Exec(); err != nil { WTF.Printf("REMOVE: Error removing chunk %d/%d on %s - %s", chunk, o.NumChunks, o.id, err) break } } NVM.Printf("REMOVE: Done removing chunks for %s", o.id) return } func (o *Object) CleanupDupes() { o.cfg.in_progress.Add(1) go o.async_cleanup_dupes() return } func (o *Object) async_cleanup_dupes() { defer o.cfg.in_progress.Done() time.Sleep(time.Duration(o.cfg.ScrubGraceTime) * time.Second) BTW.Printf("SCRUB: Cleaning up duplicates for %s", o.Objectname) iter := o.cfg.Conn.Query(`SELECT objectname, updated, nodetag, num_chunks FROM objects WHERE objectname = ? `, o.Objectname).Consistency(gocql.One).Iter() var latest, obj, tmp *Object latest = o var objectname string var updated, num_chunks int64 var nodetag gocql.UUID for iter.Scan(&objectname, &updated, &nodetag, &num_chunks) { obj = new(Object) obj.cfg = o.cfg obj.Objectname = objectname obj.Updated = updated obj.Nodetag = nodetag obj.NumChunks = num_chunks obj.set_id() switch { case obj.Updated > latest.Updated: // newer timestamp tmp = latest latest = obj obj = tmp case obj.Updated == latest.Updated: //same timestamp; compare nodetags switch bytes.Compare(obj.Nodetag.Bytes(), latest.Nodetag.Bytes()) { case 0: // same object obj = nil case 1: // obj nodetag greater than latest tmp = latest latest = obj obj = tmp } } // made this far, obj must be removed if obj != nil { BTW.Printf("SCRUB: Dropping duplicated object %s", obj.id) obj.Remove() } } latest.set_id() if obj != nil { BTW.Printf("SCRUB: Keeping object %s", latest.id) } return }
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Enhanced Logging // // This is inspired by the logging functionality in Java. Essentially, you create a Logger // object and create output filters for it. You can send whatever you want to the Logger, // and it will filter that based on your settings and send it to the outputs. This way, you // can put as much debug code in your program as you want, and when you're done you can filter // out the mundane messages so only the import ones show up. // // Utility functions are provided to make life easier. Here is some example code to get started: // // log := elog.NewLogger() // log.AddFilter("stdout", elog.DEBUG, new(elog.ConsoleLogWriter)) // log.AddFilter("log", elog.FINE, elog.NewFileLogWriter("example.log", true)) // log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) // // The first two lines can be combined with the utility NewConsoleLogger: // // log := elog.NewConsoleLogger(elog.DEBUG) // log.AddFilter("log", elog.FINE, elog.NewFileLogWriter("example.log", true)) // log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) // // Usage notes: // - The ConsoleLogWriter does not display the source to standard output, but the FileLogWriter does. // - The utility functions (Info, Debug, Warn, etc) derive their source from the calling function // // Future work: (please let me know if you think I should work on any of these particularly) // - Log file rotation // - Logging configuration files ala log4j // - Have the ability to remove filters? 
// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows for another method of logging // - Add an XML filter type package log4go import ( "os" "fmt" "time" "runtime" "container/vector" ) // Version information const ( L4G_VERSION = "log4go-v1.0.0" L4G_MAJOR = 1 L4G_MINOR = 0 L4G_BUILD = 0 ) /****** Constants ******/ // These are the integer logging levels used by the logger const ( FINEST = iota FINE DEBUG TRACE INFO WARNING ERROR CRITICAL ) // Logging level strings var ( levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"} ) /****** LogRecord ******/ // This is the lifeblood of the package; it contains all of the pertinent information for each message type LogRecord struct { Level int // The log level Created *time.Time // The time at which the log message was created Source string // The message source Message string // The log message } func newLogRecord(lv int, src string, msg string) *LogRecord { lr := new(LogRecord) lr.Created = time.LocalTime() lr.Level = lv lr.Source = src lr.Message = msg return lr } /****** LogWriter ******/ // This is an interface for anything that should be able to write logs type LogWriter interface { // This will be called to log a LogRecord message. // If necessary. this function should be *INTERNALLY* synchronzied, // and should spawn a separate goroutine if it could hang the program or take a long time. // TODO: This may be changed to have an Init() call that returns a // channel similar to <-chan *LogRecord for a more go-like internal setup LogWrite(rec *LogRecord) (n int, err os.Error) // This should return, at any given time, if the LogWriter is still in a good state. // A good state is defined as having the ability to dispatch a log message immediately. // if a LogWriter is not in a good state, the log message is simply not dispatched. Good() bool // This should clean up anything lingering about the LogWriter, as it is called before // the LogWriter is removed. 
If possible, this should guarantee that all LogWrites // have been completed. Close() } /****** Logger ******/ // If LogRecord is the blood of the package, is the heart. type Logger struct { // All filters have an entry in each of the following filterLevels map[string]int filterLogWriters map[string]LogWriter } // Create a new logger func NewLogger() *Logger { log := new(Logger) log.filterLevels = make(map[string]int) log.filterLogWriters = make(map[string]LogWriter) return log } // Closes all log writers in preparation for exiting the program. // Calling this is not really imperative, unless you want to guarantee that all log messages are written. func (log *Logger) Close() { // Close all open loggers for key := range log.filterLogWriters { log.filterLogWriters[key].Close() log.filterLogWriters[key] = nil, false log.filterLevels[key] = 0, false } } // Add the standard filter. // This function is NOT INTERNALLY THREAD SAFE. If you plan on // calling this function from multiple goroutines, you will want // to synchronize it yourself somehow. 
func (log *Logger) AddFilter(name string, level int, writer LogWriter) { if writer == nil || !writer.Good() { return } log.filterLevels[name] = level log.filterLogWriters[name] = writer } // Create a new logger with the standard stdout func NewConsoleLogger(level int) *Logger { log := NewLogger() log.AddFilter("stdout", level, new(ConsoleLogWriter)) return log } /******* Logging *******/ // Send a log message manually func (log *Logger) Log(level int, source, message string) { // Create a vector long enough to not require resizing var logto vector.StringVector logto.Resize(0, len(log.filterLevels)) // Determine if any logging will be done for filt := range log.filterLevels { if level >= log.filterLevels[filt] { logto.Push(filt) } } // Only log if a filter requires it if len(logto) > 0 { // Make the log record rec := newLogRecord(level, source, message) // Dispatch the logs for _,filt := range logto { lw := log.filterLogWriters[filt] if lw.Good() { lw.LogWrite(rec) } } } } // Send a formatted log message easily func (log *Logger) intLogf(level int, format string, args ...interface{}) { // Create a vector long enough to not require resizing var logto vector.StringVector logto.Resize(0, len(log.filterLevels)) // Determine if any logging will be done for filt := range log.filterLevels { if level >= log.filterLevels[filt] { logto.Push(filt) } } // Only log if a filter requires it if len(logto) > 0 { // Determine caller func pc, _, lineno, ok := runtime.Caller(2) src := "" if ok { src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) } // Make the log record rec := newLogRecord(level, src, fmt.Sprintf(format, args)) // Dispatch the logs for _,filt := range logto { log.filterLogWriters[filt].LogWrite(rec) } } } // Send a formatted log message easily func (log *Logger) Logf(level int, format string, args ...interface{}) { log.intLogf(level, format, args) } // Utility for finest log messages func (log *Logger) Finest(format string, args ...interface{}) { 
log.intLogf(FINEST, format, args) } // Utility for fine log messages func (log *Logger) Fine(format string, args ...interface{}) { log.intLogf(FINE, format, args) } // Utility for debug log messages func (log *Logger) Debug(format string, args ...interface{}) { log.intLogf(DEBUG, format, args) } // Utility for trace log messages func (log *Logger) Trace(format string, args ...interface{}) { log.intLogf(TRACE, format, args) } // Utility for info log messages func (log *Logger) Info(format string, args ...interface{}) { log.intLogf(INFO, format, args) } // Utility for warn log messages (returns an os.Error for easy function returns) func (log *Logger) Warn(format string, args ...interface{}) os.Error { log.intLogf(WARNING, format, args) return os.NewError(fmt.Sprintf(format, args)) } // Utility for error log messages (returns an os.Error for easy function returns) func (log *Logger) Error(format string, args ...interface{}) os.Error { log.intLogf(ERROR, format, args) return os.NewError(fmt.Sprintf(format, args)) } // Utility for critical log messages (returns an os.Error for easy function returns) func (log *Logger) Critical(format string, args ...interface{}) os.Error { log.intLogf(CRITICAL, format, args) return os.NewError(fmt.Sprintf(format, args)) } Version bump // Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Enhanced Logging // // This is inspired by the logging functionality in Java. Essentially, you create a Logger // object and create output filters for it. You can send whatever you want to the Logger, // and it will filter that based on your settings and send it to the outputs. This way, you // can put as much debug code in your program as you want, and when you're done you can filter // out the mundane messages so only the import ones show up. // // Utility functions are provided to make life easier. 
Here is some example code to get started: // // log := elog.NewLogger() // log.AddFilter("stdout", elog.DEBUG, new(elog.ConsoleLogWriter)) // log.AddFilter("log", elog.FINE, elog.NewFileLogWriter("example.log", true)) // log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) // // The first two lines can be combined with the utility NewConsoleLogger: // // log := elog.NewConsoleLogger(elog.DEBUG) // log.AddFilter("log", elog.FINE, elog.NewFileLogWriter("example.log", true)) // log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) // // Usage notes: // - The ConsoleLogWriter does not display the source to standard output, but the FileLogWriter does. // - The utility functions (Info, Debug, Warn, etc) derive their source from the calling function // // Future work: (please let me know if you think I should work on any of these particularly) // - Log file rotation // - Logging configuration files ala log4j // - Have the ability to remove filters? 
// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows for another method of logging // - Add an XML filter type package log4go import ( "os" "fmt" "time" "runtime" "container/vector" ) // Version information const ( L4G_VERSION = "log4go-v1.0.1" L4G_MAJOR = 1 L4G_MINOR = 0 L4G_BUILD = 1 ) /****** Constants ******/ // These are the integer logging levels used by the logger const ( FINEST = iota FINE DEBUG TRACE INFO WARNING ERROR CRITICAL ) // Logging level strings var ( levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"} ) /****** LogRecord ******/ // This is the lifeblood of the package; it contains all of the pertinent information for each message type LogRecord struct { Level int // The log level Created *time.Time // The time at which the log message was created Source string // The message source Message string // The log message } func newLogRecord(lv int, src string, msg string) *LogRecord { lr := new(LogRecord) lr.Created = time.LocalTime() lr.Level = lv lr.Source = src lr.Message = msg return lr } /****** LogWriter ******/ // This is an interface for anything that should be able to write logs type LogWriter interface { // This will be called to log a LogRecord message. // If necessary. this function should be *INTERNALLY* synchronzied, // and should spawn a separate goroutine if it could hang the program or take a long time. // TODO: This may be changed to have an Init() call that returns a // channel similar to <-chan *LogRecord for a more go-like internal setup LogWrite(rec *LogRecord) (n int, err os.Error) // This should return, at any given time, if the LogWriter is still in a good state. // A good state is defined as having the ability to dispatch a log message immediately. // if a LogWriter is not in a good state, the log message is simply not dispatched. Good() bool // This should clean up anything lingering about the LogWriter, as it is called before // the LogWriter is removed. 
If possible, this should guarantee that all LogWrites // have been completed. Close() } /****** Logger ******/ // If LogRecord is the blood of the package, is the heart. type Logger struct { // All filters have an entry in each of the following filterLevels map[string]int filterLogWriters map[string]LogWriter } // Create a new logger func NewLogger() *Logger { log := new(Logger) log.filterLevels = make(map[string]int) log.filterLogWriters = make(map[string]LogWriter) return log } // Closes all log writers in preparation for exiting the program. // Calling this is not really imperative, unless you want to guarantee that all log messages are written. func (log *Logger) Close() { // Close all open loggers for key := range log.filterLogWriters { log.filterLogWriters[key].Close() log.filterLogWriters[key] = nil, false log.filterLevels[key] = 0, false } } // Add the standard filter. // This function is NOT INTERNALLY THREAD SAFE. If you plan on // calling this function from multiple goroutines, you will want // to synchronize it yourself somehow. 
func (log *Logger) AddFilter(name string, level int, writer LogWriter) { if writer == nil || !writer.Good() { return } log.filterLevels[name] = level log.filterLogWriters[name] = writer } // Create a new logger with the standard stdout func NewConsoleLogger(level int) *Logger { log := NewLogger() log.AddFilter("stdout", level, new(ConsoleLogWriter)) return log } /******* Logging *******/ // Send a log message manually func (log *Logger) Log(level int, source, message string) { // Create a vector long enough to not require resizing var logto vector.StringVector logto.Resize(0, len(log.filterLevels)) // Determine if any logging will be done for filt := range log.filterLevels { if level >= log.filterLevels[filt] { logto.Push(filt) } } // Only log if a filter requires it if len(logto) > 0 { // Make the log record rec := newLogRecord(level, source, message) // Dispatch the logs for _,filt := range logto { lw := log.filterLogWriters[filt] if lw.Good() { lw.LogWrite(rec) } } } } // Send a formatted log message easily func (log *Logger) intLogf(level int, format string, args ...interface{}) { // Create a vector long enough to not require resizing var logto vector.StringVector logto.Resize(0, len(log.filterLevels)) // Determine if any logging will be done for filt := range log.filterLevels { if level >= log.filterLevels[filt] { logto.Push(filt) } } // Only log if a filter requires it if len(logto) > 0 { // Determine caller func pc, _, lineno, ok := runtime.Caller(2) src := "" if ok { src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) } // Make the log record rec := newLogRecord(level, src, fmt.Sprintf(format, args)) // Dispatch the logs for _,filt := range logto { log.filterLogWriters[filt].LogWrite(rec) } } } // Send a formatted log message easily func (log *Logger) Logf(level int, format string, args ...interface{}) { log.intLogf(level, format, args) } // Utility for finest log messages func (log *Logger) Finest(format string, args ...interface{}) { 
log.intLogf(FINEST, format, args) } // Utility for fine log messages func (log *Logger) Fine(format string, args ...interface{}) { log.intLogf(FINE, format, args) } // Utility for debug log messages func (log *Logger) Debug(format string, args ...interface{}) { log.intLogf(DEBUG, format, args) } // Utility for trace log messages func (log *Logger) Trace(format string, args ...interface{}) { log.intLogf(TRACE, format, args) } // Utility for info log messages func (log *Logger) Info(format string, args ...interface{}) { log.intLogf(INFO, format, args) } // Utility for warn log messages (returns an os.Error for easy function returns) func (log *Logger) Warn(format string, args ...interface{}) os.Error { log.intLogf(WARNING, format, args) return os.NewError(fmt.Sprintf(format, args)) } // Utility for error log messages (returns an os.Error for easy function returns) func (log *Logger) Error(format string, args ...interface{}) os.Error { log.intLogf(ERROR, format, args) return os.NewError(fmt.Sprintf(format, args)) } // Utility for critical log messages (returns an os.Error for easy function returns) func (log *Logger) Critical(format string, args ...interface{}) os.Error { log.intLogf(CRITICAL, format, args) return os.NewError(fmt.Sprintf(format, args)) }
package logger import ( "runtime" "strings" "github.com/fatih/color" ) // ColorMap - Used to map a particular color to a cf status phrase - returns lowercase strings in color. func (l *Logger) ColorMap(s string) string { // If Windows, disable colorS if runtime.GOOS == "windows" || *l.Colors { return strings.ToLower(s) } v := strings.Split(s, "_")[len(strings.Split(s, "_"))-1] var result string switch v { case "COMPLETE": result = color.New(color.FgGreen).Add(color.Bold).SprintFunc()(s) case "PROGRESS": result = color.New(color.FgYellow).Add(color.Bold).SprintFunc()(s) case "FAILED": result = color.New(color.FgRed).Add(color.Bold).SprintFunc()(s) case "SKIPPED": result = color.New(color.FgHiBlue).Add(color.Bold).SprintFunc()(s) default: // Unidentified, just returns the same string return strings.ToLower(s) } return strings.ToLower(result) } // ColorString - Returns colored string func (l *Logger) ColorString(s, col string) string { // If Windows, disable colorS if runtime.GOOS == "windows" || *l.Colors { return s } var result string switch strings.ToLower(col) { case "green": result = color.New(color.FgGreen).Add(color.Bold).SprintFunc()(s) case "yellow": result = color.New(color.FgYellow).Add(color.Bold).SprintFunc()(s) case "red": result = color.New(color.FgRed).Add(color.Bold).SprintFunc()(s) case "magenta": result = color.New(color.FgMagenta).Add(color.Bold).SprintFunc()(s) case "cyan": result = color.New(color.FgCyan).Add(color.Bold).SprintFunc()(s) default: // Unidentified, just returns the same string return s } return result } updated logger.colormap statuses package logger import ( "runtime" "strings" "github.com/fatih/color" ) // ColorMap - Used to map a particular color to a cf status phrase - returns lowercase strings in color. 
func (l *Logger) ColorMap(s string) string { // If Windows, disable colorS if runtime.GOOS == "windows" || *l.Colors { return strings.ToLower(s) } var result string switch s { case "CREATE_COMPLETE", "DELETE_COMPLETE", "UPDATE_COMPLETE": result = color.New(color.FgGreen).Add(color.Bold).SprintFunc()(s) case "DELETE_IN_PROGRESS", "REVIEW_IN_PROGRESS", "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", "UPDATE_IN_PROGRESS": result = color.New(color.FgYellow).Add(color.Bold).SprintFunc()(s) default: // NOTE: all other status are red result = color.New(color.FgRed).Add(color.Bold).SprintFunc()(s) } return strings.ToLower(result) } // ColorString - Returns colored string func (l *Logger) ColorString(s, col string) string { // If Windows, disable colorS if runtime.GOOS == "windows" || *l.Colors { return s } var result string switch strings.ToLower(col) { case "green": result = color.New(color.FgGreen).Add(color.Bold).SprintFunc()(s) case "yellow": result = color.New(color.FgYellow).Add(color.Bold).SprintFunc()(s) case "red": result = color.New(color.FgRed).Add(color.Bold).SprintFunc()(s) case "magenta": result = color.New(color.FgMagenta).Add(color.Bold).SprintFunc()(s) case "cyan": result = color.New(color.FgCyan).Add(color.Bold).SprintFunc()(s) default: // Unidentified, just returns the same string return s } return result }
package main import ( "bufio" "database/sql" "flag" "fmt" _ "github.com/mattn/go-sqlite3" "io" "net" "os" "os/signal" "regexp" "strconv" "strings" "syscall" "time" ) type logEntry struct { Node string Time int64 Msg string } type clientEntry struct { Addr string Time int64 Online int } var ( tables map[string]string tsFormat = "2006/01/02 15:04:05" addr string dbFile string logChan chan *logEntry cliChan chan *clientEntry ) var logSplit = regexp.MustCompile("^([\\w-:]+) (\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}) (.+)$") var responseCheck = regexp.MustCompile("response time: (\\d+)us$") func init() { flLogBuffer := flag.Uint("b", 16, "log entries to buffer") flDbFile := flag.String("f", "logs.db", "database file") port := flag.Uint("p", 5988, "port to listen on") flag.Parse() addr = fmt.Sprintf(":%d", *port) dbFile = *flDbFile logChan = make(chan *logEntry, *flLogBuffer) cliChan = make(chan *clientEntry, *flLogBuffer) tables = make(map[string]string, 0) tables["entries"] = "CREATE TABLE entries (node text, timestamp integer, message string)" tables["response_time"] = "CREATE TABLE response_time (node text, timestamp integer, microsec integer)" tables["clients"] = "CREATE TABLE clients (address text, timestamp integer, online integer)" } func main() { dbSetup() go listen() go log() sigc := make(chan os.Signal, 1) signal.Notify(sigc, os.Kill, os.Interrupt, syscall.SIGTERM) fmt.Println("[+] waiting for shutdown signal") <-sigc fmt.Println("[+] closing log channel") close(logChan) fmt.Println("[+] closing log channel") close(cliChan) <-time.After(100 * time.Millisecond) fmt.Println("[+] logsrv shutting down") os.Exit(0) } func listen() { fmt.Println("[+] start TCP server") tcpAddr, err := net.ResolveTCPAddr("tcp", addr) if err != nil { fmt.Fprintln(os.Stderr, "[!] failed to resolve TCP address:", err.Error()) os.Exit(1) } listener, err := net.ListenTCP("tcp", tcpAddr) if err != nil { fmt.Fprintln(os.Stderr, "[!] 
failed to set up TCP listener:", err.Error()) } else { defer listener.Close() fmt.Println("[+] listening for clients") for { conn, err := listener.Accept() if err != nil { fmt.Println("[!] TCP error:", err.Error()) continue } go processMessage(conn) } } fmt.Println("[+] TCP server shuts down") } func processMessage(conn net.Conn) { client := new(clientEntry) client.Addr = conn.RemoteAddr().String() client.Time = time.Now().UTC().Unix() client.Online = 1 cliChan <- client fmt.Println("[+] client connected:", conn.RemoteAddr()) defer conn.Close() r := bufio.NewReader(conn) for { msg, err := r.ReadString(0x0a) if err != nil { if err != io.EOF { fmt.Println("[!] error reading from client:", err.Error()) } break } else if msg == "" { break } msg = strings.Trim(string(msg), "\n \t") fmt.Println("-- ", msg) nodeID := logSplit.ReplaceAllString(msg, "$1") dateString := logSplit.ReplaceAllString(msg, "$2") logMsg := logSplit.ReplaceAllString(msg, "$3") tm, err := time.Parse(tsFormat, dateString) if err != nil { fmt.Fprintf(os.Stderr, "[!] error parsing time %s: %s\n", dateString, err.Error()) return } le := &logEntry{nodeID, tm.UTC().Unix(), logMsg} logChan <- le } fmt.Println("[+] client disconnected:", conn.RemoteAddr()) client.Online = 0 cliChan <- client } func log() { fmt.Println("[+] start log listener") db, err := sql.Open("sqlite3", dbFile) if err != nil { fmt.Println("[!] failed to open DB file:", err.Error()) os.Exit(1) } defer db.Close() for { select { case le, ok := <-logChan: if !ok { return } writeLogEntry(db, le) case client, ok := <-cliChan: if !ok { return } writeClientEntry(db, client) default: <-time.After(1 * time.Nanosecond) } } } func writeLogEntry(db *sql.DB, le *logEntry) { _, err := db.Exec("insert into entries values (?, ?, ?)", le.Node, le.Time, le.Msg) if err != nil { fmt.Println("[!] 
database error:", err.Error()) return } if responseCheck.MatchString(le.Msg) { respString := responseCheck.ReplaceAllString(le.Msg, "$1") rTime, err := strconv.Atoi(respString) if err != nil { fmt.Println("[!] error reading response time:", err.Error()) return } _, err = db.Exec("insert into response_time values (?, ?, ?)", le.Node, le.Time, rTime) if err != nil { fmt.Println("[!] error writing to database:", err.Error()) } } } func writeClientEntry(db *sql.DB, cli *clientEntry) { _, err := db.Exec("insert into clients values (?, ?, ?)", cli.Addr, cli.Time, cli.Online) if err != nil { fmt.Println("[!] database error:", err.Error()) } } func dbSetup() { fmt.Println("[+] checking tables") for tableName, tableSQL := range tables { fmt.Printf("\t[*] table %s\n", tableName) checkTable(tableName, tableSQL) } fmt.Println("[+] finished checking database") } func checkTable(tableName, tableSQL string) { db, err := sql.Open("sqlite3", dbFile) if err != nil { fmt.Println("[!] failed to open DB file:", err.Error()) os.Exit(1) } defer db.Close() rows, err := db.Query(`select sql from sqlite_master where type='table' and name=?`, tableName) if err != nil { fmt.Println("[!] error looking up table:", err.Error()) os.Exit(1) } var tblSql string for rows.Next() { err = rows.Scan(&tblSql) break } rows.Close() if err != nil { fmt.Println("[!] error reading database:", err.Error()) os.Exit(1) } else if tblSql == "" { fmt.Println("[+] creating table") _, err = db.Exec(tableSQL) if err != nil { fmt.Println("[!] error creating table:", err.Error()) os.Exit(1) } } else if tblSql != tableSQL { fmt.Println("[+] schema out of sync") _, err = db.Exec("drop table " + tableName) if err != nil { fmt.Println("[!] error dropping table:", err.Error()) os.Exit(1) } _, err = db.Exec(tableSQL) if err != nil { fmt.Println("[!] error creating table:", err.Error()) os.Exit(1) } fmt.Printf("[+] table %s updated\n", tableName) } } Response time logs now track which operation occurred. 
package main import ( "bufio" "database/sql" "flag" "fmt" _ "github.com/mattn/go-sqlite3" "io" "net" "os" "os/signal" "regexp" "strconv" "strings" "syscall" "time" ) type logEntry struct { Node string Time int64 Msg string } type clientEntry struct { Addr string Time int64 Online int } var ( tables map[string]string tsFormat = "2006/01/02 15:04:05" addr string dbFile string logChan chan *logEntry cliChan chan *clientEntry ) var logSplit = regexp.MustCompile("^([\\w-:]+) (\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}) (.+)$") var responseCheck = regexp.MustCompile("(\\w+) response time: (\\d+)us$") func init() { flLogBuffer := flag.Uint("b", 16, "log entries to buffer") flDbFile := flag.String("f", "logs.db", "database file") port := flag.Uint("p", 5988, "port to listen on") flag.Parse() addr = fmt.Sprintf(":%d", *port) dbFile = *flDbFile logChan = make(chan *logEntry, *flLogBuffer) cliChan = make(chan *clientEntry, *flLogBuffer) tables = make(map[string]string, 0) tables["entries"] = "CREATE TABLE entries (node text, timestamp integer, message string)" tables["response_time"] = "CREATE TABLE response_time (node text, timestamp integer, microsec integer, operation text)" tables["clients"] = "CREATE TABLE clients (address text, timestamp integer, online integer)" } func main() { dbSetup() go listen() go log() sigc := make(chan os.Signal, 1) signal.Notify(sigc, os.Kill, os.Interrupt, syscall.SIGTERM) fmt.Println("[+] waiting for shutdown signal") <-sigc fmt.Println("[+] closing log channel") close(logChan) fmt.Println("[+] closing log channel") close(cliChan) <-time.After(100 * time.Millisecond) fmt.Println("[+] logsrv shutting down") os.Exit(0) } func listen() { fmt.Println("[+] start TCP server") tcpAddr, err := net.ResolveTCPAddr("tcp", addr) if err != nil { fmt.Fprintln(os.Stderr, "[!] failed to resolve TCP address:", err.Error()) os.Exit(1) } listener, err := net.ListenTCP("tcp", tcpAddr) if err != nil { fmt.Fprintln(os.Stderr, "[!] 
failed to set up TCP listener:", err.Error()) } else { defer listener.Close() fmt.Println("[+] listening for clients") for { conn, err := listener.Accept() if err != nil { fmt.Println("[!] TCP error:", err.Error()) continue } go processMessage(conn) } } fmt.Println("[+] TCP server shuts down") } func processMessage(conn net.Conn) { client := new(clientEntry) client.Addr = conn.RemoteAddr().String() client.Time = time.Now().UTC().Unix() client.Online = 1 cliChan <- client fmt.Println("[+] client connected:", conn.RemoteAddr()) defer conn.Close() r := bufio.NewReader(conn) for { msg, err := r.ReadString(0x0a) if err != nil { if err != io.EOF { fmt.Println("[!] error reading from client:", err.Error()) } break } else if msg == "" { break } msg = strings.Trim(string(msg), "\n \t") fmt.Println("-- ", msg) nodeID := logSplit.ReplaceAllString(msg, "$1") dateString := logSplit.ReplaceAllString(msg, "$2") logMsg := logSplit.ReplaceAllString(msg, "$3") tm, err := time.Parse(tsFormat, dateString) if err != nil { fmt.Fprintf(os.Stderr, "[!] error parsing time %s: %s\n", dateString, err.Error()) return } le := &logEntry{nodeID, tm.UTC().Unix(), logMsg} logChan <- le } fmt.Println("[+] client disconnected:", conn.RemoteAddr()) client.Online = 0 cliChan <- client } func log() { fmt.Println("[+] start log listener") db, err := sql.Open("sqlite3", dbFile) if err != nil { fmt.Println("[!] failed to open DB file:", err.Error()) os.Exit(1) } defer db.Close() for { select { case le, ok := <-logChan: if !ok { return } writeLogEntry(db, le) case client, ok := <-cliChan: if !ok { return } writeClientEntry(db, client) default: <-time.After(1 * time.Nanosecond) } } } func writeLogEntry(db *sql.DB, le *logEntry) { _, err := db.Exec("insert into entries values (?, ?, ?)", le.Node, le.Time, le.Msg) if err != nil { fmt.Println("[!] 
database error:", err.Error()) return } if responseCheck.MatchString(le.Msg) { opName := responseCheck.ReplaceAllString(le.Msg, "$1") respString := responseCheck.ReplaceAllString(le.Msg, "$2") rTime, err := strconv.Atoi(respString) if err != nil { fmt.Println("[!] error reading response time:", err.Error()) return } _, err = db.Exec("insert into response_time values (?, ?, ?, ?)", le.Node, le.Time, rTime, opName) if err != nil { fmt.Println("[!] error writing to database:", err.Error()) } } } func writeClientEntry(db *sql.DB, cli *clientEntry) { _, err := db.Exec("insert into clients values (?, ?, ?)", cli.Addr, cli.Time, cli.Online) if err != nil { fmt.Println("[!] database error:", err.Error()) } } func dbSetup() { fmt.Println("[+] checking tables") for tableName, tableSQL := range tables { fmt.Printf("\t[*] table %s\n", tableName) checkTable(tableName, tableSQL) } fmt.Println("[+] finished checking database") } func checkTable(tableName, tableSQL string) { db, err := sql.Open("sqlite3", dbFile) if err != nil { fmt.Println("[!] failed to open DB file:", err.Error()) os.Exit(1) } defer db.Close() rows, err := db.Query(`select sql from sqlite_master where type='table' and name=?`, tableName) if err != nil { fmt.Println("[!] error looking up table:", err.Error()) os.Exit(1) } var tblSql string for rows.Next() { err = rows.Scan(&tblSql) break } rows.Close() if err != nil { fmt.Println("[!] error reading database:", err.Error()) os.Exit(1) } else if tblSql == "" { fmt.Println("[+] creating table") _, err = db.Exec(tableSQL) if err != nil { fmt.Println("[!] error creating table:", err.Error()) os.Exit(1) } } else if tblSql != tableSQL { fmt.Println("[+] schema out of sync") _, err = db.Exec("drop table " + tableName) if err != nil { fmt.Println("[!] error dropping table:", err.Error()) os.Exit(1) } _, err = db.Exec(tableSQL) if err != nil { fmt.Println("[!] error creating table:", err.Error()) os.Exit(1) } fmt.Printf("[+] table %s updated\n", tableName) } }
/* * Copyright 2013 Nan Deng * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package srv import ( "bytes" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" "strconv" "strings" "sync" "time" "github.com/uniqush/uniqush-push/push" ) const ( admTokenURL string = "https://api.amazon.com/auth/O2/token" admServiceURL string = "https://api.amazon.com/messaging/registrations/" ) type pspLockResponse struct { err push.PushError psp *push.PushServiceProvider } type pspLockRequest struct { psp *push.PushServiceProvider respCh chan<- *pspLockResponse } type admPushService struct { pspLock chan *pspLockRequest } var _ push.PushServiceType = &admPushService{} func newADMPushService() *admPushService { ret := new(admPushService) ret.pspLock = make(chan *pspLockRequest) go admPspLocker(ret.pspLock) return ret } func InstallADM() { psm := push.GetPushServiceManager() psm.RegisterPushServiceType(newADMPushService()) } func (self *admPushService) Finalize() {} func (self *admPushService) Name() string { return "adm" } func (self *admPushService) SetErrorReportChan(errChan chan<- push.PushError) { return } func (self *admPushService) BuildPushServiceProviderFromMap(kv map[string]string, psp *push.PushServiceProvider) error { if service, ok := kv["service"]; ok && len(service) > 0 { psp.FixedData["service"] = service } else { return errors.New("NoService") } if clientid, ok := kv["clientid"]; ok && len(clientid) > 0 { psp.FixedData["clientid"] = clientid } else { return 
errors.New("NoClientID") } if clientsecret, ok := kv["clientsecret"]; ok && len(clientsecret) > 0 { psp.FixedData["clientsecret"] = clientsecret } else { return errors.New("NoClientSecrete") } return nil } func (self *admPushService) BuildDeliveryPointFromMap(kv map[string]string, dp *push.DeliveryPoint) error { if service, ok := kv["service"]; ok && len(service) > 0 { dp.FixedData["service"] = service } else { return errors.New("NoService") } if sub, ok := kv["subscriber"]; ok && len(sub) > 0 { dp.FixedData["subscriber"] = sub } else { return errors.New("NoSubscriber") } if regid, ok := kv["regid"]; ok && len(regid) > 0 { dp.FixedData["regid"] = regid } else { return errors.New("NoRegId") } return nil } func admPspLocker(lockChan <-chan *pspLockRequest) { pspLockMap := make(map[string]*push.PushServiceProvider, 10) for req := range lockChan { var ok bool var clientid string psp := req.psp resp := new(pspLockResponse) if clientid, ok = psp.FixedData["clientid"]; !ok { resp.err = push.NewBadPushServiceProviderWithDetails(psp, "NoClientID") req.respCh <- resp continue } if psp, ok = pspLockMap[clientid]; !ok { psp = req.psp pspLockMap[clientid] = psp } resp.err = requestToken(psp) resp.psp = psp if resp.err != nil { if _, ok := resp.err.(*push.PushServiceProviderUpdate); ok { pspLockMap[clientid] = psp } else { delete(pspLockMap, clientid) } } req.respCh <- resp } } type tokenSuccObj struct { Token string `json:"access_token"` Expire int `json:"expires_in"` Scope string `json:"scope"` Type string `json:"token_type"` } type tokenFailObj struct { Reason string `json:"error"` Description string `json:"error_description"` } func requestToken(psp *push.PushServiceProvider) push.PushError { var ok bool var clientid string var cserect string if _, ok = psp.VolatileData["token"]; ok { if exp, ok := psp.VolatileData["expire"]; ok { unixsec, err := strconv.ParseInt(exp, 10, 64) if err == nil { deadline := time.Unix(unixsec, int64(0)) if deadline.After(time.Now()) { 
fmt.Printf("We don't need to request another token\n") return nil } } } } if clientid, ok = psp.FixedData["clientid"]; !ok { return push.NewBadPushServiceProviderWithDetails(psp, "NoClientID") } if cserect, ok = psp.FixedData["clientsecret"]; !ok { return push.NewBadPushServiceProviderWithDetails(psp, "NoClientSecrete") } form := url.Values{} form.Set("grant_type", "client_credentials") form.Set("scope", "messaging:push") form.Set("client_id", clientid) form.Set("client_secret", cserect) req, err := http.NewRequest("POST", admTokenURL, bytes.NewBufferString(form.Encode())) if err != nil { return push.NewErrorf("NewRequest error: %v", err) } defer req.Body.Close() req.Header.Add("Content-Type", "application/x-www-form-urlencoded") client := &http.Client{} resp, err := client.Do(req) if err != nil { return push.NewErrorf("Do error: %v", err) } defer resp.Body.Close() content, err := ioutil.ReadAll(resp.Body) if err != nil { return push.NewBadPushServiceProviderWithDetails(psp, err.Error()) } if resp.StatusCode != 200 { var fail tokenFailObj err = json.Unmarshal(content, &fail) if err != nil { return push.NewBadPushServiceProviderWithDetails(psp, err.Error()) } reason := strings.ToUpper(fail.Reason) switch reason { case "INVALID_SCOPE": reason = "ADM is not enabled. 
Enable it on the Amazon Mobile App Distribution Portal" } return push.NewBadPushServiceProviderWithDetails(psp, fmt.Sprintf("%v:%v (%v)", resp.StatusCode, reason, fail.Description)) } var succ tokenSuccObj err = json.Unmarshal(content, &succ) if err != nil { return push.NewBadPushServiceProviderWithDetails(psp, err.Error()) } expire := time.Now().Add(time.Duration(succ.Expire-60) * time.Second) psp.VolatileData["expire"] = fmt.Sprintf("%v", expire.Unix()) psp.VolatileData["token"] = succ.Token psp.VolatileData["type"] = succ.Type return push.NewPushServiceProviderUpdate(psp) } type admMessage struct { Data map[string]string `json:"data"` MsgGroup string `json:"consolidationKey,omitempty"` TTL int64 `json:"expiresAfter,omitempty"` MD5 string `json:"md5,omitempty"` } func notifToMessage(notif *push.Notification) (msg *admMessage, err push.PushError) { if notif == nil || len(notif.Data) == 0 { err = push.NewBadNotificationWithDetails("empty notification") return } msg = new(admMessage) msg.Data = make(map[string]string, len(notif.Data)) if msggroup, ok := notif.Data["msggroup"]; ok { msg.MsgGroup = msggroup } if rawTTL, ok := notif.Data["ttl"]; ok { ttl, err := strconv.ParseInt(rawTTL, 10, 64) if err == nil { msg.TTL = ttl } } if rawPayload, ok := notif.Data["uniqush.payload.adm"]; ok { jsonErr := json.Unmarshal([]byte(rawPayload), &(msg.Data)) if jsonErr != nil { err = push.NewBadNotificationWithDetails(fmt.Sprintf("invalid uniqush.payload.adm: %v", jsonErr)) return } } else { for k, v := range notif.Data { if k == "msggroup" || k == "ttl" { continue } if strings.HasPrefix(k, "uniqush.") { // keys beginning with "uniqush." are reserved by Uniqush. 
continue } msg.Data[k] = v } } if len(msg.Data) == 0 { err = push.NewBadNotificationWithDetails("empty notification") return } return } func admURL(dp *push.DeliveryPoint) (url string, err push.PushError) { if dp == nil { err = push.NewError("nil dp") return } if regid, ok := dp.FixedData["regid"]; ok { url = fmt.Sprintf("%v%v/messages", admServiceURL, regid) } else { err = push.NewBadDeliveryPointWithDetails(dp, "empty delivery point") } return } func admNewRequest(psp *push.PushServiceProvider, dp *push.DeliveryPoint, data []byte) (req *http.Request, err push.PushError) { var token string var ok bool if token, ok = psp.VolatileData["token"]; !ok { err = push.NewBadPushServiceProviderWithDetails(psp, "NoToken") return } url, err := admURL(dp) if err != nil { return } req, reqErr := http.NewRequest("POST", url, bytes.NewBuffer(data)) if reqErr != nil { return } req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", "application/json") req.Header.Set("x-amzn-type-version", "com.amazon.device.messaging.ADMMessage@1.0") req.Header.Set("x-amzn-accept-type", "com.amazon.device.messaging.ADMSendResult@1.0") req.Header.Set("Authorization", "Bearer "+token) return } type admPushFailResponse struct { Reason string `json:"reason"` } func admSinglePush(psp *push.PushServiceProvider, dp *push.DeliveryPoint, data []byte, notif *push.Notification) (string, push.PushError) { client := &http.Client{} req, err := admNewRequest(psp, dp, data) if err != nil { return "", err } defer req.Body.Close() resp, httpErr := client.Do(req) if httpErr != nil { return "", push.NewErrorf("Failed to send adm push: %v", httpErr.Error()) } defer resp.Body.Close() id := resp.Header.Get("x-amzn-RequestId") if resp.StatusCode != 200 { if resp.StatusCode == 503 || resp.StatusCode == 500 || resp.StatusCode == 429 { // By default, we retry after one minute. 
retryAfter := resp.Header.Get("Retry-After") retrySecond := 60 if retryAfter != "" { var retryErr error retrySecond, retryErr = strconv.Atoi(retryAfter) if retryErr != nil { retrySecond = 60 } } retryDuration := time.Duration(retrySecond) * time.Second err = push.NewRetryError(psp, dp, notif, retryDuration) return id, err } body, ioErr := ioutil.ReadAll(resp.Body) if ioErr != nil { return "", push.NewErrorf("Failed to read adm response: %v", err) } var fail admPushFailResponse jsonErr := json.Unmarshal(body, &fail) if jsonErr != nil { return "", push.NewErrorf("%v: %v", resp.StatusCode, string(body)) } reason := strings.ToLower(fail.Reason) switch reason { case "messagetoolarge": err = push.NewBadNotificationWithDetails("MessageTooLarge") case "invalidregistrationid": err = push.NewBadDeliveryPointWithDetails(dp, "InvalidRegistrationId") case "accesstokenexpired": // retry would fix it. err = push.NewRetryError(psp, dp, notif, 10*time.Second) default: err = push.NewErrorf("%v: %v", resp.StatusCode, fail.Reason) } return "", err } return id, nil } func (self *admPushService) lockPsp(psp *push.PushServiceProvider) (*push.PushServiceProvider, push.PushError) { respCh := make(chan *pspLockResponse) req := &pspLockRequest{ psp: psp, respCh: respCh, } self.pspLock <- req resp := <-respCh return resp.psp, resp.err } func (self *admPushService) Push(psp *push.PushServiceProvider, dpQueue <-chan *push.DeliveryPoint, resQueue chan<- *push.PushResult, notif *push.Notification) { defer close(resQueue) defer func() { for _ = range dpQueue { } }() res := new(push.PushResult) res.Content = notif res.Provider = psp var err push.PushError psp, err = self.lockPsp(psp) if err != nil { res.Err = err resQueue <- res if _, ok := err.(*push.PushServiceProviderUpdate); !ok { return } } msg, err := notifToMessage(notif) if err != nil { res.Err = err resQueue <- res return } data, jsonErr := json.Marshal(msg) if jsonErr != nil { res.Err = push.NewErrorf("Failed to marshal message: %v", 
jsonErr) resQueue <- res return } wg := sync.WaitGroup{} for dp := range dpQueue { wg.Add(1) res := new(push.PushResult) res.Content = notif res.Provider = psp res.Destination = dp go func(dp *push.DeliveryPoint) { res.MsgId, res.Err = admSinglePush(psp, dp, data, notif) resQueue <- res wg.Done() }(dp) } wg.Wait() } Fix typo in ADM return code: "NoClientSecrete" -> "NoClientSecret" /* * Copyright 2013 Nan Deng * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package srv import ( "bytes" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" "strconv" "strings" "sync" "time" "github.com/uniqush/uniqush-push/push" ) const ( admTokenURL string = "https://api.amazon.com/auth/O2/token" admServiceURL string = "https://api.amazon.com/messaging/registrations/" ) type pspLockResponse struct { err push.PushError psp *push.PushServiceProvider } type pspLockRequest struct { psp *push.PushServiceProvider respCh chan<- *pspLockResponse } type admPushService struct { pspLock chan *pspLockRequest } var _ push.PushServiceType = &admPushService{} func newADMPushService() *admPushService { ret := new(admPushService) ret.pspLock = make(chan *pspLockRequest) go admPspLocker(ret.pspLock) return ret } func InstallADM() { psm := push.GetPushServiceManager() psm.RegisterPushServiceType(newADMPushService()) } func (self *admPushService) Finalize() {} func (self *admPushService) Name() string { return "adm" } func (self *admPushService) SetErrorReportChan(errChan chan<- push.PushError) { return 
} func (self *admPushService) BuildPushServiceProviderFromMap(kv map[string]string, psp *push.PushServiceProvider) error { if service, ok := kv["service"]; ok && len(service) > 0 { psp.FixedData["service"] = service } else { return errors.New("NoService") } if clientid, ok := kv["clientid"]; ok && len(clientid) > 0 { psp.FixedData["clientid"] = clientid } else { return errors.New("NoClientID") } if clientsecret, ok := kv["clientsecret"]; ok && len(clientsecret) > 0 { psp.FixedData["clientsecret"] = clientsecret } else { return errors.New("NoClientSecret") } return nil } func (self *admPushService) BuildDeliveryPointFromMap(kv map[string]string, dp *push.DeliveryPoint) error { if service, ok := kv["service"]; ok && len(service) > 0 { dp.FixedData["service"] = service } else { return errors.New("NoService") } if sub, ok := kv["subscriber"]; ok && len(sub) > 0 { dp.FixedData["subscriber"] = sub } else { return errors.New("NoSubscriber") } if regid, ok := kv["regid"]; ok && len(regid) > 0 { dp.FixedData["regid"] = regid } else { return errors.New("NoRegId") } return nil } func admPspLocker(lockChan <-chan *pspLockRequest) { pspLockMap := make(map[string]*push.PushServiceProvider, 10) for req := range lockChan { var ok bool var clientid string psp := req.psp resp := new(pspLockResponse) if clientid, ok = psp.FixedData["clientid"]; !ok { resp.err = push.NewBadPushServiceProviderWithDetails(psp, "NoClientID") req.respCh <- resp continue } if psp, ok = pspLockMap[clientid]; !ok { psp = req.psp pspLockMap[clientid] = psp } resp.err = requestToken(psp) resp.psp = psp if resp.err != nil { if _, ok := resp.err.(*push.PushServiceProviderUpdate); ok { pspLockMap[clientid] = psp } else { delete(pspLockMap, clientid) } } req.respCh <- resp } } type tokenSuccObj struct { Token string `json:"access_token"` Expire int `json:"expires_in"` Scope string `json:"scope"` Type string `json:"token_type"` } type tokenFailObj struct { Reason string `json:"error"` Description string 
`json:"error_description"` } func requestToken(psp *push.PushServiceProvider) push.PushError { var ok bool var clientid string var cserect string if _, ok = psp.VolatileData["token"]; ok { if exp, ok := psp.VolatileData["expire"]; ok { unixsec, err := strconv.ParseInt(exp, 10, 64) if err == nil { deadline := time.Unix(unixsec, int64(0)) if deadline.After(time.Now()) { fmt.Printf("We don't need to request another token\n") return nil } } } } if clientid, ok = psp.FixedData["clientid"]; !ok { return push.NewBadPushServiceProviderWithDetails(psp, "NoClientID") } if cserect, ok = psp.FixedData["clientsecret"]; !ok { return push.NewBadPushServiceProviderWithDetails(psp, "NoClientSecret") } form := url.Values{} form.Set("grant_type", "client_credentials") form.Set("scope", "messaging:push") form.Set("client_id", clientid) form.Set("client_secret", cserect) req, err := http.NewRequest("POST", admTokenURL, bytes.NewBufferString(form.Encode())) if err != nil { return push.NewErrorf("NewRequest error: %v", err) } defer req.Body.Close() req.Header.Add("Content-Type", "application/x-www-form-urlencoded") client := &http.Client{} resp, err := client.Do(req) if err != nil { return push.NewErrorf("Do error: %v", err) } defer resp.Body.Close() content, err := ioutil.ReadAll(resp.Body) if err != nil { return push.NewBadPushServiceProviderWithDetails(psp, err.Error()) } if resp.StatusCode != 200 { var fail tokenFailObj err = json.Unmarshal(content, &fail) if err != nil { return push.NewBadPushServiceProviderWithDetails(psp, err.Error()) } reason := strings.ToUpper(fail.Reason) switch reason { case "INVALID_SCOPE": reason = "ADM is not enabled. 
Enable it on the Amazon Mobile App Distribution Portal" } return push.NewBadPushServiceProviderWithDetails(psp, fmt.Sprintf("%v:%v (%v)", resp.StatusCode, reason, fail.Description)) } var succ tokenSuccObj err = json.Unmarshal(content, &succ) if err != nil { return push.NewBadPushServiceProviderWithDetails(psp, err.Error()) } expire := time.Now().Add(time.Duration(succ.Expire-60) * time.Second) psp.VolatileData["expire"] = fmt.Sprintf("%v", expire.Unix()) psp.VolatileData["token"] = succ.Token psp.VolatileData["type"] = succ.Type return push.NewPushServiceProviderUpdate(psp) } type admMessage struct { Data map[string]string `json:"data"` MsgGroup string `json:"consolidationKey,omitempty"` TTL int64 `json:"expiresAfter,omitempty"` MD5 string `json:"md5,omitempty"` } func notifToMessage(notif *push.Notification) (msg *admMessage, err push.PushError) { if notif == nil || len(notif.Data) == 0 { err = push.NewBadNotificationWithDetails("empty notification") return } msg = new(admMessage) msg.Data = make(map[string]string, len(notif.Data)) if msggroup, ok := notif.Data["msggroup"]; ok { msg.MsgGroup = msggroup } if rawTTL, ok := notif.Data["ttl"]; ok { ttl, err := strconv.ParseInt(rawTTL, 10, 64) if err == nil { msg.TTL = ttl } } if rawPayload, ok := notif.Data["uniqush.payload.adm"]; ok { jsonErr := json.Unmarshal([]byte(rawPayload), &(msg.Data)) if jsonErr != nil { err = push.NewBadNotificationWithDetails(fmt.Sprintf("invalid uniqush.payload.adm: %v", jsonErr)) return } } else { for k, v := range notif.Data { if k == "msggroup" || k == "ttl" { continue } if strings.HasPrefix(k, "uniqush.") { // keys beginning with "uniqush." are reserved by Uniqush. 
continue } msg.Data[k] = v } } if len(msg.Data) == 0 { err = push.NewBadNotificationWithDetails("empty notification") return } return } func admURL(dp *push.DeliveryPoint) (url string, err push.PushError) { if dp == nil { err = push.NewError("nil dp") return } if regid, ok := dp.FixedData["regid"]; ok { url = fmt.Sprintf("%v%v/messages", admServiceURL, regid) } else { err = push.NewBadDeliveryPointWithDetails(dp, "empty delivery point") } return } func admNewRequest(psp *push.PushServiceProvider, dp *push.DeliveryPoint, data []byte) (req *http.Request, err push.PushError) { var token string var ok bool if token, ok = psp.VolatileData["token"]; !ok { err = push.NewBadPushServiceProviderWithDetails(psp, "NoToken") return } url, err := admURL(dp) if err != nil { return } req, reqErr := http.NewRequest("POST", url, bytes.NewBuffer(data)) if reqErr != nil { return } req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", "application/json") req.Header.Set("x-amzn-type-version", "com.amazon.device.messaging.ADMMessage@1.0") req.Header.Set("x-amzn-accept-type", "com.amazon.device.messaging.ADMSendResult@1.0") req.Header.Set("Authorization", "Bearer "+token) return } type admPushFailResponse struct { Reason string `json:"reason"` } func admSinglePush(psp *push.PushServiceProvider, dp *push.DeliveryPoint, data []byte, notif *push.Notification) (string, push.PushError) { client := &http.Client{} req, err := admNewRequest(psp, dp, data) if err != nil { return "", err } defer req.Body.Close() resp, httpErr := client.Do(req) if httpErr != nil { return "", push.NewErrorf("Failed to send adm push: %v", httpErr.Error()) } defer resp.Body.Close() id := resp.Header.Get("x-amzn-RequestId") if resp.StatusCode != 200 { if resp.StatusCode == 503 || resp.StatusCode == 500 || resp.StatusCode == 429 { // By default, we retry after one minute. 
retryAfter := resp.Header.Get("Retry-After") retrySecond := 60 if retryAfter != "" { var retryErr error retrySecond, retryErr = strconv.Atoi(retryAfter) if retryErr != nil { retrySecond = 60 } } retryDuration := time.Duration(retrySecond) * time.Second err = push.NewRetryError(psp, dp, notif, retryDuration) return id, err } body, ioErr := ioutil.ReadAll(resp.Body) if ioErr != nil { return "", push.NewErrorf("Failed to read adm response: %v", err) } var fail admPushFailResponse jsonErr := json.Unmarshal(body, &fail) if jsonErr != nil { return "", push.NewErrorf("%v: %v", resp.StatusCode, string(body)) } reason := strings.ToLower(fail.Reason) switch reason { case "messagetoolarge": err = push.NewBadNotificationWithDetails("MessageTooLarge") case "invalidregistrationid": err = push.NewBadDeliveryPointWithDetails(dp, "InvalidRegistrationId") case "accesstokenexpired": // retry would fix it. err = push.NewRetryError(psp, dp, notif, 10*time.Second) default: err = push.NewErrorf("%v: %v", resp.StatusCode, fail.Reason) } return "", err } return id, nil } func (self *admPushService) lockPsp(psp *push.PushServiceProvider) (*push.PushServiceProvider, push.PushError) { respCh := make(chan *pspLockResponse) req := &pspLockRequest{ psp: psp, respCh: respCh, } self.pspLock <- req resp := <-respCh return resp.psp, resp.err } func (self *admPushService) Push(psp *push.PushServiceProvider, dpQueue <-chan *push.DeliveryPoint, resQueue chan<- *push.PushResult, notif *push.Notification) { defer close(resQueue) defer func() { for _ = range dpQueue { } }() res := new(push.PushResult) res.Content = notif res.Provider = psp var err push.PushError psp, err = self.lockPsp(psp) if err != nil { res.Err = err resQueue <- res if _, ok := err.(*push.PushServiceProviderUpdate); !ok { return } } msg, err := notifToMessage(notif) if err != nil { res.Err = err resQueue <- res return } data, jsonErr := json.Marshal(msg) if jsonErr != nil { res.Err = push.NewErrorf("Failed to marshal message: %v", 
jsonErr) resQueue <- res return } wg := sync.WaitGroup{} for dp := range dpQueue { wg.Add(1) res := new(push.PushResult) res.Content = notif res.Provider = psp res.Destination = dp go func(dp *push.DeliveryPoint) { res.MsgId, res.Err = admSinglePush(psp, dp, data, notif) resQueue <- res wg.Done() }(dp) } wg.Wait() }
// Package chunker (copy 1 of 2 in this span — the pre-fix version of the
// file; a commit message and the fixed version follow below).
// mp3Chunker pipes PCM audio through an external MP3 encoder and re-splits
// the encoded stream into timestamped chunks.
package chunker

import (
	"fmt"
	"io"
	"log"
	"time"

	"github.com/thijzert/speeldoos/lib/wavreader"
)

// mp3Chunker couples the encoder's input (audioIn) to a pipe whose read end
// (mp3out) is scanned for MP3 frame boundaries by the splitChunks goroutine.
type mp3Chunker struct {
	audioIn wavreader.Writer
	mp3out  *io.PipeReader
	embargo time.Time // wall-clock time before which the next chunk may not be released
	chcont  *chunkContainer
}

// NewMP3 wires up the encoder pipe, allocates a 500-slot chunk ring, and
// starts the splitter goroutine.
func NewMP3() (Chunker, error) {
	r, w := io.Pipe()
	wavin, err := wavreader.ToMP3(w, wavreader.DAT)
	if err != nil {
		return nil, err
	}
	rv := &mp3Chunker{
		audioIn: wavin,
		mp3out:  r,
		embargo: time.Now(),
		chcont: &chunkContainer{
			chunks: make([]chunk, 500),
			start:  0,
			end:    0,
		},
	}
	go rv.splitChunks()
	return rv, nil
}

// Init forwards the fixed chunk size to the encoder input, unless an error
// has already been latched.
func (m *mp3Chunker) Init(fixedSize int) error {
	if m.chcont.errorState != nil {
		return m.chcont.errorState
	}
	return m.audioIn.Init(fixedSize)
}

// Format reports the stream format expected by the encoder input.
func (m *mp3Chunker) Format() wavreader.StreamFormat {
	return m.audioIn.Format()
}

// Write feeds raw audio to the encoder; refused once an error is latched.
func (m *mp3Chunker) Write(buf []byte) (int, error) {
	if m.chcont.errorState != nil {
		return 0, m.chcont.errorState
	}
	return m.audioIn.Write(buf)
}

// Close latches io.EOF as the terminal state and closes the encoder input.
func (m *mp3Chunker) Close() error {
	if m.chcont.errorState != nil {
		return m.chcont.errorState
	}
	m.chcont.errorState = io.EOF
	return m.audioIn.Close()
}

// CloseWithError latches er as the terminal state; only the first error
// sticks — later calls return the already-latched one.
func (m *mp3Chunker) CloseWithError(er error) error {
	if m.chcont.errorState != nil {
		return m.chcont.errorState
	}
	m.chcont.errorState = er
	if m.audioIn != nil {
		return m.audioIn.CloseWithError(er)
	} else {
		return er
	}
}

// NewStream opens a new reader over the buffered chunks.
func (m *mp3Chunker) NewStream() (ChunkStream, error) {
	return m.chcont.NewStream()
}

// splitChunks reads the encoded MP3 stream, locates frame headers, and
// publishes chunks, pacing them in real time against the embargo clock.
func (m *mp3Chunker) splitChunks() {
	var hdr, nexthdr mp3header
	buf := make([]byte, 1024)
	firstOffset := 0
	offset := 0
	var err error
	var n, i, ct int
	for {
		if offset == len(buf) {
			m.CloseWithError(fmt.Errorf("buffer is full; no new header found"))
			return
		}
		n, err = m.mp3out.Read(buf[offset:])
		if err != nil {
			if err != io.EOF {
				// TODO: remove
				log.Printf("splitting hairs got me this error: %s", err)
			}
			break
		}
		if n == 0 || (n+offset) < 4 {
			continue
		}
		unread := buf[:n+offset]
		i, nexthdr = nextHeader(unread[firstOffset:], hdr)
		for i >= 0 {
			hdr = nexthdr
			// NOTE(review): publishing buf[:n] ignores the carried-over
			// offset and the found frame boundary — this is the garbled-audio
			// bug that the second copy of this file (below) fixes by emitting
			// unread[:firstOffset+i] instead.
			m.chcont.AddChunk(buf[:n], m.embargo)
			m.embargo = m.embargo.Add(hdr.Duration())
			// Real-time pacing: don't run more than ~100ms ahead of the
			// embargo clock.
			for time.Now().Add(100 * time.Millisecond).Before(m.embargo) {
				time.Sleep(1 * time.Millisecond)
			}
			unread = unread[i+4:]
			i, nexthdr = nextHeader(unread[4:], hdr)
			firstOffset = 4
			ct++
		}
		// Carry the unconsumed tail over to the front of the buffer.
		copy(buf, unread)
		offset = len(unread)
	}
	m.CloseWithError(err)
}

// mp3header holds the four raw bytes of an MPEG audio frame header.
type mp3header struct {
	A, B, C, D byte
}

// Bitrate table indexed by [lsf][layer-1][bitrate index], in kbps.
var tabsel_123 [2][3][16]uint16 = [2][3][16]uint16{
	{
		{0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448},
		{0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384},
		{0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320},
	},
	{
		{0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256},
		{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160},
		{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160},
	},
}

// SyncWord extracts the sync/version bits used to validate a header
// (0xffe for a plausible frame).
func (m mp3header) SyncWord() uint16 {
	return (uint16(m.A)<<4 | uint16(m.B)>>4) & 0xffe
}

// Version returns 0 for MPEG-1, 1 for MPEG-2 and 2 for MPEG-2.5.
func (m mp3header) Version() int {
	rv := m.B & 0x18 >> 3
	if rv&0x02 == 0x02 {
		// MPEG 1 or 2
		return int(rv&0x01 ^ 0x01)
	} else {
		// MPEG-2.5
		return 2
	}
}

// Bitrate returns the frame bitrate in kbps, or 0 for an invalid layer.
func (m mp3header) Bitrate() uint16 {
	lsf := int((m.B >> 3 & 0x01) ^ 0x01)
	lay := 4 - int(m.B>>1&0x03)
	if lay == 0 || lay == 4 {
		return 0
	}
	bitrate_index := int(m.C >> 4)
	return tabsel_123[lsf][lay-1][bitrate_index]
}

// Sample-rate table indexed by [version][sample-rate bits], in Hz.
var sampleTable [4][4]int32 = [4][4]int32{
	{44100, 48000, 32000, 22050},
	{16000, 11025, 12000, 8000},
	{22050, 24000, 16000, 11025},
	{1, 1, 1, 1},
}

// SampleRate returns the frame's sample rate in Hz.
func (m mp3header) SampleRate() int32 {
	samp := int(m.C&0x0c) >> 2
	ver := m.Version()
	return sampleTable[ver][samp]
}

// Frame durations indexed by [version][layer-1][sample-rate bits].
var durationTable [4][4][4]time.Duration = [4][4][4]time.Duration{
	{ // MPEG-1: 384, 1152, or 1152 samples per frame (all durations in ns)
		{8707483, 8000000, 12000000, 17414966},
		{26122449, 24000000, 36000000, 52244898},
		{26122449, 24000000, 36000000, 52244898},
	},
	{ // MPEG 2: -1,384,1152, or 576 samples per frame (all durations in ns)
		{24000000, 34829932, 32000000, 48000000},
		{72000000, 104489796, 96000000, 144000000},
		{36000000, 52244898, 48000000, 72000000},
	},
	{ // MPEG 2.5: -1,384,1152, or 576 samples per frame (all durations in ns)
		{17414966, 16000000, 24000000, 34829932},
		{52244898, 48000000, 72000000, 104489796},
		{26122449, 24000000, 36000000, 52244898},
	},
}

// Duration returns this frame's playing time, or 0 if m is not a plausible
// header.
func (m mp3header) Duration() time.Duration {
	if m.A != 0xff {
		return 0
	}
	ver := m.Version()
	lay := 4 - int(m.B>>1&0x03)
	samp := int(m.C&0x0c) >> 2
	return durationTable[ver][lay-1][samp]
}

// Padding returns 1 when the frame carries a padding byte, else 0.
func (m mp3header) Padding() int {
	return int(m.C >> 1 & 0x01)
}

// Framesize returns the total length of the MP3 frame, including its header
func (m mp3header) Framesize() int {
	ver := m.Version()
	lay := 4 - int(m.B>>1&0x03)
	if ver == 0 && lay == 3 {
		return (int(m.Bitrate()) * 144000 / int(m.SampleRate())) + m.Padding()
	}
	return 4
}

// String renders the header fields for debugging.
func (m mp3header) String() string {
	if m.SyncWord() != 0xffe {
		return "(not an MP3 header)"
	}
	ver := [4]string{"1.0", "2.0", "2.5", "x.x"}[m.Version()]
	lay := [4]string{"Unknown", "I", "II", "III"}[4-int(m.B>>1&0x03)]
	samp := m.SampleRate()
	br := m.Bitrate()
	return fmt.Sprintf("MPEG-%s layer %s; %dHz %dkbps", ver, lay, samp, br)
}

// nextHeader scans buf for the next valid frame header. `last` is used both
// to guess the next frame position (fast path) and to sanity-check candidates.
func nextHeader(buf []byte, last mp3header) (int, mp3header) {
	l := len(buf)
	if l < 4 {
		return -1, last
	}
	if last.A == 0xff {
		// The previous header was filled; see if we can guess the next position
		i := last.Framesize() - 4
		if l < (i + 4) {
			// HACK: the guess position isn't available yet - wait for the next read
			// to prevent a false positive in the slow path
			return -1, last
		}
		rv := mp3header{buf[i], buf[i+1], buf[i+2], buf[i+3]}
		if rv.SyncWord() == 0xffe && last.B == rv.B && last.SampleRate() == rv.SampleRate() {
			// We're in luck!
			return i, rv
		}
	}
	// Short path isn't available - scan the full buffer
	for i, c := range buf[:l-4] {
		if c != 0xff {
			continue
		}
		rv := mp3header{buf[i], buf[i+1], buf[i+2], buf[i+3]}
		if rv.SyncWord() != 0xffe || rv.Bitrate() == 0 || rv.SampleRate() == 0 {
			continue
		}
		if last.A != 0xff {
			// The previous header was empty - this one's good
			return i, rv
		} else {
			// Check if key fields match the previous header
			if last.B == rv.B && last.SampleRate() == rv.SampleRate() {
				return i, rv
			}
		}
	}
	return -1, last
}
Fix garbled audio bug
// --- Second, post-fix copy of the same file begins here (reproduced
// untouched; identical to the copy above except where noted). ---
package chunker

import (
	"fmt"
	"io"
	"log"
	"time"

	"github.com/thijzert/speeldoos/lib/wavreader"
)

type mp3Chunker struct {
	audioIn wavreader.Writer
	mp3out  *io.PipeReader
	embargo time.Time
	chcont  *chunkContainer
}

func NewMP3() (Chunker, error) {
	r, w := io.Pipe()
	wavin, err := wavreader.ToMP3(w, wavreader.DAT)
	if err != nil {
		return nil, err
	}
	rv := &mp3Chunker{
		audioIn: wavin,
		mp3out:  r,
		embargo: time.Now(),
		chcont: &chunkContainer{
			chunks: make([]chunk, 500),
			start:  0,
			end:    0,
		},
	}
	go rv.splitChunks()
	return rv, nil
}

func (m *mp3Chunker) Init(fixedSize int) error {
	if m.chcont.errorState != nil {
		return m.chcont.errorState
	}
	return m.audioIn.Init(fixedSize)
}

func (m *mp3Chunker) Format() wavreader.StreamFormat {
	return m.audioIn.Format()
}

func (m *mp3Chunker) Write(buf []byte) (int, error) {
	if m.chcont.errorState != nil {
		return 0, m.chcont.errorState
	}
	return m.audioIn.Write(buf)
}

func (m *mp3Chunker) Close() error {
	if m.chcont.errorState != nil {
		return m.chcont.errorState
	}
	m.chcont.errorState = io.EOF
	return m.audioIn.Close()
}

func (m *mp3Chunker) CloseWithError(er error) error {
	if m.chcont.errorState != nil {
		return m.chcont.errorState
	}
	m.chcont.errorState = er
	if m.audioIn != nil {
		return m.audioIn.CloseWithError(er)
	} else {
		return er
	}
}

func (m *mp3Chunker) NewStream() (ChunkStream, error) {
	return m.chcont.NewStream()
}

// splitChunks (fixed version — continues on the following span).
func (m *mp3Chunker) splitChunks() {
	var hdr, nexthdr mp3header
	buf := make([]byte, 1024)
	firstOffset := 0
	// --- continuation of the fixed splitChunks from the previous span ---
	offset := 0
	var err error
	var n, i, ct int
	for {
		if offset == len(buf) {
			m.CloseWithError(fmt.Errorf("buffer is full; no new header found"))
			return
		}
		n, err = m.mp3out.Read(buf[offset:])
		if err != nil {
			if err != io.EOF {
				// TODO: remove
				log.Printf("splitting hairs got me this error: %s", err)
			}
			break
		}
		if n == 0 || (n+offset) < 4 {
			continue
		}
		unread := buf[:n+offset]
		i, nexthdr = nextHeader(unread[firstOffset:], hdr)
		for i >= 0 {
			hdr = nexthdr
			// The fix: emit exactly the bytes up to the found frame boundary
			// instead of the raw read buffer (buf[:n]), which produced
			// garbled audio.
			chunk := unread[:firstOffset+i]
			m.chcont.AddChunk(chunk, m.embargo)
			m.embargo = m.embargo.Add(hdr.Duration())
			// Real-time pacing against the embargo clock.
			for time.Now().Add(100 * time.Millisecond).Before(m.embargo) {
				time.Sleep(1 * time.Millisecond)
			}
			unread = unread[i+4:]
			i, nexthdr = nextHeader(unread[4:], hdr)
			firstOffset = 4
			ct++
		}
		copy(buf, unread)
		offset = len(unread)
	}
	m.CloseWithError(err)
}

// mp3header holds the four raw bytes of an MPEG audio frame header.
type mp3header struct {
	A, B, C, D byte
}

// Bitrate table indexed by [lsf][layer-1][bitrate index], in kbps.
var tabsel_123 [2][3][16]uint16 = [2][3][16]uint16{
	{
		{0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448},
		{0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384},
		{0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320},
	},
	{
		{0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256},
		{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160},
		{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160},
	},
}

func (m mp3header) SyncWord() uint16 {
	return (uint16(m.A)<<4 | uint16(m.B)>>4) & 0xffe
}

func (m mp3header) Version() int {
	rv := m.B & 0x18 >> 3
	if rv&0x02 == 0x02 {
		// MPEG 1 or 2
		return int(rv&0x01 ^ 0x01)
	} else {
		// MPEG-2.5
		return 2
	}
}

func (m mp3header) Bitrate() uint16 {
	lsf := int((m.B >> 3 & 0x01) ^ 0x01)
	lay := 4 - int(m.B>>1&0x03)
	if lay == 0 || lay == 4 {
		return 0
	}
	bitrate_index := int(m.C >> 4)
	return tabsel_123[lsf][lay-1][bitrate_index]
}

// Sample-rate table indexed by [version][sample-rate bits], in Hz.
var sampleTable [4][4]int32 = [4][4]int32{
	{44100, 48000, 32000, 22050},
	{16000, 11025, 12000, 8000},
	{22050, 24000, 16000, 11025},
	{1, 1, 1, 1},
}

func (m mp3header) SampleRate() int32 {
	samp := int(m.C&0x0c) >> 2
	ver := m.Version()
	return sampleTable[ver][samp]
}

// Frame durations indexed by [version][layer-1][sample-rate bits].
var durationTable [4][4][4]time.Duration = [4][4][4]time.Duration{
	{ // MPEG-1: 384, 1152, or 1152 samples per frame (all durations in ns)
		{8707483, 8000000, 12000000, 17414966},
		{26122449, 24000000, 36000000, 52244898},
		{26122449, 24000000, 36000000, 52244898},
	},
	{ // MPEG 2: -1,384,1152, or 576 samples per frame (all durations in ns)
		{24000000, 34829932, 32000000, 48000000},
		{72000000, 104489796, 96000000, 144000000},
		{36000000, 52244898, 48000000, 72000000},
	},
	{ // MPEG 2.5: -1,384,1152, or 576 samples per frame (all durations in ns)
		{17414966, 16000000, 24000000, 34829932},
		{52244898, 48000000, 72000000, 104489796},
		{26122449, 24000000, 36000000, 52244898},
	},
}

func (m mp3header) Duration() time.Duration {
	if m.A != 0xff {
		return 0
	}
	ver := m.Version()
	lay := 4 - int(m.B>>1&0x03)
	samp := int(m.C&0x0c) >> 2
	return durationTable[ver][lay-1][samp]
}

func (m mp3header) Padding() int {
	return int(m.C >> 1 & 0x01)
}

// Framesize returns the total length of the MP3 frame, including its header
func (m mp3header) Framesize() int {
	ver := m.Version()
	lay := 4 - int(m.B>>1&0x03)
	if ver == 0 && lay == 3 {
		return (int(m.Bitrate()) * 144000 / int(m.SampleRate())) + m.Padding()
	}
	return 4
}

func (m mp3header) String() string {
	if m.SyncWord() != 0xffe {
		return "(not an MP3 header)"
	}
	ver := [4]string{"1.0", "2.0", "2.5", "x.x"}[m.Version()]
	lay := [4]string{"Unknown", "I", "II", "III"}[4-int(m.B>>1&0x03)]
	samp := m.SampleRate()
	br := m.Bitrate()
	return fmt.Sprintf("MPEG-%s layer %s; %dHz %dkbps", ver, lay, samp, br)
}

// nextHeader scans buf for the next valid frame header, using `last` both to
// guess the next frame position (fast path) and to sanity-check candidates.
func nextHeader(buf []byte, last mp3header) (int, mp3header) {
	l := len(buf)
	if l < 4 {
		return -1, last
	}
	if last.A == 0xff {
		// The previous header was filled; see if we can guess the next position
		i := last.Framesize() - 4
		if l < (i + 4) {
			// HACK: the guess position isn't available yet - wait for the next read
			// to prevent a false positive in the slow path
			return -1, last
		}
		rv := mp3header{buf[i], buf[i+1], buf[i+2], buf[i+3]}
		if rv.SyncWord() == 0xffe && last.B == rv.B && last.SampleRate() == rv.SampleRate() {
			// We're in luck!
			return i, rv
		}
	}
	// Short path isn't available - scan the full buffer
	for i, c := range buf[:l-4] {
		if c != 0xff {
			continue
		}
		rv := mp3header{buf[i], buf[i+1], buf[i+2], buf[i+3]}
		if rv.SyncWord() != 0xffe || rv.Bitrate() == 0 || rv.SampleRate() == 0 {
			continue
		}
		if last.A != 0xff {
			// The previous header was empty - this one's good
			return i, rv
		} else {
			// Check if key fields match the previous header
			if last.B == rv.B && last.SampleRate() == rv.SampleRate() {
				return i, rv
			}
		}
	}
	return -1, last
}
/*
 * This file is part of the KubeVirt project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright 2017 Red Hat, Inc.
 *
 */

// NOTE(review): this span holds copy 1 of 2 of the console test file; a
// commit message and the refactored copy follow. Reproduced untouched,
// comments only.
package tests_test

import (
	"time"

	expect "github.com/google/goexpect"
	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/extensions/table"
	. "github.com/onsi/gomega"
	k8sv1 "k8s.io/api/core/v1"

	"kubevirt.io/kubevirt/tests/util"

	v1 "kubevirt.io/api/core/v1"
	"kubevirt.io/client-go/kubecli"
	"kubevirt.io/kubevirt/tests"
	"kubevirt.io/kubevirt/tests/console"
	cd "kubevirt.io/kubevirt/tests/containerdisk"
	"kubevirt.io/kubevirt/tests/libvmi"
)

// Serial-console behaviour tests for VirtualMachineInstances.
var _ = Describe("[rfe_id:127][posneg:negative][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]Console", func() {
	var virtClient kubecli.KubevirtClient

	BeforeEach(func() {
		var err error
		virtClient, err = kubecli.GetKubevirtClient()
		util.PanicOnError(err)
		tests.BeforeTestCleanup()
	})

	// expectConsoleOutput sends a newline and asserts the console echoes the
	// expected string within 120 seconds.
	expectConsoleOutput := func(vmi *v1.VirtualMachineInstance, expected string) {
		By("Checking that the console output equals to expected one")
		Expect(console.SafeExpectBatch(vmi, []expect.Batcher{
			&expect.BSnd{S: "\n"},
			&expect.BExp{R: expected},
		}, 120)).To(Succeed())
	}

	Describe("[rfe_id:127][posneg:negative][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance", func() {
		Context("with a serial console", func() {
			Context("with a cirros image", func() {
				It("[test_id:1588]should return that we are running cirros", func() {
					vmi := libvmi.NewCirros()
					vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
					expectConsoleOutput(
						vmi,
						"login as 'cirros' user",
					)
				})
			})

			Context("with a fedora image", func() {
				It("[sig-compute][test_id:1589]should return that we are running fedora", func() {
					vmi := libvmi.NewFedora()
					vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
					expectConsoleOutput(
						vmi,
						"Welcome to",
					)
				})
			})

			Context("with an alpine image", func() {
				type vmiBuilder func() *v1.VirtualMachineInstance
				// Builders for file-backed and block-backed alpine VMIs.
				newVirtualMachineInstanceWithAlpineFileDisk := func() *v1.VirtualMachineInstance {
					vmi, _ := tests.NewRandomVirtualMachineInstanceWithFileDisk(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), util.NamespaceTestDefault, k8sv1.ReadWriteOnce)
					return vmi
				}
				newVirtualMachineInstanceWithAlpineBlockDisk := func() *v1.VirtualMachineInstance {
					vmi, _ := tests.NewRandomVirtualMachineInstanceWithBlockDisk(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), util.NamespaceTestDefault, k8sv1.ReadWriteOnce)
					return vmi
				}
				table.DescribeTable("should return that we are running alpine", func(createVMI vmiBuilder) {
					vmi := createVMI()
					vmi = tests.RunVMIAndExpectLaunch(vmi, 120)
					expectConsoleOutput(vmi, "login")
				},
					table.Entry("[test_id:4637][storage-req]with Filesystem Disk", newVirtualMachineInstanceWithAlpineFileDisk),
					table.Entry("[test_id:4638][storage-req]with Block Disk", newVirtualMachineInstanceWithAlpineBlockDisk),
				)
			})

			It("[test_id:1590]should be able to reconnect to console multiple times", func() {
				vmi := libvmi.NewAlpine()
				vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
				for i := 0; i < 5; i++ {
					expectConsoleOutput(vmi, "login")
				}
			})

			It("[test_id:1591]should close console connection when new console connection is opened", func(done Done) {
				vmi := libvmi.NewAlpine()
				vmi = tests.RunVMIAndExpectLaunch(vmi, 30)

				By("opening 1st console connection")
				expecter, errChan, err := console.NewExpecter(virtClient, vmi, 30*time.Second)
				Expect(err).ToNot(HaveOccurred())
				defer expecter.Close()

				By("expecting error on 1st console connection")
				go func() {
					defer GinkgoRecover()
					select {
					case receivedErr := <-errChan:
						Expect(receivedErr.Error()).To(ContainSubstring("close"))
						close(done)
					case <-time.After(60 * time.Second):
						Fail("timed out waiting for closed 1st connection")
					}
				}()

				By("opening 2nd console connection")
				expectConsoleOutput(vmi, "login")
			}, 220)

			It("[test_id:1592]should wait until the virtual machine is in running state and return a stream interface", func() {
				vmi := libvmi.NewAlpine()
				By("Creating a new VirtualMachineInstance")
				vmi, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
				Expect(err).ToNot(HaveOccurred())

				By("and connecting to it very quickly. Hopefully the VM is not yet up")
				_, err = virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, &kubecli.SerialConsoleOptions{ConnectionTimeout: 30 * time.Second})
				Expect(err).ToNot(HaveOccurred())
			})

			It("[test_id:1593]should fail waiting for the virtual machine instance to be running", func() {
				vmi := libvmi.NewAlpine()
				// Unsatisfiable node affinity keeps the VMI unscheduled.
				vmi.Spec.Affinity = &k8sv1.Affinity{
					NodeAffinity: &k8sv1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
							NodeSelectorTerms: []k8sv1.NodeSelectorTerm{
								{
									MatchExpressions: []k8sv1.NodeSelectorRequirement{
										{Key: "kubernetes.io/hostname", Operator: k8sv1.NodeSelectorOpIn, Values: []string{"notexist"}},
									},
								},
							},
						},
					},
				}

				By("Creating a new VirtualMachineInstance")
				vmi, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
				Expect(err).ToNot(HaveOccurred())

				_, err = virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, &kubecli.SerialConsoleOptions{ConnectionTimeout: 30 * time.Second})
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(Equal("Timeout trying to connect to the virtual machine instance"))
			})
		})

		Context("without a serial console", func() {
			var vmi *v1.VirtualMachineInstance

			BeforeEach(func() {
				vmi = tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskAlpine))
				f := false
				vmi.Spec.Domain.Devices.AutoattachSerialConsole = &f
			})

			It("[test_id:4116]should create the vmi without any issue", func() {
				tests.RunVMIAndExpectLaunch(vmi, 30)
			})

			It("[test_id:4117]should not have the serial console in xml", func() {
				tests.RunVMIAndExpectLaunch(vmi, 30)
				runningVMISpec, err := tests.GetRunningVMIDomainSpec(vmi)
				Expect(err).ToNot(HaveOccurred(), "should get vmi spec without problem")
				Expect(len(runningVMISpec.Devices.Serials)).To(Equal(0), "should not have any serial consoles present")
				Expect(len(runningVMISpec.Devices.Consoles)).To(Equal(0), "should not have any virtio console for serial consoles")
			})

			It("[test_id:4118]should not connect to the serial console", func() {
				vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
				_, err := virtClient.VirtualMachineInstance(vmi.ObjectMeta.Namespace).SerialConsole(vmi.ObjectMeta.Name, &kubecli.SerialConsoleOptions{})
				Expect(err.Error()).To(Equal("No serial consoles are present."), "serial console should not connect if there are no serial consoles present")
			})
		})
	})
})
console_test: create one VM w/o console instead of three The former code was starting a VM without console three times only to check read-only facts about it. Checking these facts on a single VM is good enough, while using less resources. Signed-off-by: Dan Kenigsberg <184831158ae7531bcabc22617ed51e8717c81e93@redhat.com>
/*
 * This file is part of the KubeVirt project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright 2017 Red Hat, Inc.
 *
 */

// NOTE(review): copy 2 of 2 — the refactored console test file, in which the
// three "without a serial console" tests were collapsed into one VM start.
// Reproduced untouched, comments only.
package tests_test

import (
	"time"

	expect "github.com/google/goexpect"
	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/extensions/table"
	. "github.com/onsi/gomega"
	k8sv1 "k8s.io/api/core/v1"

	"kubevirt.io/kubevirt/tests/util"

	v1 "kubevirt.io/api/core/v1"
	"kubevirt.io/client-go/kubecli"
	"kubevirt.io/kubevirt/tests"
	"kubevirt.io/kubevirt/tests/console"
	cd "kubevirt.io/kubevirt/tests/containerdisk"
	"kubevirt.io/kubevirt/tests/libvmi"
)

var _ = Describe("[rfe_id:127][posneg:negative][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]Console", func() {
	var virtClient kubecli.KubevirtClient

	BeforeEach(func() {
		var err error
		virtClient, err = kubecli.GetKubevirtClient()
		util.PanicOnError(err)
		tests.BeforeTestCleanup()
	})

	// expectConsoleOutput sends a newline and asserts the console echoes the
	// expected string within 120 seconds.
	expectConsoleOutput := func(vmi *v1.VirtualMachineInstance, expected string) {
		By("Checking that the console output equals to expected one")
		Expect(console.SafeExpectBatch(vmi, []expect.Batcher{
			&expect.BSnd{S: "\n"},
			&expect.BExp{R: expected},
		}, 120)).To(Succeed())
	}

	Describe("[rfe_id:127][posneg:negative][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance", func() {
		Context("with a serial console", func() {
			Context("with a cirros image", func() {
				It("[test_id:1588]should return that we are running cirros", func() {
					vmi := libvmi.NewCirros()
					vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
					expectConsoleOutput(
						vmi,
						"login as 'cirros' user",
					)
				})
			})

			Context("with a fedora image", func() {
				It("[sig-compute][test_id:1589]should return that we are running fedora", func() {
					vmi := libvmi.NewFedora()
					vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
					expectConsoleOutput(
						vmi,
						"Welcome to",
					)
				})
			})

			Context("with an alpine image", func() {
				type vmiBuilder func() *v1.VirtualMachineInstance
				newVirtualMachineInstanceWithAlpineFileDisk := func() *v1.VirtualMachineInstance {
					vmi, _ := tests.NewRandomVirtualMachineInstanceWithFileDisk(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), util.NamespaceTestDefault, k8sv1.ReadWriteOnce)
					return vmi
				}
				newVirtualMachineInstanceWithAlpineBlockDisk := func() *v1.VirtualMachineInstance {
					vmi, _ := tests.NewRandomVirtualMachineInstanceWithBlockDisk(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), util.NamespaceTestDefault, k8sv1.ReadWriteOnce)
					return vmi
				}
				table.DescribeTable("should return that we are running alpine", func(createVMI vmiBuilder) {
					vmi := createVMI()
					vmi = tests.RunVMIAndExpectLaunch(vmi, 120)
					expectConsoleOutput(vmi, "login")
				},
					table.Entry("[test_id:4637][storage-req]with Filesystem Disk", newVirtualMachineInstanceWithAlpineFileDisk),
					table.Entry("[test_id:4638][storage-req]with Block Disk", newVirtualMachineInstanceWithAlpineBlockDisk),
				)
			})

			It("[test_id:1590]should be able to reconnect to console multiple times", func() {
				vmi := libvmi.NewAlpine()
				vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
				for i := 0; i < 5; i++ {
					expectConsoleOutput(vmi, "login")
				}
			})

			It("[test_id:1591]should close console connection when new console connection is opened", func(done Done) {
				vmi := libvmi.NewAlpine()
				vmi = tests.RunVMIAndExpectLaunch(vmi, 30)

				By("opening 1st console connection")
				expecter, errChan, err := console.NewExpecter(virtClient, vmi, 30*time.Second)
				Expect(err).ToNot(HaveOccurred())
				defer expecter.Close()

				By("expecting error on 1st console connection")
				go func() {
					defer GinkgoRecover()
					select {
					case receivedErr := <-errChan:
						Expect(receivedErr.Error()).To(ContainSubstring("close"))
						close(done)
					case <-time.After(60 * time.Second):
						Fail("timed out waiting for closed 1st connection")
					}
				}()

				By("opening 2nd console connection")
				expectConsoleOutput(vmi, "login")
			}, 220)

			It("[test_id:1592]should wait until the virtual machine is in running state and return a stream interface", func() {
				vmi := libvmi.NewAlpine()
				By("Creating a new VirtualMachineInstance")
				vmi, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
				Expect(err).ToNot(HaveOccurred())

				By("and connecting to it very quickly. Hopefully the VM is not yet up")
				_, err = virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, &kubecli.SerialConsoleOptions{ConnectionTimeout: 30 * time.Second})
				Expect(err).ToNot(HaveOccurred())
			})

			It("[test_id:1593]should fail waiting for the virtual machine instance to be running", func() {
				vmi := libvmi.NewAlpine()
				// Unsatisfiable node affinity keeps the VMI unscheduled.
				vmi.Spec.Affinity = &k8sv1.Affinity{
					NodeAffinity: &k8sv1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
							NodeSelectorTerms: []k8sv1.NodeSelectorTerm{
								{
									MatchExpressions: []k8sv1.NodeSelectorRequirement{
										{Key: "kubernetes.io/hostname", Operator: k8sv1.NodeSelectorOpIn, Values: []string{"notexist"}},
									},
								},
							},
						},
					},
				}

				By("Creating a new VirtualMachineInstance")
				vmi, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
				Expect(err).ToNot(HaveOccurred())

				_, err = virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, &kubecli.SerialConsoleOptions{ConnectionTimeout: 30 * time.Second})
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(Equal("Timeout trying to connect to the virtual machine instance"))
			})
		})

		Context("without a serial console", func() {
			// One VM start covers all read-only facts (replaces the three
			// separate tests of the earlier copy).
			It("[test_id:4118]should run but not be connectable via the serial console", func() {
				vmi := libvmi.NewAlpine()
				f := false
				vmi.Spec.Domain.Devices.AutoattachSerialConsole = &f
				vmi = tests.RunVMIAndExpectLaunch(vmi, 30)
				runningVMISpec, err := tests.GetRunningVMIDomainSpec(vmi)
				Expect(err).ToNot(HaveOccurred(), "should get vmi spec without problem")
				Expect(len(runningVMISpec.Devices.Serials)).To(Equal(0), "should not have any serial consoles present")
				Expect(len(runningVMISpec.Devices.Consoles)).To(Equal(0), "should not have any virtio console for serial consoles")

				By("failing to connect to serial console")
				_, err = virtClient.VirtualMachineInstance(vmi.ObjectMeta.Namespace).SerialConsole(vmi.ObjectMeta.Name, &kubecli.SerialConsoleOptions{})
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(Equal("No serial consoles are present."), "serial console should not connect if there are no serial consoles present")
			})
		})
	})
})
/*
   Copyright 2020 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// End-to-end tests for the Docker "api" CLI wrapper. NOTE(review): this span
// is truncated mid-way through TestVersion; reproduced untouched, comments
// only.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"testing"
	"time"

	"gotest.tools/v3/assert"
	"gotest.tools/v3/golden"
	"gotest.tools/v3/icmd"

	. "github.com/docker/api/tests/framework"
)

// binDir holds the directory of the CLI binary under test, set by TestMain.
var binDir string

// TestMain builds/locates the CLI once for the whole suite.
func TestMain(m *testing.M) {
	p, cleanup, err := SetupExistingCLI()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	binDir = p
	exitCode := m.Run()
	cleanup()
	os.Exit(exitCode)
}

// compose is expected to be unsupported on a plain moby context.
func TestComposeNotImplemented(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)
	res := c.RunDockerCmd("context", "show")
	res.Assert(t, icmd.Expected{Out: "default"})
	res = c.RunDockerCmd("compose", "up")
	res.Assert(t, icmd.Expected{
		ExitCode: 1,
		Err:      `compose command not supported on context type "moby": not implemented`,
	})
}

// Basic behaviour of the default context (show/ls/inspect).
func TestContextDefault(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)

	t.Run("show", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "show")
		res.Assert(t, icmd.Expected{Out: "default"})
	})

	t.Run("ls", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "ls")
		res.Assert(t, icmd.Success)
		golden.Assert(t, res.Stdout(), GoldenFile("ls-out-default"))
	})

	t.Run("inspect", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "inspect", "default")
		res.Assert(t, icmd.Expected{Out: `"Name": "default"`})
	})

	t.Run("inspect current", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "inspect")
		res.Assert(t, icmd.Expected{Out: `"Name": "default"`})
	})
}

// Creating a docker context from the default one, and listing variants.
func TestContextCreateDocker(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)
	res := c.RunDockerCmd("context", "create", "test-docker", "--from", "default")
	res.Assert(t, icmd.Expected{Out: "test-docker"})

	t.Run("ls", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "ls")
		res.Assert(t, icmd.Success)
		golden.Assert(t, res.Stdout(), GoldenFile("ls-out-test-docker"))
	})

	t.Run("ls quiet", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "ls", "-q")
		golden.Assert(t, res.Stdout(), "ls-out-test-docker-quiet.golden")
	})

	t.Run("ls format", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "ls", "--format", "{{ json . }}")
		res.Assert(t, icmd.Expected{Out: `"Name":"default"`})
	})
}

// "context inspect" without an argument inspects the current context.
func TestContextInspect(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)
	res := c.RunDockerCmd("context", "create", "test-docker", "--from", "default")
	res.Assert(t, icmd.Expected{Out: "test-docker"})

	t.Run("inspect current", func(t *testing.T) {
		// Cannot be run in parallel because of "context use"
		res := c.RunDockerCmd("context", "use", "test-docker")
		res.Assert(t, icmd.Expected{Out: "test-docker"})
		res = c.RunDockerCmd("context", "inspect")
		res.Assert(t, icmd.Expected{Out: `"Name": "test-docker"`})
	})
}

// ACI context creation help and flag parsing.
func TestContextHelpACI(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)

	t.Run("help", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "create", "aci", "--help")
		// Can't use golden here as the help prints the config directory which changes
		res.Assert(t, icmd.Expected{Out: "docker context create aci CONTEXT [flags]"})
		res.Assert(t, icmd.Expected{Out: "--location"})
		res.Assert(t, icmd.Expected{Out: "--subscription-id"})
		res.Assert(t, icmd.Expected{Out: "--resource-group"})
	})

	t.Run("check exec", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("context", "create", "aci", "--subscription-id", "invalid-id")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "accepts 1 arg(s), received 0",
		})
		assert.Assert(t, !strings.Contains(res.Combined(), "unknown flag"))
	})
}

// Creating an ACI context over an existing name must fail.
func TestContextDuplicateACI(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)
	c.RunDockerCmd("context", "create", "mycontext", "--from", "default").Assert(t, icmd.Success)
	res := c.RunDockerCmd("context", "create", "aci", "mycontext")
	res.Assert(t, icmd.Expected{
		ExitCode: 1,
		Err:      "context mycontext: already exists",
	})
}

// Removing the in-use context requires -f.
func TestContextRemove(t *testing.T) {
	t.Run("remove current", func(t *testing.T) {
		c := NewParallelE2eCLI(t, binDir)
		c.RunDockerCmd("context", "create", "test-context-rm", "--from", "default").Assert(t, icmd.Success)
		res := c.RunDockerCmd("context", "use", "test-context-rm")
		res.Assert(t, icmd.Expected{Out: "test-context-rm"})
		res = c.RunDockerCmd("context", "rm", "test-context-rm")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "cannot delete current context",
		})
	})

	t.Run("force remove current", func(t *testing.T) {
		c := NewParallelE2eCLI(t, binDir)
		c.RunDockerCmd("context", "create", "test-context-rmf").Assert(t, icmd.Success)
		c.RunDockerCmd("context", "use", "test-context-rmf").Assert(t, icmd.Success)
		res := c.RunDockerCmd("context", "rm", "-f", "test-context-rmf")
		res.Assert(t, icmd.Expected{Out: "test-context-rmf"})
		res = c.RunDockerCmd("context", "ls")
		res.Assert(t, icmd.Expected{Out: "default *"})
	})
}

func TestLoginCommandDelegation(t *testing.T) {
	// These tests just check that the existing CLI is called in various cases.
	// They do not test actual login functionality.
	c := NewParallelE2eCLI(t, binDir)

	t.Run("default context", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("login", "-u", "nouser", "-p", "wrongpasword")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password",
		})
	})

	t.Run("interactive", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("login", "someregistry.docker.io")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "Cannot perform an interactive login from a non TTY device",
		})
	})

	t.Run("logout", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("logout", "someregistry.docker.io")
		res.Assert(t, icmd.Expected{Out: "someregistry.docker.io"})
	})

	t.Run("existing context", func(t *testing.T) {
		c := NewParallelE2eCLI(t, binDir)
		c.RunDockerCmd("context", "create", "local", "local").Assert(t, icmd.Success)
		c.RunDockerCmd("context", "use", "local").Assert(t, icmd.Success)
		res := c.RunDockerCmd("login", "-u", "nouser", "-p", "wrongpasword")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password",
		})
	})
}

// Unknown cloud backends are rejected by "docker login <backend>".
func TestCloudLogin(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)

	t.Run("unknown backend", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("login", "mycloudbackend")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "unknown backend type for cloud login: mycloudbackend",
		})
	})
}

// Without the com.docker.cli binary on PATH the wrapper must fail clearly.
func TestMissingExistingCLI(t *testing.T) {
	t.Parallel()
	home, err := ioutil.TempDir("", "")
	assert.NilError(t, err)
	t.Cleanup(func() {
		_ = os.RemoveAll(home)
	})

	bin, err := ioutil.TempDir("", "")
	assert.NilError(t, err)
	t.Cleanup(func() {
		_ = os.RemoveAll(bin)
	})
	err = CopyFile(filepath.Join(binDir, DockerExecutableName), filepath.Join(bin, DockerExecutableName))
	assert.NilError(t, err)

	env := []string{"PATH=" + bin}
	if runtime.GOOS == "windows" {
		env = append(env, "USERPROFILE="+home)
	} else {
		env = append(env, "HOME="+home)
	}

	c := icmd.Cmd{
		Env:     env,
		Command: []string{filepath.Join(bin, "docker")},
	}
	res := icmd.RunCmd(c)
	res.Assert(t, icmd.Expected{
		ExitCode: 1,
		Err:      `"com.docker.cli": executable file not found`,
	})
}

// Legacy commands must be delegated to the classic CLI.
func TestLegacy(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)

	t.Run("help", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("--help")
		res.Assert(t, icmd.Expected{Out: "swarm"})
	})

	t.Run("swarm", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("swarm", "join")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      `"docker swarm join" requires exactly 1 argument.`,
		})
	})

	t.Run("local run", func(t *testing.T) {
		t.Parallel()
		cmd := c.NewDockerCmd("run", "--rm", "hello-world")
		cmd.Timeout = 20 * time.Second
		res := icmd.RunCmd(cmd)
		res.Assert(t, icmd.Expected{Out: "Hello from Docker!"})
	})

	t.Run("error messages", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("foo")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "docker: 'foo' is not a docker command.",
		})
	})

	t.Run("host flag", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("-H", "tcp://localhost:123", "version")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "Cannot connect to the Docker daemon at tcp://localhost:123",
		})
	})

	t.Run("existing contexts delegate", func(t *testing.T) {
		c := NewParallelE2eCLI(t, binDir)
		c.RunDockerCmd("context", "create", "moby-ctx", "--from=default").Assert(t, icmd.Success)
		c.RunDockerCmd("context", "use", "moby-ctx").Assert(t, icmd.Success)
		res := c.RunDockerCmd("swarm", "join")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      `"docker swarm join" requires exactly 1 argument.`,
		})
	})

	t.Run("host flag overrides context", func(t *testing.T) {
		c := NewParallelE2eCLI(t, binDir)
		c.RunDockerCmd("context", "create", "example", "test-example").Assert(t, icmd.Success)
		c.RunDockerCmd("context", "use", "test-example").Assert(t, icmd.Success)
		endpoint := "unix:///var/run/docker.sock"
		if runtime.GOOS == "windows" {
			endpoint = "npipe:////./pipe/docker_engine"
		}
		res := c.RunDockerCmd("-H", endpoint, "ps")
		res.Assert(t, icmd.Success)
		// Example backend's ps output includes these strings
		assert.Assert(t, !strings.Contains(res.Stdout(), "id"))
		assert.Assert(t, !strings.Contains(res.Stdout(), "1234"))
	})
}

// Login flags must reach the classic CLI unchanged.
func TestLegacyLogin(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)

	t.Run("host flag login", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("-H", "tcp://localhost:123", "login", "-u", "nouser", "-p", "wrongpasword")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "WARNING! Using --password via the CLI is insecure. Use --password-stdin.",
		})
	})

	t.Run("log level flag login", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("--log-level", "debug", "login", "-u", "nouser", "-p", "wrongpasword")
		res.Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "WARNING! Using --password via the CLI is insecure",
		})
	})

	t.Run("login help global flags", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("login", "--help")
		res.Assert(t, icmd.Success)
		assert.Assert(t, !strings.Contains(res.Combined(), "--log-level"))
	})
}

// Commands unavailable on an example context must point at "default".
func TestUnsupportedCommand(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)
	res := c.RunDockerCmd("context", "create", "example", "test-example")
	res.Assert(t, icmd.Success)
	res = c.RunDockerCmd("--context", "test-example", "images")
	res.Assert(t, icmd.Expected{
		ExitCode: 1,
		Err:      `Command "images" not available in current context (test-example), you can use the "default" context to run this command`,
	})
}

// NOTE(review): TestVersion is truncated at the end of this span — the final
// statement is cut off mid string literal and is reproduced as found.
func TestVersion(t *testing.T) {
	c := NewParallelE2eCLI(t, binDir)

	t.Run("azure version", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("version")
		res.Assert(t, icmd.Expected{Out: "Azure integration"})
	})

	t.Run("format", func(t *testing.T) {
		t.Parallel()
		res := c.RunDockerCmd("version", "-f", "{{ json . }}")
		res.Assert(t, icmd.Expected{Out: `"Client":`})
		res = c.RunDockerCmd("version", "--format", "{{ json . 
}}") res.Assert(t, icmd.Expected{Out: `"Client":`}) }) t.Run("delegate version flag", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "example", "test-example").Assert(t, icmd.Success) c.RunDockerCmd("context", "use", "test-example").Assert(t, icmd.Success) res := c.RunDockerCmd("-v") res.Assert(t, icmd.Expected{Out: "Docker version"}) }) } func TestMockBackend(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "example", "test-example").Assert(t, icmd.Success) res := c.RunDockerCmd("context", "use", "test-example") res.Assert(t, icmd.Expected{Out: "test-example"}) t.Run("use", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "show") res.Assert(t, icmd.Expected{Out: "test-example"}) res = c.RunDockerCmd("context", "ls") golden.Assert(t, res.Stdout(), GoldenFile("ls-out-test-example")) }) t.Run("ps", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("ps") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "ps-out-example.golden") }) t.Run("ps quiet", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("ps", "-q") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "ps-quiet-out-example.golden") }) t.Run("ps quiet all", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("ps", "-q", "--all") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "ps-quiet-all-out-example.golden") }) t.Run("inspect", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("inspect", "id") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "inspect-id.golden") }) t.Run("run", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("run", "-d", "nginx", "-p", "80:80") res.Assert(t, icmd.Expected{ Out: `Running container "nginx" with name`, }) }) } tests.e2e: Check Windows specific error Signed-off-by: Chris Crone <0bd24f27efbdecb22ed8ee46cb16be12b5608d69@docker.com> /* Copyright 2020 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "io/ioutil" "os" "path/filepath" "runtime" "strings" "testing" "time" "gotest.tools/v3/assert" "gotest.tools/v3/golden" "gotest.tools/v3/icmd" . "github.com/docker/api/tests/framework" ) var binDir string func TestMain(m *testing.M) { p, cleanup, err := SetupExistingCLI() if err != nil { fmt.Println(err) os.Exit(1) } binDir = p exitCode := m.Run() cleanup() os.Exit(exitCode) } func TestComposeNotImplemented(t *testing.T) { c := NewParallelE2eCLI(t, binDir) res := c.RunDockerCmd("context", "show") res.Assert(t, icmd.Expected{Out: "default"}) res = c.RunDockerCmd("compose", "up") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: `compose command not supported on context type "moby": not implemented`, }) } func TestContextDefault(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("show", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "show") res.Assert(t, icmd.Expected{Out: "default"}) }) t.Run("ls", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "ls") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), GoldenFile("ls-out-default")) }) t.Run("inspect", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "inspect", "default") res.Assert(t, icmd.Expected{Out: `"Name": "default"`}) }) t.Run("inspect current", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "inspect") res.Assert(t, icmd.Expected{Out: `"Name": "default"`}) }) } func 
TestContextCreateDocker(t *testing.T) { c := NewParallelE2eCLI(t, binDir) res := c.RunDockerCmd("context", "create", "test-docker", "--from", "default") res.Assert(t, icmd.Expected{Out: "test-docker"}) t.Run("ls", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "ls") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), GoldenFile("ls-out-test-docker")) }) t.Run("ls quiet", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "ls", "-q") golden.Assert(t, res.Stdout(), "ls-out-test-docker-quiet.golden") }) t.Run("ls format", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "ls", "--format", "{{ json . }}") res.Assert(t, icmd.Expected{Out: `"Name":"default"`}) }) } func TestContextInspect(t *testing.T) { c := NewParallelE2eCLI(t, binDir) res := c.RunDockerCmd("context", "create", "test-docker", "--from", "default") res.Assert(t, icmd.Expected{Out: "test-docker"}) t.Run("inspect current", func(t *testing.T) { // Cannot be run in parallel because of "context use" res := c.RunDockerCmd("context", "use", "test-docker") res.Assert(t, icmd.Expected{Out: "test-docker"}) res = c.RunDockerCmd("context", "inspect") res.Assert(t, icmd.Expected{Out: `"Name": "test-docker"`}) }) } func TestContextHelpACI(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("help", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "create", "aci", "--help") // Can't use golden here as the help prints the config directory which changes res.Assert(t, icmd.Expected{Out: "docker context create aci CONTEXT [flags]"}) res.Assert(t, icmd.Expected{Out: "--location"}) res.Assert(t, icmd.Expected{Out: "--subscription-id"}) res.Assert(t, icmd.Expected{Out: "--resource-group"}) }) t.Run("check exec", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "create", "aci", "--subscription-id", "invalid-id") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "accepts 1 arg(s), received 0", }) assert.Assert(t, 
!strings.Contains(res.Combined(), "unknown flag")) }) } func TestContextDuplicateACI(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "mycontext", "--from", "default").Assert(t, icmd.Success) res := c.RunDockerCmd("context", "create", "aci", "mycontext") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "context mycontext: already exists", }) } func TestContextRemove(t *testing.T) { t.Run("remove current", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "test-context-rm", "--from", "default").Assert(t, icmd.Success) res := c.RunDockerCmd("context", "use", "test-context-rm") res.Assert(t, icmd.Expected{Out: "test-context-rm"}) res = c.RunDockerCmd("context", "rm", "test-context-rm") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "cannot delete current context", }) }) t.Run("force remove current", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "test-context-rmf").Assert(t, icmd.Success) c.RunDockerCmd("context", "use", "test-context-rmf").Assert(t, icmd.Success) res := c.RunDockerCmd("context", "rm", "-f", "test-context-rmf") res.Assert(t, icmd.Expected{Out: "test-context-rmf"}) res = c.RunDockerCmd("context", "ls") res.Assert(t, icmd.Expected{Out: "default *"}) }) } func TestLoginCommandDelegation(t *testing.T) { // These tests just check that the existing CLI is called in various cases. // They do not test actual login functionality. 
c := NewParallelE2eCLI(t, binDir) t.Run("default context", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("login", "-u", "nouser", "-p", "wrongpasword") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password", }) }) t.Run("interactive", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("login", "someregistry.docker.io") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "Cannot perform an interactive login from a non TTY device", }) }) t.Run("logout", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("logout", "someregistry.docker.io") res.Assert(t, icmd.Expected{Out: "someregistry.docker.io"}) }) t.Run("existing context", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "local", "local").Assert(t, icmd.Success) c.RunDockerCmd("context", "use", "local").Assert(t, icmd.Success) res := c.RunDockerCmd("login", "-u", "nouser", "-p", "wrongpasword") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password", }) }) } func TestCloudLogin(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("unknown backend", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("login", "mycloudbackend") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "unknown backend type for cloud login: mycloudbackend", }) }) } func TestMissingExistingCLI(t *testing.T) { t.Parallel() home, err := ioutil.TempDir("", "") assert.NilError(t, err) t.Cleanup(func() { _ = os.RemoveAll(home) }) bin, err := ioutil.TempDir("", "") assert.NilError(t, err) t.Cleanup(func() { _ = os.RemoveAll(bin) }) err = CopyFile(filepath.Join(binDir, DockerExecutableName), filepath.Join(bin, DockerExecutableName)) assert.NilError(t, err) env := []string{"PATH=" + bin} if runtime.GOOS == "windows" { env = append(env, "USERPROFILE="+home) } else { env = append(env, "HOME="+home) } c := icmd.Cmd{ Env: env, Command: 
[]string{filepath.Join(bin, "docker")}, } res := icmd.RunCmd(c) res.Assert(t, icmd.Expected{ ExitCode: 1, Err: `"com.docker.cli": executable file not found`, }) } func TestLegacy(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("help", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("--help") res.Assert(t, icmd.Expected{Out: "swarm"}) }) t.Run("swarm", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("swarm", "join") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: `"docker swarm join" requires exactly 1 argument.`, }) }) t.Run("local run", func(t *testing.T) { t.Parallel() cmd := c.NewDockerCmd("run", "--rm", "hello-world") cmd.Timeout = 20 * time.Second res := icmd.RunCmd(cmd) res.Assert(t, icmd.Expected{Out: "Hello from Docker!"}) }) t.Run("error messages", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("foo") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "docker: 'foo' is not a docker command.", }) }) t.Run("host flag", func(t *testing.T) { t.Parallel() stderr := "Cannot connect to the Docker daemon at tcp://localhost:123" if runtime.GOOS == "windows" { stderr = "error during connect: Get http://localhost:123" } res := c.RunDockerCmd("-H", "tcp://localhost:123", "version") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: stderr, }) }) t.Run("existing contexts delegate", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "moby-ctx", "--from=default").Assert(t, icmd.Success) c.RunDockerCmd("context", "use", "moby-ctx").Assert(t, icmd.Success) res := c.RunDockerCmd("swarm", "join") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: `"docker swarm join" requires exactly 1 argument.`, }) }) t.Run("host flag overrides context", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "example", "test-example").Assert(t, icmd.Success) c.RunDockerCmd("context", "use", "test-example").Assert(t, icmd.Success) endpoint := "unix:///var/run/docker.sock" if runtime.GOOS == 
"windows" { endpoint = "npipe:////./pipe/docker_engine" } res := c.RunDockerCmd("-H", endpoint, "ps") res.Assert(t, icmd.Success) // Example backend's ps output includes these strings assert.Assert(t, !strings.Contains(res.Stdout(), "id")) assert.Assert(t, !strings.Contains(res.Stdout(), "1234")) }) } func TestLegacyLogin(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("host flag login", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("-H", "tcp://localhost:123", "login", "-u", "nouser", "-p", "wrongpasword") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "WARNING! Using --password via the CLI is insecure. Use --password-stdin.", }) }) t.Run("log level flag login", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("--log-level", "debug", "login", "-u", "nouser", "-p", "wrongpasword") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: "WARNING! Using --password via the CLI is insecure", }) }) t.Run("login help global flags", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("login", "--help") res.Assert(t, icmd.Success) assert.Assert(t, !strings.Contains(res.Combined(), "--log-level")) }) } func TestUnsupportedCommand(t *testing.T) { c := NewParallelE2eCLI(t, binDir) res := c.RunDockerCmd("context", "create", "example", "test-example") res.Assert(t, icmd.Success) res = c.RunDockerCmd("--context", "test-example", "images") res.Assert(t, icmd.Expected{ ExitCode: 1, Err: `Command "images" not available in current context (test-example), you can use the "default" context to run this command`, }) } func TestVersion(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("azure version", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("version") res.Assert(t, icmd.Expected{Out: "Azure integration"}) }) t.Run("format", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("version", "-f", "{{ json . }}") res.Assert(t, icmd.Expected{Out: `"Client":`}) res = c.RunDockerCmd("version", "--format", "{{ json . 
}}") res.Assert(t, icmd.Expected{Out: `"Client":`}) }) t.Run("delegate version flag", func(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "example", "test-example").Assert(t, icmd.Success) c.RunDockerCmd("context", "use", "test-example").Assert(t, icmd.Success) res := c.RunDockerCmd("-v") res.Assert(t, icmd.Expected{Out: "Docker version"}) }) } func TestMockBackend(t *testing.T) { c := NewParallelE2eCLI(t, binDir) c.RunDockerCmd("context", "create", "example", "test-example").Assert(t, icmd.Success) res := c.RunDockerCmd("context", "use", "test-example") res.Assert(t, icmd.Expected{Out: "test-example"}) t.Run("use", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("context", "show") res.Assert(t, icmd.Expected{Out: "test-example"}) res = c.RunDockerCmd("context", "ls") golden.Assert(t, res.Stdout(), GoldenFile("ls-out-test-example")) }) t.Run("ps", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("ps") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "ps-out-example.golden") }) t.Run("ps quiet", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("ps", "-q") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "ps-quiet-out-example.golden") }) t.Run("ps quiet all", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("ps", "-q", "--all") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "ps-quiet-all-out-example.golden") }) t.Run("inspect", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("inspect", "id") res.Assert(t, icmd.Success) golden.Assert(t, res.Stdout(), "inspect-id.golden") }) t.Run("run", func(t *testing.T) { t.Parallel() res := c.RunDockerCmd("run", "-d", "nginx", "-p", "80:80") res.Assert(t, icmd.Expected{ Out: `Running container "nginx" with name`, }) }) }
package influxdb

import (
	"errors"
	"fmt"
	"path/filepath"
)

var (
	// ErrAuthorizerNotSupported notes that the provided authorizer is not supported for the action you are trying to perform.
	ErrAuthorizerNotSupported = errors.New("your authorizer is not supported, please use *platform.Authorization as authorizer")
	// ErrInvalidResourceType notes that the provided resource is invalid
	ErrInvalidResourceType = errors.New("unknown resource type for permission")
	// ErrInvalidAction notes that the provided action is invalid
	ErrInvalidAction = errors.New("unknown action for permission")
)

// Authorizer will authorize a permission.
type Authorizer interface {
	// Allowed returns true if the associated permission is allowed by the authorizer
	Allowed(p Permission) bool

	// ID returns an identifier used for auditing.
	Identifier() ID

	// GetUserID returns the user id.
	GetUserID() ID

	// Kind metadata for auditing.
	Kind() string
}

// PermissionAllowed determines if a permission is allowed.
// It reports whether any permission in ps matches perm (see Permission.Matches).
func PermissionAllowed(perm Permission, ps []Permission) bool {
	for _, p := range ps {
		if p.Matches(perm) {
			return true
		}
	}
	return false
}

// Action is an enum defining all possible resource operations
type Action string

const (
	// ReadAction is the action for reading.
	ReadAction Action = "read" // 1
	// WriteAction is the action for writing.
	WriteAction Action = "write" // 2
)

// actions lists every member of the Action enum; used to build default permission sets.
var actions = []Action{
	ReadAction,  // 1
	WriteAction, // 2
}

// Valid checks if the action is a member of the Action enum
func (a Action) Valid() (err error) {
	switch a {
	case ReadAction: // 1
	case WriteAction: // 2
	default:
		err = ErrInvalidAction
	}
	return err
}

// ResourceType is an enum defining all resource types that have a permission model in platform
type ResourceType string

// Resource is an authorizable resource.
type Resource struct {
	Type  ResourceType `json:"type"`
	ID    *ID          `json:"id,omitempty"`
	OrgID *ID          `json:"orgID,omitempty"`
}

// String stringifies a resource
// as "orgs/<orgID>/<type>[/<id>]" when org-scoped, "<type>[/<id>]" otherwise.
// NOTE(review): filepath.Join yields backslashes on Windows; these strings look
// like slash-separated identifiers, not filesystem paths — confirm intent.
func (r Resource) String() string {
	if r.OrgID != nil && r.ID != nil {
		return filepath.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type), r.ID.String())
	}

	if r.OrgID != nil {
		return filepath.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type))
	}

	if r.ID != nil {
		return filepath.Join(string(r.Type), r.ID.String())
	}

	return string(r.Type)
}

const (
	// AuthorizationsResourceType gives permissions to one or more authorizations.
	AuthorizationsResourceType = ResourceType("authorizations") // 0
	// BucketsResourceType gives permissions to one or more buckets.
	BucketsResourceType = ResourceType("buckets") // 1
	// DashboardsResourceType gives permissions to one or more dashboards.
	DashboardsResourceType = ResourceType("dashboards") // 2
	// OrgsResourceType gives permissions to one or more orgs.
	OrgsResourceType = ResourceType("orgs") // 3
	// SourcesResourceType gives permissions to one or more sources.
	SourcesResourceType = ResourceType("sources") // 4
	// TasksResourceType gives permissions to one or more tasks.
	TasksResourceType = ResourceType("tasks") // 5
	// TelegrafsResourceType type gives permissions to one or more telegrafs.
	TelegrafsResourceType = ResourceType("telegrafs") // 6
	// UsersResourceType gives permissions to one or more users.
	UsersResourceType = ResourceType("users") // 7
	// VariablesResourceType gives permission to one or more variables.
	VariablesResourceType = ResourceType("variables") // 8
	// ScraperResourceType gives permission to one or more scrapers.
	ScraperResourceType = ResourceType("scrapers") // 9
	// SecretsResourceType gives permission to one or more secrets.
	SecretsResourceType = ResourceType("secrets") // 10
	// LabelsResourceType gives permission to one or more labels.
	LabelsResourceType = ResourceType("labels") // 11
	// ViewsResourceType gives permission to one or more views.
	ViewsResourceType = ResourceType("views") // 12
	// DocumentsResourceType gives permission to one or more documents.
	DocumentsResourceType = ResourceType("documents") // 13
)

// AllResourceTypes is the list of all known resource types.
var AllResourceTypes = []ResourceType{
	AuthorizationsResourceType, // 0
	BucketsResourceType,        // 1
	DashboardsResourceType,     // 2
	OrgsResourceType,           // 3
	SourcesResourceType,        // 4
	TasksResourceType,          // 5
	TelegrafsResourceType,      // 6
	UsersResourceType,          // 7
	VariablesResourceType,      // 8
	ScraperResourceType,        // 9
	SecretsResourceType,        // 10
	LabelsResourceType,         // 11
	ViewsResourceType,          // 12
	DocumentsResourceType,      // 13
}

// OrgResourceTypes is the list of all known resource types that belong to an organization.
var OrgResourceTypes = []ResourceType{
	BucketsResourceType,    // 1
	DashboardsResourceType, // 2
	SourcesResourceType,    // 4
	TasksResourceType,      // 5
	TelegrafsResourceType,  // 6
	UsersResourceType,      // 7
	VariablesResourceType,  // 8
	SecretsResourceType,    // 10
	DocumentsResourceType,  // 13
}

// Valid checks if the resource type is a member of the ResourceType enum.
func (r Resource) Valid() (err error) {
	return r.Type.Valid()
}

// Valid checks if the resource type is a member of the ResourceType enum.
func (t ResourceType) Valid() (err error) {
	switch t {
	case AuthorizationsResourceType: // 0
	case BucketsResourceType: // 1
	case DashboardsResourceType: // 2
	case OrgsResourceType: // 3
	case TasksResourceType: // 5
	case TelegrafsResourceType: // 6
	case SourcesResourceType: // 4
	case UsersResourceType: // 7
	case VariablesResourceType: // 8
	case ScraperResourceType: // 9
	case SecretsResourceType: // 10
	case LabelsResourceType: // 11
	case ViewsResourceType: // 12
	case DocumentsResourceType: // 13
	default:
		err = ErrInvalidResourceType
	}
	return err
}

// Permission defines an action and a resource.
type Permission struct { Action Action `json:"action"` Resource Resource `json:"resource"` } // Matches returns whether or not one permission matches the other. func (p Permission) Matches(perm Permission) bool { if p.Action != perm.Action { return false } if p.Resource.Type != perm.Resource.Type { return false } if p.Resource.OrgID == nil && p.Resource.ID == nil { return true } if p.Resource.OrgID != nil && p.Resource.ID == nil { pOrgID := *p.Resource.OrgID if perm.Resource.OrgID != nil { permOrgID := *perm.Resource.OrgID if pOrgID == permOrgID { return true } } } if p.Resource.ID != nil { pID := *p.Resource.ID if perm.Resource.ID != nil { permID := *perm.Resource.ID if pID == permID { return true } } } return false } func (p Permission) String() string { return fmt.Sprintf("%s:%s", p.Action, p.Resource) } // Valid checks if there the resource and action provided is known. func (p *Permission) Valid() error { if err := p.Resource.Valid(); err != nil { return &Error{ Code: EInvalid, Err: err, Msg: "invalid resource type for permission", } } if err := p.Action.Valid(); err != nil { return &Error{ Code: EInvalid, Err: err, Msg: "invalid action type for permission", } } if p.Resource.OrgID != nil && !(*p.Resource.OrgID).Valid() { return &Error{ Code: EInvalid, Err: ErrInvalidID, Msg: "invalid org id for permission", } } if p.Resource.ID != nil && !(*p.Resource.ID).Valid() { return &Error{ Code: EInvalid, Err: ErrInvalidID, Msg: "invalid id for permission", } } return nil } // NewPermission returns a permission with provided arguments. func NewPermission(a Action, rt ResourceType, orgID ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ Type: rt, OrgID: &orgID, }, } return p, p.Valid() } // NewGlobalPermission constructs a global permission capable of accessing any resource of type rt. 
func NewGlobalPermission(a Action, rt ResourceType) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ Type: rt, }, } return p, p.Valid() } // NewPermissionAtID creates a permission with the provided arguments. func NewPermissionAtID(id ID, a Action, rt ResourceType, orgID ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ Type: rt, OrgID: &orgID, ID: &id, }, } return p, p.Valid() } // OperPermissions are the default permissions for those who setup the application. func OperPermissions() []Permission { ps := []Permission{} for _, r := range AllResourceTypes { for _, a := range actions { ps = append(ps, Permission{Action: a, Resource: Resource{Type: r}}) } } return ps } // OwnerPermissions are the default permissions for those who own a resource. func OwnerPermissions(orgID ID) []Permission { ps := []Permission{} for _, r := range AllResourceTypes { for _, a := range actions { if r == OrgsResourceType { ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, ID: &orgID}}) continue } ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, OrgID: &orgID}}) } } // TODO(desa): this is likely just a thing for the alpha. We'll likely want a limited number of users about to // create organizations. https://github.com/influxdata/influxdb/issues/11344 ps = append(ps, Permission{Action: WriteAction, Resource: Resource{Type: OrgsResourceType}}, Permission{ReadAction, Resource{Type: OrgsResourceType}}) return ps } // MePermissions is the permission to read/write myself. func MePermissions(userID ID) []Permission { ps := []Permission{} for _, a := range actions { ps = append(ps, Permission{Action: a, Resource: Resource{Type: UsersResourceType, ID: &userID}}) } return ps } // MemberPermissions are the default permissions for those who can see a resource. 
func MemberPermissions(orgID ID) []Permission {
	ps := []Permission{}
	for _, r := range AllResourceTypes {
		// The org itself is addressed by ID; every other type is scoped under the org.
		if r == OrgsResourceType {
			ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, ID: &orgID}})
			continue
		}
		ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, OrgID: &orgID}})
	}

	return ps
}

fix(authz): revert https://github.com/influxdata/influxdb/pull/11441

This will disallow web users from creating organizations until we figure out how.

package influxdb

import (
	"errors"
	"fmt"
	"path/filepath"
)

var (
	// ErrAuthorizerNotSupported notes that the provided authorizer is not supported for the action you are trying to perform.
	ErrAuthorizerNotSupported = errors.New("your authorizer is not supported, please use *platform.Authorization as authorizer")
	// ErrInvalidResourceType notes that the provided resource is invalid
	ErrInvalidResourceType = errors.New("unknown resource type for permission")
	// ErrInvalidAction notes that the provided action is invalid
	ErrInvalidAction = errors.New("unknown action for permission")
)

// Authorizer will authorize a permission.
type Authorizer interface {
	// Allowed returns true if the associated permission is allowed by the authorizer
	Allowed(p Permission) bool

	// ID returns an identifier used for auditing.
	Identifier() ID

	// GetUserID returns the user id.
	GetUserID() ID

	// Kind metadata for auditing.
	Kind() string
}

// PermissionAllowed determines if a permission is allowed.
// It reports whether any permission in ps matches perm (see Permission.Matches).
func PermissionAllowed(perm Permission, ps []Permission) bool {
	for _, p := range ps {
		if p.Matches(perm) {
			return true
		}
	}
	return false
}

// Action is an enum defining all possible resource operations
type Action string

const (
	// ReadAction is the action for reading.
	ReadAction Action = "read" // 1
	// WriteAction is the action for writing.
	WriteAction Action = "write" // 2
)

// actions lists every member of the Action enum; used to build default permission sets.
var actions = []Action{
	ReadAction,  // 1
	WriteAction, // 2
}

// Valid checks if the action is a member of the Action enum
func (a Action) Valid() (err error) {
	switch a {
	case ReadAction: // 1
	case WriteAction: // 2
	default:
		err = ErrInvalidAction
	}
	return err
}

// ResourceType is an enum defining all resource types that have a permission model in platform
type ResourceType string

// Resource is an authorizable resource.
type Resource struct {
	Type  ResourceType `json:"type"`
	ID    *ID          `json:"id,omitempty"`
	OrgID *ID          `json:"orgID,omitempty"`
}

// String stringifies a resource
// as "orgs/<orgID>/<type>[/<id>]" when org-scoped, "<type>[/<id>]" otherwise.
func (r Resource) String() string {
	if r.OrgID != nil && r.ID != nil {
		return filepath.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type), r.ID.String())
	}

	if r.OrgID != nil {
		return filepath.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type))
	}

	if r.ID != nil {
		return filepath.Join(string(r.Type), r.ID.String())
	}

	return string(r.Type)
}

const (
	// AuthorizationsResourceType gives permissions to one or more authorizations.
	AuthorizationsResourceType = ResourceType("authorizations") // 0
	// BucketsResourceType gives permissions to one or more buckets.
	BucketsResourceType = ResourceType("buckets") // 1
	// DashboardsResourceType gives permissions to one or more dashboards.
	DashboardsResourceType = ResourceType("dashboards") // 2
	// OrgsResourceType gives permissions to one or more orgs.
	OrgsResourceType = ResourceType("orgs") // 3
	// SourcesResourceType gives permissions to one or more sources.
	SourcesResourceType = ResourceType("sources") // 4
	// TasksResourceType gives permissions to one or more tasks.
	TasksResourceType = ResourceType("tasks") // 5
	// TelegrafsResourceType type gives permissions to one or more telegrafs.
	TelegrafsResourceType = ResourceType("telegrafs") // 6
	// UsersResourceType gives permissions to one or more users.
	UsersResourceType = ResourceType("users") // 7
	// VariablesResourceType gives permission to one or more variables.
	VariablesResourceType = ResourceType("variables") // 8
	// ScraperResourceType gives permission to one or more scrapers.
	ScraperResourceType = ResourceType("scrapers") // 9
	// SecretsResourceType gives permission to one or more secrets.
	SecretsResourceType = ResourceType("secrets") // 10
	// LabelsResourceType gives permission to one or more labels.
	LabelsResourceType = ResourceType("labels") // 11
	// ViewsResourceType gives permission to one or more views.
	ViewsResourceType = ResourceType("views") // 12
	// DocumentsResourceType gives permission to one or more documents.
	DocumentsResourceType = ResourceType("documents") // 13
)

// AllResourceTypes is the list of all known resource types.
var AllResourceTypes = []ResourceType{
	AuthorizationsResourceType, // 0
	BucketsResourceType,        // 1
	DashboardsResourceType,     // 2
	OrgsResourceType,           // 3
	SourcesResourceType,        // 4
	TasksResourceType,          // 5
	TelegrafsResourceType,      // 6
	UsersResourceType,          // 7
	VariablesResourceType,      // 8
	ScraperResourceType,        // 9
	SecretsResourceType,        // 10
	LabelsResourceType,         // 11
	ViewsResourceType,          // 12
	DocumentsResourceType,      // 13
}

// OrgResourceTypes is the list of all known resource types that belong to an organization.
var OrgResourceTypes = []ResourceType{
	BucketsResourceType,    // 1
	DashboardsResourceType, // 2
	SourcesResourceType,    // 4
	TasksResourceType,      // 5
	TelegrafsResourceType,  // 6
	UsersResourceType,      // 7
	VariablesResourceType,  // 8
	SecretsResourceType,    // 10
	DocumentsResourceType,  // 13
}

// Valid checks if the resource type is a member of the ResourceType enum.
func (r Resource) Valid() (err error) {
	return r.Type.Valid()
}

// Valid checks if the resource type is a member of the ResourceType enum.
func (t ResourceType) Valid() (err error) {
	switch t {
	case AuthorizationsResourceType: // 0
	case BucketsResourceType: // 1
	case DashboardsResourceType: // 2
	case OrgsResourceType: // 3
	case TasksResourceType: // 5
	case TelegrafsResourceType: // 6
	case SourcesResourceType: // 4
	case UsersResourceType: // 7
	case VariablesResourceType: // 8
	case ScraperResourceType: // 9
	case SecretsResourceType: // 10
	case LabelsResourceType: // 11
	case ViewsResourceType: // 12
	case DocumentsResourceType: // 13
	default:
		err = ErrInvalidResourceType
	}

	return err
}

// Permission defines an action and a resource.
type Permission struct {
	Action   Action   `json:"action"`
	Resource Resource `json:"resource"`
}

// Matches returns whether or not one permission matches the other.
func (p Permission) Matches(perm Permission) bool {
	if p.Action != perm.Action {
		return false
	}

	if p.Resource.Type != perm.Resource.Type {
		return false
	}

	// Neither org- nor ID-scoped: p matches every resource of this type.
	if p.Resource.OrgID == nil && p.Resource.ID == nil {
		return true
	}

	// Org-scoped (no specific ID): match any perm carrying the same OrgID.
	if p.Resource.OrgID != nil && p.Resource.ID == nil {
		pOrgID := *p.Resource.OrgID
		if perm.Resource.OrgID != nil {
			permOrgID := *perm.Resource.OrgID
			if pOrgID == permOrgID {
				return true
			}
		}
	}

	// ID-scoped: the specific resource IDs must agree.
	if p.Resource.ID != nil {
		pID := *p.Resource.ID
		if perm.Resource.ID != nil {
			permID := *perm.Resource.ID
			if pID == permID {
				return true
			}
		}
	}

	return false
}

// String returns the permission formatted as "<action>:<resource>".
func (p Permission) String() string {
	return fmt.Sprintf("%s:%s", p.Action, p.Resource)
}

// Valid checks if the resource and action provided are known.
func (p *Permission) Valid() error { if err := p.Resource.Valid(); err != nil { return &Error{ Code: EInvalid, Err: err, Msg: "invalid resource type for permission", } } if err := p.Action.Valid(); err != nil { return &Error{ Code: EInvalid, Err: err, Msg: "invalid action type for permission", } } if p.Resource.OrgID != nil && !(*p.Resource.OrgID).Valid() { return &Error{ Code: EInvalid, Err: ErrInvalidID, Msg: "invalid org id for permission", } } if p.Resource.ID != nil && !(*p.Resource.ID).Valid() { return &Error{ Code: EInvalid, Err: ErrInvalidID, Msg: "invalid id for permission", } } return nil } // NewPermission returns a permission with provided arguments. func NewPermission(a Action, rt ResourceType, orgID ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ Type: rt, OrgID: &orgID, }, } return p, p.Valid() } // NewGlobalPermission constructs a global permission capable of accessing any resource of type rt. func NewGlobalPermission(a Action, rt ResourceType) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ Type: rt, }, } return p, p.Valid() } // NewPermissionAtID creates a permission with the provided arguments. func NewPermissionAtID(id ID, a Action, rt ResourceType, orgID ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ Type: rt, OrgID: &orgID, ID: &id, }, } return p, p.Valid() } // OperPermissions are the default permissions for those who setup the application. func OperPermissions() []Permission { ps := []Permission{} for _, r := range AllResourceTypes { for _, a := range actions { ps = append(ps, Permission{Action: a, Resource: Resource{Type: r}}) } } return ps } // OwnerPermissions are the default permissions for those who own a resource. 
func OwnerPermissions(orgID ID) []Permission { ps := []Permission{} for _, r := range AllResourceTypes { for _, a := range actions { if r == OrgsResourceType { ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, ID: &orgID}}) continue } ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, OrgID: &orgID}}) } } return ps } // MePermissions is the permission to read/write myself. func MePermissions(userID ID) []Permission { ps := []Permission{} for _, a := range actions { ps = append(ps, Permission{Action: a, Resource: Resource{Type: UsersResourceType, ID: &userID}}) } return ps } // MemberPermissions are the default permissions for those who can see a resource. func MemberPermissions(orgID ID) []Permission { ps := []Permission{} for _, r := range AllResourceTypes { if r == OrgsResourceType { ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, ID: &orgID}}) continue } ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, OrgID: &orgID}}) } return ps }
// NOTE(review): this chunk contains TWO copies of lxd/storage/pool interface
// separated by a git commit message ("Rename srcVolOnly to snapshots ...").
// The first copy is the pre-rename version; only the second reflects the
// commit. As concatenated, this region does not compile as a single file —
// confirm whether the stale first copy and the embedded commit text should
// be removed.
package storage

import (
	"io"
	"time"

	"github.com/lxc/lxd/lxd/backup"
	"github.com/lxc/lxd/lxd/cluster/request"
	"github.com/lxc/lxd/lxd/instance"
	"github.com/lxc/lxd/lxd/migration"
	"github.com/lxc/lxd/lxd/operations"
	"github.com/lxc/lxd/lxd/revert"
	"github.com/lxc/lxd/lxd/storage/drivers"
	"github.com/lxc/lxd/shared/api"
	"github.com/lxc/lxd/shared/instancewriter"
)

// MountInfo represents info about the result of a mount operation.
type MountInfo struct {
	DiskPath string // The location of the block disk (if supported).
}

// Pool represents a LXD storage pool.
type Pool interface {
	// Pool.
	ID() int64
	Name() string
	Driver() drivers.Driver
	Description() string
	Status() string
	LocalStatus() string
	ToAPI() api.StoragePool
	GetResources() (*api.ResourcesStoragePool, error)
	IsUsed() (bool, error)
	Delete(clientType request.ClientType, op *operations.Operation) error
	Update(clientType request.ClientType, newDesc string, newConfig map[string]string, op *operations.Operation) error
	Create(clientType request.ClientType, op *operations.Operation) error
	Mount() (bool, error)
	Unmount() (bool, error)
	ApplyPatch(name string) error
	GetVolume(volumeType drivers.VolumeType, contentType drivers.ContentType, name string, config map[string]string) drivers.Volume

	// Instances.
	FillInstanceConfig(inst instance.Instance, config map[string]string) error
	CreateInstance(inst instance.Instance, op *operations.Operation) error
	CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error)
	CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, allowInconsistent bool, op *operations.Operation) error
	CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error
	CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error
	RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error
	DeleteInstance(inst instance.Instance, op *operations.Operation) error
	UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error
	UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error
	CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error)
	ImportInstance(inst instance.Instance, op *operations.Operation) error
	MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error
	RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, allowInconsistent bool, op *operations.Operation) error
	BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error
	GetInstanceUsage(inst instance.Instance) (int64, error)
	SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error
	MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error)
	UnmountInstance(inst instance.Instance, op *operations.Operation) (bool, error)

	// Instance snapshots.
	CreateInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error
	RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error
	DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error
	RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error
	MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error)
	UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (bool, error)
	UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error

	// Images.
	EnsureImage(fingerprint string, op *operations.Operation) error
	DeleteImage(fingerprint string, op *operations.Operation) error
	UpdateImage(fingerprint string, newDesc string, newConfig map[string]string, op *operations.Operation) error

	// Custom volumes.
	CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error
	CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error
	UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error
	RenameCustomVolume(projectName string, volName string, newVolName string, op *operations.Operation) error
	DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error
	GetCustomVolumeDisk(projectName string, volName string) (string, error)
	GetCustomVolumeUsage(projectName string, volName string) (int64, error)
	MountCustomVolume(projectName string, volName string, op *operations.Operation) error
	UnmountCustomVolume(projectName string, volName string, op *operations.Operation) (bool, error)
	ImportCustomVolume(projectName string, poolVol backup.Config, op *operations.Operation) error
	RefreshCustomVolume(projectName string, srcProjectName string, volName, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error

	// Custom volume snapshots.
	CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, newExpiryDate time.Time, op *operations.Operation) error
	RenameCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, op *operations.Operation) error
	DeleteCustomVolumeSnapshot(projectName string, volName string, op *operations.Operation) error
	UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, newExpiryDate time.Time, op *operations.Operation) error
	RestoreCustomVolume(projectName string, volName string, snapshotName string, op *operations.Operation) error

	// Custom volume migration.
	MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type
	CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error
	MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error

	// Custom volume backups.
	BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error
	CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error

	// Storage volume recovery.
	ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error)
}

lxd/storage/pool/interface: Rename srcVolOnly to snapshots for RefreshCustomVolume and CreateCustomVolumeFromCopy

Aligns with their instance equivalent functions.

Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>

package storage

import (
	"io"
	"time"

	"github.com/lxc/lxd/lxd/backup"
	"github.com/lxc/lxd/lxd/cluster/request"
	"github.com/lxc/lxd/lxd/instance"
	"github.com/lxc/lxd/lxd/migration"
	"github.com/lxc/lxd/lxd/operations"
	"github.com/lxc/lxd/lxd/revert"
	"github.com/lxc/lxd/lxd/storage/drivers"
	"github.com/lxc/lxd/shared/api"
	"github.com/lxc/lxd/shared/instancewriter"
)

// MountInfo represents info about the result of a mount operation.
type MountInfo struct {
	DiskPath string // The location of the block disk (if supported).
}

// Pool represents a LXD storage pool.
type Pool interface {
	// Pool.
	ID() int64
	Name() string
	Driver() drivers.Driver
	Description() string
	Status() string
	LocalStatus() string
	ToAPI() api.StoragePool
	GetResources() (*api.ResourcesStoragePool, error)
	IsUsed() (bool, error)
	Delete(clientType request.ClientType, op *operations.Operation) error
	Update(clientType request.ClientType, newDesc string, newConfig map[string]string, op *operations.Operation) error
	Create(clientType request.ClientType, op *operations.Operation) error
	Mount() (bool, error)
	Unmount() (bool, error)
	ApplyPatch(name string) error
	GetVolume(volumeType drivers.VolumeType, contentType drivers.ContentType, name string, config map[string]string) drivers.Volume

	// Instances.
	FillInstanceConfig(inst instance.Instance, config map[string]string) error
	CreateInstance(inst instance.Instance, op *operations.Operation) error
	CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error)
	CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, allowInconsistent bool, op *operations.Operation) error
	CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error
	CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error
	RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error
	DeleteInstance(inst instance.Instance, op *operations.Operation) error
	UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error
	UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error
	CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error)
	ImportInstance(inst instance.Instance, op *operations.Operation) error
	MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error
	RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, allowInconsistent bool, op *operations.Operation) error
	BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error
	GetInstanceUsage(inst instance.Instance) (int64, error)
	SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error
	MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error)
	UnmountInstance(inst instance.Instance, op *operations.Operation) (bool, error)

	// Instance snapshots.
	CreateInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error
	RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error
	DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error
	RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error
	MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error)
	UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (bool, error)
	UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error

	// Images.
	EnsureImage(fingerprint string, op *operations.Operation) error
	DeleteImage(fingerprint string, op *operations.Operation) error
	UpdateImage(fingerprint string, newDesc string, newConfig map[string]string, op *operations.Operation) error

	// Custom volumes.
	CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error
	CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName, desc string, config map[string]string, srcPoolName, srcVolName string, snapshots bool, op *operations.Operation) error
	UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error
	RenameCustomVolume(projectName string, volName string, newVolName string, op *operations.Operation) error
	DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error
	GetCustomVolumeDisk(projectName string, volName string) (string, error)
	GetCustomVolumeUsage(projectName string, volName string) (int64, error)
	MountCustomVolume(projectName string, volName string, op *operations.Operation) error
	UnmountCustomVolume(projectName string, volName string, op *operations.Operation) (bool, error)
	ImportCustomVolume(projectName string, poolVol backup.Config, op *operations.Operation) error
	RefreshCustomVolume(projectName string, srcProjectName string, volName, desc string, config map[string]string, srcPoolName, srcVolName string, snapshots bool, op *operations.Operation) error

	// Custom volume snapshots.
	CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, newExpiryDate time.Time, op *operations.Operation) error
	RenameCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, op *operations.Operation) error
	DeleteCustomVolumeSnapshot(projectName string, volName string, op *operations.Operation) error
	UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, newExpiryDate time.Time, op *operations.Operation) error
	RestoreCustomVolume(projectName string, volName string, snapshotName string, op *operations.Operation) error

	// Custom volume migration.
	MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type
	CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error
	MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error

	// Custom volume backups.
	BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error
	CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error

	// Storage volume recovery.
	ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error)
}
// Copyright (c) 2014-2015 Solano Labs Inc. All Rights Reserved.

package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"go/parser"
	"go/token"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"syscall"

	"gopkg.in/yaml.v2"
)

const (
	BEGOTTEN      = "Begotten"
	BEGOTTEN_LOCK = "Begotten.lock"

	EMPTY_DEP       = "_begot_empty_dep"
	IMPLICIT_PREFIX = "_begot_implicit"

	// This is an identifier for the version of begot. It gets written into
	// Begotten.lock.
	//CODE_VERSION = 'begot-1.0-' + hashlib.sha1(open(__file__).read()).hexdigest()[:8]
	CODE_VERSION = "FIXME"

	// This should change if the format of Begotten.lock changes in an incompatible
	// way. (But prefer changing it in compatible ways and not incrementing this.)
	FILE_VERSION = 1
)

// Known public servers and how many path components form the repo name.
var KNOWN_GIT_SERVERS = map[string]int{
	"github.com":    2,
	"bitbucket.org": 2,
	"begot.test":    2,
}

// RE_NON_IDENTIFIER_CHAR matches any non-word character.
var RE_NON_IDENTIFIER_CHAR = regexp.MustCompile("\\W")

// replace_non_identifier_chars maps every non-word character in 'in' to '_'.
func replace_non_identifier_chars(in string) string {
	return RE_NON_IDENTIFIER_CHAR.ReplaceAllLiteralString(in, "_")
}

// Command builds an exec.Cmd running 'name args...' with cwd as working dir.
func Command(cwd string, name string, args ...string) (cmd *exec.Cmd) {
	cmd = exec.Command(name, args...)
	cmd.Dir = cwd
	return
}

// cc ("check call") runs the command and panics if it fails.
func cc(cwd string, name string, args ...string) {
	//fmt.Println("+", "in", filepath.Base(cwd), ":", name, args)
	cmd := Command(cwd, name, args...)
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}

// co ("check output") runs the command and returns its stdout, panicking on failure.
func co(cwd string, name string, args ...string) string {
	//fmt.Println("+", "in", filepath.Base(cwd), ":", name, args)
	cmd := Command(cwd, name, args...)
	if outb, err := cmd.Output(); err != nil {
		panic(err)
	} else {
		return string(outb)
	}
}

// contains_str reports whether val occurs in lst.
func contains_str(lst []string, val string) bool {
	for _, item := range lst {
		if item == val {
			return true
		}
	}
	return false
}

// sha1str returns the hex-encoded SHA-1 of a string.
func sha1str(in string) string {
	sum := sha1.Sum([]byte(in))
	return hex.EncodeToString(sum[:])
}

// sha1bts returns the hex-encoded SHA-1 of a byte slice.
func sha1bts(in []byte) string {
	sum := sha1.Sum(in)
	return hex.EncodeToString(sum[:])
}

// realpath returns the absolute path with all symlinks resolved; panics on failure.
func realpath(path string) (out string) {
	if abs, err := filepath.Abs(path); err != nil {
		panic(err)
	} else if out, err = filepath.EvalSymlinks(abs); err != nil {
		panic(err)
	}
	return
}

// ln_sf mimics `ln -sf`: ensures path is a symlink pointing at target,
// replacing whatever is there. Reports whether a new link was created.
func ln_sf(target, path string) (created bool, err error) {
	current, e := os.Readlink(path)
	if e != nil || current != target {
		if err = os.RemoveAll(path); err != nil {
			return
		}
		if err = os.MkdirAll(filepath.Dir(path), 0777); err != nil {
			return
		}
		if err = os.Symlink(target, path); err != nil {
			return
		}
		created = true
	}
	return
}

// yaml_copy deep-copies 'in' into 'out' by round-tripping through YAML.
func yaml_copy(in interface{}, out interface{}) {
	if bts, err := yaml.Marshal(in); err != nil {
		panic(err)
	} else if err = yaml.Unmarshal(bts, out); err != nil {
		panic(err)
	}
}

// Dep is one dependency: a name plus where it comes from.
// The unexported 'name' field is not marshaled to YAML.
type Dep struct {
	name        string
	Import_path string
	Git_url     string
	Subpath     string
	Ref         string
	Aliases     []string
}

// A Begotten or Begotten.lock file contains exactly one of these in YAML format.
type BegottenFileStruct struct { Meta struct { File_version int Generated_by string } Deps map[string]interface{} // either string or Dep Repo_aliases map[string]interface{} // either string or subset of Dep {git_url, ref} Repo_deps map[string][]string } type BegottenFile struct { data BegottenFileStruct } func BegottenFileNew(fn string) (bf *BegottenFile) { bf = new(BegottenFile) bf.data.Meta.File_version = -1 if data, err := ioutil.ReadFile(fn); err != nil { panic(err) } else if err := yaml.Unmarshal(data, &bf.data); err != nil { panic(err) } ver := bf.data.Meta.File_version if ver != -1 && ver != FILE_VERSION { panic(fmt.Errorf("Incompatible file version for %r; please run 'begot update'.", ver)) } return } func (bf *BegottenFile) save(fn string) { bf.data.Meta.File_version = FILE_VERSION bf.data.Meta.Generated_by = CODE_VERSION if data, err := yaml.Marshal(bf.data); err != nil { panic(err) } else if err := ioutil.WriteFile(fn, data, 0666); err != nil { panic(err) } } func (bf *BegottenFile) default_git_url_from_repo_path(repo_path string) string { // Hook for testing: test_repo_path := os.Getenv("BEGOT_TEST_REPOS") if strings.HasPrefix(repo_path, "begot.test/") && test_repo_path != "" { return "file://" + filepath.Join(test_repo_path, repo_path) } // Default to https for other repos: return "https://" + repo_path } func (bf *BegottenFile) parse_dep(name string, v interface{}) (dep Dep) { dep.name = name if _, ok := v.(string); ok { v = map[interface{}]interface{}{"import_path": v} } mv, ok := v.(map[interface{}]interface{}) if !ok { panic(fmt.Errorf("Dependency value must be string or dict, got %T: %v", v, v)) } yaml_copy(mv, &dep) if dep.Import_path != "" { parts := strings.Split(dep.Import_path, "/") if repo_parts, ok := KNOWN_GIT_SERVERS[parts[0]]; !ok { panic(fmt.Errorf("Unknown git server %r for %r", parts[0], name)) } else { repo_path := strings.Join(parts[:repo_parts+1], "/") dep.Git_url = bf.default_git_url_from_repo_path(repo_path) dep.Subpath = 
				strings.Join(parts[repo_parts+1:], "/")
			dep.Aliases = append(dep.Aliases, dep.Import_path)

			// Redirect through repo aliases:
			if alias, ok := bf.data.Repo_aliases[repo_path]; ok {
				var aliasdep Dep // only allow git_url and ref
				if aliasstr, ok := alias.(string); ok {
					aliasstr = bf.default_git_url_from_repo_path(aliasstr)
					alias = yaml.MapSlice{yaml.MapItem{"git_url", aliasstr}}
				}
				yaml_copy(alias, &aliasdep)
				if aliasdep.Git_url != "" {
					dep.Git_url = aliasdep.Git_url
				}
				if aliasdep.Ref != "" {
					dep.Ref = aliasdep.Ref
				}
			}
		}
	}

	if dep.Git_url == "" {
		panic(fmt.Errorf("Missing 'git_url' for %q; only git is supported for now", name))
	}

	// Default to tracking master when no ref was given.
	if dep.Ref == "" {
		dep.Ref = "master"
	}

	return
}

// deps parses and returns every dependency declared in the file.
func (bf *BegottenFile) deps() (out []Dep) {
	out = make([]Dep, len(bf.data.Deps))
	i := 0
	for name, v := range bf.data.Deps {
		out[i] = bf.parse_dep(name, v)
		i++
	}
	return
}

// set_deps replaces the Deps map with the given parsed deps, keyed by name.
func (bf *BegottenFile) set_deps(deps []Dep) {
	bf.data.Deps = make(map[string]interface{})
	for _, dep := range deps {
		bf.data.Deps[dep.name] = dep
	}
}

// repo_deps returns the repo-to-repo dependency map, creating it if absent.
func (bf *BegottenFile) repo_deps() map[string][]string {
	if bf.data.Repo_deps == nil {
		bf.data.Repo_deps = make(map[string][]string)
	}
	return bf.data.Repo_deps
}

// set_repo_deps replaces the repo-to-repo dependency map.
func (bf *BegottenFile) set_repo_deps(repo_deps map[string][]string) {
	bf.data.Repo_deps = repo_deps
}

// Env holds the directory layout begot uses (cache, workspaces, lock file).
type Env struct {
	Home             string
	BegotCache       string
	DepWorkspaceDir  string
	CodeWorkspaceDir string
	RepoDir          string
	CacheLock        string
}

// EnvNew derives the layout from $HOME and $BEGOT_CACHE (defaulting the
// cache to ~/.cache/begot).
func EnvNew() (env *Env) {
	env = new(Env)
	env.Home = os.Getenv("HOME")
	env.BegotCache = os.Getenv("BEGOT_CACHE")
	if env.BegotCache == "" {
		env.BegotCache = filepath.Join(env.Home, ".cache", "begot")
	}
	env.DepWorkspaceDir = filepath.Join(env.BegotCache, "depwk")
	env.CodeWorkspaceDir = filepath.Join(env.BegotCache, "wk")
	env.RepoDir = filepath.Join(env.BegotCache, "repo")
	env.CacheLock = filepath.Join(env.BegotCache, "lock")
	return
}

// Builder drives dependency resolution and workspace construction for one
// code root.
type Builder struct {
	env *Env

	code_root string
	code_wk   string
	dep_wk    string

	bf        *BegottenFile
	deps      []Dep
	repo_deps map[string][]string

	// processing_repo is the git URL currently being set up; used to record
	// repo-to-repo edges in repo_deps.
	processing_repo string
	cached_lf_hash  string
}

// BuilderNew loads Begotten (or Begotten.lock when use_lockfile) under
// code_root and derives the hashed workspace paths for it.
func BuilderNew(env *Env, code_root string, use_lockfile bool) (b *Builder) {
	b = new(Builder)
	b.env = env
	b.code_root = realpath(code_root)
	hsh := sha1str(b.code_root)[:8]
	b.code_wk = filepath.Join(env.CodeWorkspaceDir, hsh)
	b.dep_wk = filepath.Join(env.DepWorkspaceDir, hsh)
	var fn string
	if use_lockfile {
		fn = filepath.Join(b.code_root, BEGOTTEN_LOCK)
	} else {
		fn = filepath.Join(b.code_root, BEGOTTEN)
	}
	b.bf = BegottenFileNew(fn)
	b.deps = b.bf.deps()
	b.repo_deps = b.bf.repo_deps()
	return
}

// _all_repos maps each dep's git URL to its ref.
func (b *Builder) _all_repos() (out map[string]string) {
	out = make(map[string]string)
	for _, dep := range b.deps {
		out[dep.Git_url] = dep.Ref
	}
	return
}

// get_locked_refs_for_update returns the locked refs to KEEP (i.e. for repos
// not selected by 'limits' or their transitive dependents). Empty limits
// means everything gets updated, so nothing is pinned.
func (b *Builder) get_locked_refs_for_update(limits []string) (out map[string]string) {
	out = make(map[string]string)
	if len(limits) == 0 {
		return
	}

	// BegottenFileNew panics if the lockfile is missing/unreadable; translate
	// that into a friendlier message for the user.
	defer func() {
		if err := recover(); err != nil {
			panic(fmt.Errorf("You must have a %s to do a limited update.", BEGOTTEN_LOCK))
		}
	}()

	bf_lock := BegottenFileNew(filepath.Join(b.code_root, BEGOTTEN_LOCK))

	lock_deps := bf_lock.deps()
	lock_repo_deps := bf_lock.repo_deps()

	// match reports whether a dep name matches any of the limit globs.
	match := func(name string) bool {
		for _, limit := range limits {
			if matched, err := filepath.Match(limit, name); err != nil {
				panic(err)
			} else if matched {
				return true
			}
		}
		return false
	}

	repos_to_update := make(map[string]bool)
	for _, dep := range lock_deps {
		if match(dep.name) {
			repos_to_update[dep.Git_url] = true
		}
	}

	// transitive closure: anything a to-update repo depends on must also be
	// free to move.
	n := -1
	for len(repos_to_update) != n {
		n = len(repos_to_update)
		repos := make([]string, 0, len(repos_to_update))
		for repo, _ := range repos_to_update {
			repos = append(repos, repo)
		}
		for _, repo := range repos {
			if deps, ok := lock_repo_deps[repo]; ok {
				for _, dep := range deps {
					repos_to_update[dep] = true
				}
			}
		}
	}

	for _, dep := range lock_deps {
		if !repos_to_update[dep.Git_url] {
			out[dep.Git_url] = dep.Ref
		}
	}

	return
}

// setup_repos resolves a ref for every dep (optionally fetching) and checks
// out each repo, looping because _setup_repo may discover new deps.
func (b *Builder) setup_repos(fetch bool, limits []string) *Builder {
	processed_deps := 0
	repo_versions := make(map[string]string)
	var
fetched_set map[string]bool if fetch { fetched_set = make(map[string]bool) } locked_refs := b.get_locked_refs_for_update(limits) for processed_deps < len(b.deps) { repos_to_setup := []string{} for i, dep := range b.deps[processed_deps:] { have := repo_versions[dep.Git_url] if fetch && strings.HasPrefix(dep.name, IMPLICIT_PREFIX) && have != "" { // Implicit deps take the revision of an explicit dep from the same // repo, if one exists. b.deps[processed_deps+i].Ref = have continue } want := locked_refs[dep.Git_url] if want == "" { want = b._resolve_ref(dep.Git_url, dep.Ref, fetched_set) } if have != "" { if have != want { panic(fmt.Errorf("Conflicting versions for %r: have %s, want %s (%s)", dep.name, have, want, dep.Ref)) } } else { repo_versions[dep.Git_url] = want repos_to_setup = append(repos_to_setup, dep.Git_url) } b.deps[processed_deps+i].Ref = want } processed_deps = len(b.deps) // This will add newly-found dependencies to b.deps. for _, url := range repos_to_setup { b._setup_repo(url, repo_versions[url]) } } return b } func (b *Builder) save_lockfile() *Builder { // Should only be called when loaded from Begotten, not lockfile. 
	b.bf.set_deps(b.deps)
	b.bf.set_repo_deps(b.repo_deps)
	b.bf.save(filepath.Join(b.code_root, BEGOTTEN_LOCK))
	return b
}

// _add_implicit_dep parses v as a dep named 'name' and appends it to b.deps.
func (b *Builder) _add_implicit_dep(name string, v interface{}) (dep Dep) {
	dep = b.bf.parse_dep(name, v)
	//FIXME append: b.deps = b.deps.append(dep)
	b.deps = append(b.deps, dep)
	return
}

// _record_repo_dep records that the repo currently being processed depends
// on git_url (self-edges are skipped, duplicates deduped).
func (b *Builder) _record_repo_dep(git_url string) {
	if b.processing_repo != git_url {
		lst := b.repo_deps[b.processing_repo]
		if !contains_str(lst, git_url) {
			b.repo_deps[b.processing_repo] = append(lst, git_url)
		}
	}
}

// _repo_dir is the local cache directory for a repo, keyed by SHA-1 of its URL.
func (b *Builder) _repo_dir(url string) string {
	return filepath.Join(b.env.RepoDir, sha1str(url))
}

// _resolve_ref resolves ref (branch, tag, or sha) to a full sha inside the
// cached clone of url, cloning it first (or fetching once per run, tracked
// via fetched_set) as needed. Panics if the ref cannot be resolved.
func (b *Builder) _resolve_ref(url, ref string, fetched_set map[string]bool) (resolved_ref string) {
	repo_dir := b._repo_dir(url)
	if fi, err := os.Stat(repo_dir); err != nil || !fi.Mode().IsDir() {
		fmt.Printf("Cloning %s\n", url)
		cc("/", "git", "clone", "-q", url, repo_dir)
		// Get into detached head state so we can manipulate things without
		// worrying about messing up a branch.
		cc(repo_dir, "git", "checkout", "-q", "--detach")
	} else if fetched_set != nil {
		if !fetched_set[url] {
			fmt.Printf("Updating %s\n", url)
			cc(repo_dir, "git", "fetch")
			fetched_set[url] = true
		}
	}

	// Prefer the remote-tracking ref ("origin/<ref>"); fall back to a local
	// sha/tag. Stderr is silenced since a failed rev-parse is expected here.
	for _, pfx := range []string{"origin/", ""} {
		cmd := Command(repo_dir, "git", "rev-parse", "--verify", pfx+ref)
		cmd.Stderr = nil
		if outb, err := cmd.Output(); err == nil {
			resolved_ref = strings.TrimSpace(string(outb))
			return
		}
	}
	panic(fmt.Errorf("Can't resolve reference %q for %s", ref, url))
}

// _setup_repo checks out resolved_ref in url's cached clone and rewrites its
// import paths, discovering transitive dependencies along the way.
func (b *Builder) _setup_repo(url, resolved_ref string) {
	b.processing_repo = url
	hsh := sha1str(url)[:8]
	repo_dir := b._repo_dir(url)

	fmt.Printf("Fixing imports in %s\n", url)

	// TODO: can this ever fail (if we made it here)? if so, need to fall back
	// to fetch first.
	cc(repo_dir, "git", "reset", "-q", "--hard", resolved_ref)
	// Original Python fallback, kept for reference:
	//try:
	//  cc(['git', 'reset', '-q', '--hard', resolved_ref], cwd=repo_dir)
	//except subprocess.CalledProcessError:
	//  print "Missing local ref %r, updating" % resolved_ref
	//  cc(['git', 'fetch', '-q'], cwd=repo_dir)
	//  cc(['git', 'reset', '-q', '--hard', resolved_ref], cwd=repo_dir)

	// Match up sub-deps to our deps.
	sub_dep_map := make(map[string]string)
	self_deps := []Dep{}
	sub_bg_path := filepath.Join(repo_dir, BEGOTTEN_LOCK)
	if _, err := os.Stat(sub_bg_path); err == nil {
		sub_bg := BegottenFileNew(sub_bg_path)
		// Add implicit and explicit external dependencies.
		for _, sub_dep := range sub_bg.deps() {
			b._record_repo_dep(sub_dep.Git_url)
			our_dep := b._lookup_dep_by_git_url_and_path(sub_dep.Git_url, sub_dep.Subpath)
			if our_dep != nil {
				// Shared dep: the refs must agree, then reuse our name for it.
				if sub_dep.Ref != our_dep.Ref {
					panic(fmt.Sprintf("Conflict: %s depends on %s at %s, we depend on it at %s", url, sub_dep.Git_url, sub_dep.Ref, our_dep.Ref))
				}
				sub_dep_map[sub_dep.name] = our_dep.name
			} else {
				// Include a hash of this repo identifier so that if two repos use the
				// same dep name to refer to two different things, they don't conflict
				// when we flatten deps.
				transitive_name := fmt.Sprintf("_begot_transitive_%s/%s", hsh, sub_dep.name)
				sub_dep_map[sub_dep.name] = transitive_name
				sub_dep.name = transitive_name
				// FIXME append: b.deps.append(sub_dep)
				b.deps = append(b.deps, sub_dep)
			}
		}
		// Allow relative import paths within this repo.
		e := filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error {
			basename := filepath.Base(path)
			if err != nil {
				return err
			} else if fi.IsDir() && basename[0] == '.' {
				// Skip hidden dirs (.git etc.).
				return filepath.SkipDir
			} else if path == repo_dir {
				return nil
			}
			relpath := path[len(repo_dir)+1:]
			our_dep := b._lookup_dep_by_git_url_and_path(url, relpath)
			if our_dep != nil {
				sub_dep_map[relpath] = our_dep.name
			} else {
				// See comment on _lookup_dep_name for rationale.
self_name := fmt.Sprintf("_begot_self_%s/%s", hsh, replace_non_identifier_chars(relpath)) sub_dep_map[relpath] = self_name self_deps = append(self_deps, Dep{ name: self_name, Git_url: url, Subpath: relpath, Ref: resolved_ref}) } return nil }) if e != nil { panic(e) } } used_rewrites := make(map[string]bool) b._rewrite_imports(repo_dir, &sub_dep_map, &used_rewrites) msg := fmt.Sprintf("rewritten by begot for %s", b.code_root) cc(repo_dir, "git", "commit", "--allow-empty", "-a", "-q", "-m", msg) // Add only the self-deps that were used, to reduce clutter. for _, self_dep := range self_deps { if used_rewrites[self_dep.name] { //FIXME append: b.deps.append(self_dep) b.deps = append(b.deps, self_dep) } } } func (b *Builder) _rewrite_imports(repo_dir string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if strings.HasSuffix(path, ".go") { b._rewrite_file(path, sub_dep_map, used_rewrites) } return nil }) } func (b *Builder) _rewrite_file(path string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { bts, err := ioutil.ReadFile(path) if err != nil { panic(err) } fs := token.NewFileSet() f, err := parser.ParseFile(fs, path, bts, parser.ImportsOnly) if err != nil { panic(err) } var pos int var out bytes.Buffer out.Grow(len(bts) * 5 / 4) for _, imp := range f.Imports { start := fs.Position(imp.Path.Pos()).Offset end := fs.Position(imp.Path.End()).Offset orig_import := string(bts[start+1 : end-1]) rewritten := b._rewrite_import(orig_import, sub_dep_map, used_rewrites) if orig_import != rewritten { out.Write(bts[pos : start+1]) out.WriteString(rewritten) pos = end - 1 } } out.Write(bts[pos:]) if err := ioutil.WriteFile(path, out.Bytes(), 0666); err != nil { panic(err) } } func (b *Builder) _rewrite_import(imp string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) string { if rewrite, ok := (*sub_dep_map)[imp]; ok { imp = 
rewrite (*used_rewrites)[rewrite] = true } else { parts := strings.Split(imp, "/") if _, ok := KNOWN_GIT_SERVERS[parts[0]]; ok { imp = b._lookup_dep_name(imp) } } return imp } func (b *Builder) _lookup_dep_name(imp string) string { for _, dep := range b.deps { if contains_str(dep.Aliases, imp) { b._record_repo_dep(dep.Git_url) return dep.name } } // Each dep turns into a symlink at build time. Packages can be nested, so we // might depend on 'a' and 'a/b'. If we create a symlink for 'a', we can't // also create 'a/b'. So rename it to 'a_b'. name := IMPLICIT_PREFIX + replace_non_identifier_chars(imp) dep := b._add_implicit_dep(name, imp) b._record_repo_dep(dep.Git_url) return name } func (b *Builder) _lookup_dep_by_git_url_and_path(git_url string, subpath string) *Dep { for _, dep := range b.deps { if dep.Git_url == git_url && dep.Subpath == subpath { return &dep } } return nil } func (b *Builder) tag_repos() { // Run this after setup_repos. for url, ref := range b._all_repos() { out := co(b._repo_dir(url), "git", "tag", "--force", b._tag_hash(ref)) for _, line := range strings.SplitAfter(out, "\n") { if !strings.HasPrefix(line, "Updated tag ") { fmt.Print(line) } } } } func (b *Builder) _tag_hash(ref string) string { // We want to tag the current state with a name that depends on: // 1. The base ref that we rewrote from. // 2. The full set of deps that describe how we rewrote imports. // The contents of Begotten.lock suffice for (2): if b.cached_lf_hash == "" { lockfile := filepath.Join(b.code_root, BEGOTTEN_LOCK) if bts, err := ioutil.ReadFile(lockfile); err != nil { panic(err) } else { b.cached_lf_hash = sha1bts(bts) } } return "_begot_rewrote_" + sha1str(ref+b.cached_lf_hash) } func (b *Builder) run(args []string) { b._reset_to_tags() // Set up code_wk. 
cbin := filepath.Join(b.code_wk, "bin") depsrc := filepath.Join(b.dep_wk, "src") empty_dep := filepath.Join(depsrc, EMPTY_DEP) os.MkdirAll(filepath.Join(cbin, empty_dep), 0777) if _, err := ln_sf(cbin, filepath.Join(b.code_root, "bin")); err != nil { panic(fmt.Errorf("It looks like you have an existing 'bin' directory. " + "Please remove it before using begot.")) } ln_sf(b.code_root, filepath.Join(b.code_wk, "src")) old_links := make(map[string]bool) filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.Mode()&os.ModeType == os.ModeSymlink { old_links[path] = true } return nil }) for _, dep := range b.deps { path := filepath.Join(depsrc, dep.name) target := filepath.Join(b._repo_dir(dep.Git_url), dep.Subpath) if created, err := ln_sf(target, path); err != nil { panic(err) } else if created { // If we've created or changed this symlink, any pkg files that go may // have compiled from it should be invalidated. // Note: This makes some assumptions about go's build layout. It should // be safe enough, though it may be simpler to just blow away everything // if any dep symlinks change. pkgs, _ := filepath.Glob(filepath.Join(b.dep_wk, "pkg", "*", dep.name+".*")) for _, pkg := range pkgs { os.RemoveAll(pkg) } } delete(old_links, path) } // Remove unexpected links. for old_link := range old_links { os.RemoveAll(old_link) } // Try to remove all directories; ignore ENOTEMPTY errors. var dirs []string filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.IsDir() { dirs = append(dirs, path) } return nil }) for i := len(dirs) - 1; i >= 0; i-- { if err := syscall.Rmdir(dirs[i]); err != nil && err != syscall.ENOTEMPTY { panic(err) } } // Set up empty dep. // // The go tool tries to be helpful by not rebuilding modified code if that // code is in a workspace and no packages from that workspace are mentioned // on the command line. 
See cmd/go/pkg.go:isStale around line 680. // // We are explicitly managing all of the workspaces in our GOPATH and do // indeed want to rebuild everything when dependencies change. That is // required by the goal of reproducible builds: the alternative would mean // what you get for this build depends on the state of a previous build. // // The go tool doesn't provide any way of disabling this "helpful" // functionality. The simplest workaround is to always mention a package from // the dependency workspace on the command line. Hence, we add an empty // package. empty_go := filepath.Join(empty_dep, "empty.go") if fi, err := os.Stat(empty_go); err != nil || !fi.Mode().IsRegular() { os.MkdirAll(filepath.Dir(empty_go), 0777) if err := ioutil.WriteFile(empty_go, []byte(fmt.Sprintf("package %s\n", EMPTY_DEP)), 0666); err != nil { panic(err) } } // Overwrite any existing GOPATH. if argv0, err := exec.LookPath(args[0]); err != nil { panic(err) } else { os.Setenv("GOPATH", fmt.Sprintf("%s:%s", b.code_wk, b.dep_wk)) os.Chdir(b.code_root) err := syscall.Exec(argv0, args, os.Environ()) panic(fmt.Errorf("exec failed: %s", err)) } } func (b *Builder) _reset_to_tags() { defer func() { if recover() != nil { panic(fmt.Errorf("Begotten.lock refers to a missing local commit. " + "Please run 'begot fetch' first.")) } }() for url, ref := range b._all_repos() { wd := b._repo_dir(url) if fi, err := os.Stat(wd); err != nil || !fi.Mode().IsDir() { panic("not directory") } cc(wd, "git", "reset", "-q", "--hard", "tags/"+b._tag_hash(ref)) } } func (b *Builder) clean() { os.RemoveAll(b.dep_wk) os.RemoveAll(b.code_wk) os.Remove(filepath.Join(b.code_root, "bin")) } func get_gopath(env *Env) string { // This duplicates logic in Builder, but we want to just get the GOPATH without // parsing anything. 
for {
		if _, err := os.Stat(BEGOTTEN); err == nil {
			break
		}
		if wd, err := os.Getwd(); err != nil {
			panic(err)
		} else if wd == "/" {
			panic(fmt.Errorf("Couldn't find %s file", BEGOTTEN))
		}
		if err := os.Chdir(".."); err != nil {
			panic(err)
		}
	}
	hsh := sha1str(realpath("."))[:8]
	code_wk := filepath.Join(env.CodeWorkspaceDir, hsh)
	dep_wk := filepath.Join(env.DepWorkspaceDir, hsh)
	return code_wk + ":" + dep_wk
}

// _cache_lock holds the cache lock file open (and therefore flocked) for the
// lifetime of this process and anything it execs.
var _cache_lock *os.File

// lock_cache creates the cache directory and takes a non-blocking exclusive
// flock on its lock file, panicking if the lock is already held by another
// begot process.
func lock_cache(env *Env) {
	os.MkdirAll(env.BegotCache, 0777)
	f, err := os.OpenFile(env.CacheLock, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		panic(err)
	}
	err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
	if err != nil {
		// Fixed: %r is a Python format verb that Go's fmt does not support
		// (it would print "%!r(...)"); %q quotes the path.
		panic(fmt.Errorf("Can't lock %q", env.BegotCache))
	}
	// Fixed: the original assigned with :=, which shadowed the package-level
	// _cache_lock. The global stayed nil and the only reference to the
	// *os.File was a local, so the runtime finalizer could close the fd and
	// silently drop the flock. Assign the global so the file stays open for
	// the lifetime of this process and anything exec'd by this process.
	_cache_lock = f
}

// print_help prints usage (placeholder) and exits with the given status.
func print_help(ret int) {
	fmt.Fprintln(os.Stderr, "FIXME")
	os.Exit(ret)
}

// main dispatches the begot subcommands. Any panic anywhere in the tool is
// reported as a plain error message and exits with status 1.
func main() {
	env := EnvNew()
	defer func() {
		if err := recover(); err != nil {
			fmt.Printf("Error: %s\n", err)
			os.Exit(1)
		}
	}()
	lock_cache(env)
	if len(os.Args) < 2 {
		print_help(1)
	}
	switch os.Args[1] {
	case "update":
		BuilderNew(env, ".", false).setup_repos(true, os.Args[2:]).save_lockfile().tag_repos()
	case "just_rewrite":
		BuilderNew(env, ".", false).setup_repos(false, []string{}).save_lockfile().tag_repos()
	case "fetch":
		BuilderNew(env, ".", true).setup_repos(false, []string{}).tag_repos()
	case "build":
		BuilderNew(env, ".", true).run([]string{"go", "install", "./...", EMPTY_DEP})
	case "go":
		BuilderNew(env, ".", true).run(append([]string{"go"}, os.Args[2:]...))
	case "exec":
		BuilderNew(env, ".", true).run(os.Args[2:])
	case "clean":
		BuilderNew(env, ".", false).clean()
	case "gopath":
		fmt.Println(get_gopath(env))
	case "help":
		print_help(0)
	default:
		fmt.Fprintf(os.Stderr, "Unknown subcommand %q\n", os.Args[1])
		print_help(1)
	}
}

// TODO(cleanup): get rid of global processing_repo, pass in args; inline
// add_implicit_dep.
// (Fixed: this note was bare prose in the file, which is a Go syntax error;
// it is now a comment. The cleanup it describes is applied in the revision
// below.)

// Copyright (c) 2014-2015 Solano Labs Inc. All Rights Reserved.
package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"go/parser"
	"go/token"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"syscall"

	"gopkg.in/yaml.v2"
)

const (
	// File names begot looks for at the code root.
	BEGOTTEN      = "Begotten"
	BEGOTTEN_LOCK = "Begotten.lock"

	// Name of the always-present empty package used to defeat go's staleness
	// check (see the comment in Builder.run).
	EMPTY_DEP = "_begot_empty_dep"

	// Prefix for dep names synthesized from bare import paths.
	IMPLICIT_PREFIX = "_begot_implicit"

	// This is an identifier for the version of begot. It gets written into
	// Begotten.lock.
	//CODE_VERSION = 'begot-1.0-' + hashlib.sha1(open(__file__).read()).hexdigest()[:8]
	CODE_VERSION = "FIXME"

	// This should change if the format of Begotten.lock changes in an incompatible
	// way. (But prefer changing it in compatible ways and not incrementing this.)
	FILE_VERSION = 1
)

// Known public servers and how many path components form the repo name.
var KNOWN_GIT_SERVERS = map[string]int{
	"github.com":    2,
	"bitbucket.org": 2,
	"begot.test":    2,
}

// Matches any character that is not a word character (i.e. not legal in an
// identifier).
var RE_NON_IDENTIFIER_CHAR = regexp.MustCompile("\\W")

// replace_non_identifier_chars rewrites in so it can be used as part of an
// identifier-like name: every non-word character becomes '_'.
func replace_non_identifier_chars(in string) string {
	return RE_NON_IDENTIFIER_CHAR.ReplaceAllLiteralString(in, "_")
}

// Command builds an *exec.Cmd that runs `name args...` with cwd as its
// working directory.
func Command(cwd string, name string, args ...string) (cmd *exec.Cmd) {
	cmd = exec.Command(name, args...)
	cmd.Dir = cwd
	return
}

// cc ("checked call") runs a command in cwd and panics if it fails.
func cc(cwd string, name string, args ...string) {
	//fmt.Println("+", "in", filepath.Base(cwd), ":", name, args)
	cmd := Command(cwd, name, args...)
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}

// co ("checked output") runs a command in cwd and returns its stdout,
// panicking if the command fails.
func co(cwd string, name string, args ...string) string {
	//fmt.Println("+", "in", filepath.Base(cwd), ":", name, args)
	cmd := Command(cwd, name, args...)
	if outb, err := cmd.Output(); err != nil {
		panic(err)
	} else {
		return string(outb)
	}
}

// contains_str reports whether val is present in lst.
func contains_str(lst []string, val string) bool {
	for _, item := range lst {
		if item == val {
			return true
		}
	}
	return false
}

// sha1str returns the hex-encoded SHA-1 digest of a string.
func sha1str(in string) string {
	sum := sha1.Sum([]byte(in))
	return hex.EncodeToString(sum[:])
}

// sha1bts returns the hex-encoded SHA-1 digest of a byte slice.
func sha1bts(in []byte) string {
	sum := sha1.Sum(in)
	return hex.EncodeToString(sum[:])
}

// realpath returns the absolute path of path with all symlinks resolved;
// panics on failure.
func realpath(path string) (out string) {
	if abs, err := filepath.Abs(path); err != nil {
		panic(err)
	} else if out, err = filepath.EvalSymlinks(abs); err != nil {
		panic(err)
	}
	return
}

// ln_sf mimics `ln -sf`: ensure path is a symlink pointing at target,
// replacing whatever was there (creating parent directories as needed).
// created reports whether the link was (re)created, i.e. it was missing or
// pointed elsewhere.
func ln_sf(target, path string) (created bool, err error) {
	current, e := os.Readlink(path)
	if e != nil || current != target {
		if err = os.RemoveAll(path); err != nil {
			return
		}
		if err = os.MkdirAll(filepath.Dir(path), 0777); err != nil {
			return
		}
		if err = os.Symlink(target, path); err != nil {
			return
		}
		created = true
	}
	return
}

// yaml_copy copies in into out by round-tripping through YAML; used to
// populate typed structs from loosely-typed YAML maps. Panics on failure.
func yaml_copy(in interface{}, out interface{}) {
	if bts, err := yaml.Marshal(in); err != nil {
		panic(err)
	} else if err = yaml.Unmarshal(bts, out); err != nil {
		panic(err)
	}
}

// Dep describes a single dependency. Exported fields are populated from YAML
// (via yaml_copy); name is assigned internally by the parsing code.
type Dep struct {
	name        string
	Import_path string
	Git_url     string
	Subpath     string
	Ref         string
	Aliases     []string
}

// A Begotten or Begotten.lock file contains exactly one of these in YAML format.
type BegottenFileStruct struct { Meta struct { File_version int Generated_by string } Deps map[string]interface{} // either string or Dep Repo_aliases map[string]interface{} // either string or subset of Dep {git_url, ref} Repo_deps map[string][]string } type BegottenFile struct { data BegottenFileStruct } func BegottenFileNew(fn string) (bf *BegottenFile) { bf = new(BegottenFile) bf.data.Meta.File_version = -1 if data, err := ioutil.ReadFile(fn); err != nil { panic(err) } else if err := yaml.Unmarshal(data, &bf.data); err != nil { panic(err) } ver := bf.data.Meta.File_version if ver != -1 && ver != FILE_VERSION { panic(fmt.Errorf("Incompatible file version for %r; please run 'begot update'.", ver)) } return } func (bf *BegottenFile) save(fn string) { bf.data.Meta.File_version = FILE_VERSION bf.data.Meta.Generated_by = CODE_VERSION if data, err := yaml.Marshal(bf.data); err != nil { panic(err) } else if err := ioutil.WriteFile(fn, data, 0666); err != nil { panic(err) } } func (bf *BegottenFile) default_git_url_from_repo_path(repo_path string) string { // Hook for testing: test_repo_path := os.Getenv("BEGOT_TEST_REPOS") if strings.HasPrefix(repo_path, "begot.test/") && test_repo_path != "" { return "file://" + filepath.Join(test_repo_path, repo_path) } // Default to https for other repos: return "https://" + repo_path } func (bf *BegottenFile) parse_dep(name string, v interface{}) (dep Dep) { dep.name = name if _, ok := v.(string); ok { v = map[interface{}]interface{}{"import_path": v} } mv, ok := v.(map[interface{}]interface{}) if !ok { panic(fmt.Errorf("Dependency value must be string or dict, got %T: %v", v, v)) } yaml_copy(mv, &dep) if dep.Import_path != "" { parts := strings.Split(dep.Import_path, "/") if repo_parts, ok := KNOWN_GIT_SERVERS[parts[0]]; !ok { panic(fmt.Errorf("Unknown git server %r for %r", parts[0], name)) } else { repo_path := strings.Join(parts[:repo_parts+1], "/") dep.Git_url = bf.default_git_url_from_repo_path(repo_path) dep.Subpath = 
strings.Join(parts[repo_parts+1:], "/") dep.Aliases = append(dep.Aliases, dep.Import_path) // Redirect through repo aliases: if alias, ok := bf.data.Repo_aliases[repo_path]; ok { var aliasdep Dep // only allow git_url and ref if aliasstr, ok := alias.(string); ok { aliasstr = bf.default_git_url_from_repo_path(aliasstr) alias = yaml.MapSlice{yaml.MapItem{"git_url", aliasstr}} } yaml_copy(alias, &aliasdep) if aliasdep.Git_url != "" { dep.Git_url = aliasdep.Git_url } if aliasdep.Ref != "" { dep.Ref = aliasdep.Ref } } } } if dep.Git_url == "" { panic(fmt.Errorf("Missing 'git_url' for %q; only git is supported for now", name)) } if dep.Ref == "" { dep.Ref = "master" } return } func (bf *BegottenFile) deps() (out []Dep) { out = make([]Dep, len(bf.data.Deps)) i := 0 for name, v := range bf.data.Deps { out[i] = bf.parse_dep(name, v) i++ } return } func (bf *BegottenFile) set_deps(deps []Dep) { bf.data.Deps = make(map[string]interface{}) for _, dep := range deps { bf.data.Deps[dep.name] = dep } } func (bf *BegottenFile) repo_deps() map[string][]string { if bf.data.Repo_deps == nil { bf.data.Repo_deps = make(map[string][]string) } return bf.data.Repo_deps } func (bf *BegottenFile) set_repo_deps(repo_deps map[string][]string) { bf.data.Repo_deps = repo_deps } type Env struct { Home string BegotCache string DepWorkspaceDir string CodeWorkspaceDir string RepoDir string CacheLock string } func EnvNew() (env *Env) { env = new(Env) env.Home = os.Getenv("HOME") env.BegotCache = os.Getenv("BEGOT_CACHE") if env.BegotCache == "" { env.BegotCache = filepath.Join(env.Home, ".cache", "begot") } env.DepWorkspaceDir = filepath.Join(env.BegotCache, "depwk") env.CodeWorkspaceDir = filepath.Join(env.BegotCache, "wk") env.RepoDir = filepath.Join(env.BegotCache, "repo") env.CacheLock = filepath.Join(env.BegotCache, "lock") return } type Builder struct { env *Env code_root string code_wk string dep_wk string bf *BegottenFile deps []Dep repo_deps map[string][]string cached_lf_hash string } func 
BuilderNew(env *Env, code_root string, use_lockfile bool) (b *Builder) { b = new(Builder) b.env = env b.code_root = realpath(code_root) hsh := sha1str(b.code_root)[:8] b.code_wk = filepath.Join(env.CodeWorkspaceDir, hsh) b.dep_wk = filepath.Join(env.DepWorkspaceDir, hsh) var fn string if use_lockfile { fn = filepath.Join(b.code_root, BEGOTTEN_LOCK) } else { fn = filepath.Join(b.code_root, BEGOTTEN) } b.bf = BegottenFileNew(fn) b.deps = b.bf.deps() b.repo_deps = b.bf.repo_deps() return } func (b *Builder) _all_repos() (out map[string]string) { out = make(map[string]string) for _, dep := range b.deps { out[dep.Git_url] = dep.Ref } return } func (b *Builder) get_locked_refs_for_update(limits []string) (out map[string]string) { out = make(map[string]string) if len(limits) == 0 { return } defer func() { if err := recover(); err != nil { panic(fmt.Errorf("You must have a %s to do a limited update.", BEGOTTEN_LOCK)) } }() bf_lock := BegottenFileNew(filepath.Join(b.code_root, BEGOTTEN_LOCK)) lock_deps := bf_lock.deps() lock_repo_deps := bf_lock.repo_deps() match := func(name string) bool { for _, limit := range limits { if matched, err := filepath.Match(limit, name); err != nil { panic(err) } else if matched { return true } } return false } repos_to_update := make(map[string]bool) for _, dep := range lock_deps { if match(dep.name) { repos_to_update[dep.Git_url] = true } } // transitive closure n := -1 for len(repos_to_update) != n { n = len(repos_to_update) repos := make([]string, 0, len(repos_to_update)) for repo, _ := range repos_to_update { repos = append(repos, repo) } for _, repo := range repos { if deps, ok := lock_repo_deps[repo]; ok { for _, dep := range deps { repos_to_update[dep] = true } } } } for _, dep := range lock_deps { if !repos_to_update[dep.Git_url] { out[dep.Git_url] = dep.Ref } } return } func (b *Builder) setup_repos(fetch bool, limits []string) *Builder { processed_deps := 0 repo_versions := make(map[string]string) var fetched_set map[string]bool if 
fetch {
		fetched_set = make(map[string]bool)
	}
	locked_refs := b.get_locked_refs_for_update(limits)
	// _setup_repo may append transitive deps to b.deps, so keep iterating
	// until the slice stops growing.
	for processed_deps < len(b.deps) {
		repos_to_setup := []string{}
		for i, dep := range b.deps[processed_deps:] {
			have := repo_versions[dep.Git_url]
			if fetch && strings.HasPrefix(dep.name, IMPLICIT_PREFIX) && have != "" {
				// Implicit deps take the revision of an explicit dep from the same
				// repo, if one exists.
				b.deps[processed_deps+i].Ref = have
				continue
			}
			want := locked_refs[dep.Git_url]
			if want == "" {
				want = b._resolve_ref(dep.Git_url, dep.Ref, fetched_set)
			}
			if have != "" {
				if have != want {
					// Fixed: the original used %r, a Python format verb that
					// Go's fmt does not support; %q quotes the dep name.
					panic(fmt.Errorf("Conflicting versions for %q: have %s, want %s (%s)",
						dep.name, have, want, dep.Ref))
				}
			} else {
				repo_versions[dep.Git_url] = want
				repos_to_setup = append(repos_to_setup, dep.Git_url)
			}
			b.deps[processed_deps+i].Ref = want
		}
		processed_deps = len(b.deps)
		// This will add newly-found dependencies to b.deps.
		for _, url := range repos_to_setup {
			b._setup_repo(url, repo_versions[url])
		}
	}
	return b
}

// save_lockfile writes the current deps and repo-deps back out as
// Begotten.lock. Should only be called when loaded from Begotten, not the
// lockfile. Returns b for chaining.
func (b *Builder) save_lockfile() *Builder {
	b.bf.set_deps(b.deps)
	b.bf.set_repo_deps(b.repo_deps)
	b.bf.save(filepath.Join(b.code_root, BEGOTTEN_LOCK))
	return b
}

// _record_repo_dep records that src_url's repo depends on dep_url's repo
// (self-edges and duplicates are skipped).
func (b *Builder) _record_repo_dep(src_url, dep_url string) {
	if src_url != dep_url {
		lst := b.repo_deps[src_url]
		if !contains_str(lst, dep_url) {
			b.repo_deps[src_url] = append(lst, dep_url)
		}
	}
}

// _repo_dir returns the local cache directory for a repo, keyed by the SHA-1
// of its URL.
func (b *Builder) _repo_dir(url string) string {
	return filepath.Join(b.env.RepoDir, sha1str(url))
}

// _resolve_ref resolves a symbolic ref (branch/tag/sha) in the given repo to
// a full commit sha, cloning the repo on first use and fetching at most once
// per run when fetched_set is non-nil. Panics if the ref cannot be resolved.
func (b *Builder) _resolve_ref(url, ref string, fetched_set map[string]bool) (resolved_ref string) {
	repo_dir := b._repo_dir(url)
	if fi, err := os.Stat(repo_dir); err != nil || !fi.Mode().IsDir() {
		fmt.Printf("Cloning %s\n", url)
		cc("/", "git", "clone", "-q", url, repo_dir)
		// Get into detached head state so we can manipulate things without
		// worrying about messing up a branch.
cc(repo_dir, "git", "checkout", "-q", "--detach") } else if fetched_set != nil { if !fetched_set[url] { fmt.Printf("Updating %s\n", url) cc(repo_dir, "git", "fetch") fetched_set[url] = true } } for _, pfx := range []string{"origin/", ""} { cmd := Command(repo_dir, "git", "rev-parse", "--verify", pfx+ref) cmd.Stderr = nil if outb, err := cmd.Output(); err == nil { resolved_ref = strings.TrimSpace(string(outb)) return } } panic(fmt.Errorf("Can't resolve reference %q for %s", ref, url)) } func (b *Builder) _setup_repo(url, resolved_ref string) { hsh := sha1str(url)[:8] repo_dir := b._repo_dir(url) fmt.Printf("Fixing imports in %s\n", url) cc(repo_dir, "git", "reset", "-q", "--hard", resolved_ref) // Match up sub-deps to our deps. sub_dep_map := make(map[string]string) self_deps := []Dep{} sub_bg_path := filepath.Join(repo_dir, BEGOTTEN_LOCK) if _, err := os.Stat(sub_bg_path); err == nil { sub_bg := BegottenFileNew(sub_bg_path) // Add implicit and explicit external dependencies. for _, sub_dep := range sub_bg.deps() { b._record_repo_dep(url, sub_dep.Git_url) our_dep := b._lookup_dep_by_git_url_and_path(sub_dep.Git_url, sub_dep.Subpath) if our_dep != nil { if sub_dep.Ref != our_dep.Ref { panic(fmt.Sprintf("Conflict: %s depends on %s at %s, we depend on it at %s", url, sub_dep.Git_url, sub_dep.Ref, our_dep.Ref)) } sub_dep_map[sub_dep.name] = our_dep.name } else { // Include a hash of this repo identifier so that if two repos use the // same dep name to refer to two different things, they don't conflict // when we flatten deps. transitive_name := fmt.Sprintf("_begot_transitive_%s/%s", hsh, sub_dep.name) sub_dep_map[sub_dep.name] = transitive_name sub_dep.name = transitive_name // FIXME append: b.deps.append(sub_dep) b.deps = append(b.deps, sub_dep) } } // Allow relative import paths within this repo. 
e := filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { basename := filepath.Base(path) if err != nil { return err } else if fi.IsDir() && basename[0] == '.' { return filepath.SkipDir } else if path == repo_dir { return nil } relpath := path[len(repo_dir)+1:] our_dep := b._lookup_dep_by_git_url_and_path(url, relpath) if our_dep != nil { sub_dep_map[relpath] = our_dep.name } else { // See comment on _lookup_dep_name for rationale. self_name := fmt.Sprintf("_begot_self_%s/%s", hsh, replace_non_identifier_chars(relpath)) sub_dep_map[relpath] = self_name self_deps = append(self_deps, Dep{ name: self_name, Git_url: url, Subpath: relpath, Ref: resolved_ref}) } return nil }) if e != nil { panic(e) } } used_rewrites := make(map[string]bool) b._rewrite_imports(url, repo_dir, &sub_dep_map, &used_rewrites) msg := fmt.Sprintf("rewritten by begot for %s", b.code_root) cc(repo_dir, "git", "commit", "--allow-empty", "-a", "-q", "-m", msg) // Add only the self-deps that were used, to reduce clutter. 
for _, self_dep := range self_deps { if used_rewrites[self_dep.name] { //FIXME append: b.deps.append(self_dep) b.deps = append(b.deps, self_dep) } } } func (b *Builder) _rewrite_imports(src_url, repo_dir string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { filepath.Walk(repo_dir, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if strings.HasSuffix(path, ".go") { b._rewrite_file(src_url, path, sub_dep_map, used_rewrites) } return nil }) } func (b *Builder) _rewrite_file(src_url, path string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) { bts, err := ioutil.ReadFile(path) if err != nil { panic(err) } fs := token.NewFileSet() f, err := parser.ParseFile(fs, path, bts, parser.ImportsOnly) if err != nil { panic(err) } var pos int var out bytes.Buffer out.Grow(len(bts) * 5 / 4) for _, imp := range f.Imports { start := fs.Position(imp.Path.Pos()).Offset end := fs.Position(imp.Path.End()).Offset orig_import := string(bts[start+1 : end-1]) rewritten := b._rewrite_import(src_url, orig_import, sub_dep_map, used_rewrites) if orig_import != rewritten { out.Write(bts[pos : start+1]) out.WriteString(rewritten) pos = end - 1 } } out.Write(bts[pos:]) if err := ioutil.WriteFile(path, out.Bytes(), 0666); err != nil { panic(err) } } func (b *Builder) _rewrite_import(src_url, imp string, sub_dep_map *map[string]string, used_rewrites *map[string]bool) string { if rewrite, ok := (*sub_dep_map)[imp]; ok { imp = rewrite (*used_rewrites)[rewrite] = true } else { parts := strings.Split(imp, "/") if _, ok := KNOWN_GIT_SERVERS[parts[0]]; ok { imp = b._lookup_dep_name(src_url, imp) } } return imp } func (b *Builder) _lookup_dep_name(src_url, imp string) string { for _, dep := range b.deps { if contains_str(dep.Aliases, imp) { b._record_repo_dep(src_url, dep.Git_url) return dep.name } } // Each dep turns into a symlink at build time. Packages can be nested, so we // might depend on 'a' and 'a/b'. 
If we create a symlink for 'a', we can't // also create 'a/b'. So rename it to 'a_b'. name := IMPLICIT_PREFIX + replace_non_identifier_chars(imp) dep := b.bf.parse_dep(name, imp) b.deps = append(b.deps, dep) b._record_repo_dep(src_url, dep.Git_url) return name } func (b *Builder) _lookup_dep_by_git_url_and_path(git_url string, subpath string) *Dep { for _, dep := range b.deps { if dep.Git_url == git_url && dep.Subpath == subpath { return &dep } } return nil } func (b *Builder) tag_repos() { // Run this after setup_repos. for url, ref := range b._all_repos() { out := co(b._repo_dir(url), "git", "tag", "--force", b._tag_hash(ref)) for _, line := range strings.SplitAfter(out, "\n") { if !strings.HasPrefix(line, "Updated tag ") { fmt.Print(line) } } } } func (b *Builder) _tag_hash(ref string) string { // We want to tag the current state with a name that depends on: // 1. The base ref that we rewrote from. // 2. The full set of deps that describe how we rewrote imports. // The contents of Begotten.lock suffice for (2): if b.cached_lf_hash == "" { lockfile := filepath.Join(b.code_root, BEGOTTEN_LOCK) if bts, err := ioutil.ReadFile(lockfile); err != nil { panic(err) } else { b.cached_lf_hash = sha1bts(bts) } } return "_begot_rewrote_" + sha1str(ref+b.cached_lf_hash) } func (b *Builder) run(args []string) { b._reset_to_tags() // Set up code_wk. cbin := filepath.Join(b.code_wk, "bin") depsrc := filepath.Join(b.dep_wk, "src") empty_dep := filepath.Join(depsrc, EMPTY_DEP) os.MkdirAll(filepath.Join(cbin, empty_dep), 0777) if _, err := ln_sf(cbin, filepath.Join(b.code_root, "bin")); err != nil { panic(fmt.Errorf("It looks like you have an existing 'bin' directory. 
" + "Please remove it before using begot.")) } ln_sf(b.code_root, filepath.Join(b.code_wk, "src")) old_links := make(map[string]bool) filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.Mode()&os.ModeType == os.ModeSymlink { old_links[path] = true } return nil }) for _, dep := range b.deps { path := filepath.Join(depsrc, dep.name) target := filepath.Join(b._repo_dir(dep.Git_url), dep.Subpath) if created, err := ln_sf(target, path); err != nil { panic(err) } else if created { // If we've created or changed this symlink, any pkg files that go may // have compiled from it should be invalidated. // Note: This makes some assumptions about go's build layout. It should // be safe enough, though it may be simpler to just blow away everything // if any dep symlinks change. pkgs, _ := filepath.Glob(filepath.Join(b.dep_wk, "pkg", "*", dep.name+".*")) for _, pkg := range pkgs { os.RemoveAll(pkg) } } delete(old_links, path) } // Remove unexpected links. for old_link := range old_links { os.RemoveAll(old_link) } // Try to remove all directories; ignore ENOTEMPTY errors. var dirs []string filepath.Walk(depsrc, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if fi.IsDir() { dirs = append(dirs, path) } return nil }) for i := len(dirs) - 1; i >= 0; i-- { if err := syscall.Rmdir(dirs[i]); err != nil && err != syscall.ENOTEMPTY { panic(err) } } // Set up empty dep. // // The go tool tries to be helpful by not rebuilding modified code if that // code is in a workspace and no packages from that workspace are mentioned // on the command line. See cmd/go/pkg.go:isStale around line 680. // // We are explicitly managing all of the workspaces in our GOPATH and do // indeed want to rebuild everything when dependencies change. That is // required by the goal of reproducible builds: the alternative would mean // what you get for this build depends on the state of a previous build. 
// // The go tool doesn't provide any way of disabling this "helpful" // functionality. The simplest workaround is to always mention a package from // the dependency workspace on the command line. Hence, we add an empty // package. empty_go := filepath.Join(empty_dep, "empty.go") if fi, err := os.Stat(empty_go); err != nil || !fi.Mode().IsRegular() { os.MkdirAll(filepath.Dir(empty_go), 0777) if err := ioutil.WriteFile(empty_go, []byte(fmt.Sprintf("package %s\n", EMPTY_DEP)), 0666); err != nil { panic(err) } } // Overwrite any existing GOPATH. if argv0, err := exec.LookPath(args[0]); err != nil { panic(err) } else { os.Setenv("GOPATH", fmt.Sprintf("%s:%s", b.code_wk, b.dep_wk)) os.Chdir(b.code_root) err := syscall.Exec(argv0, args, os.Environ()) panic(fmt.Errorf("exec failed: %s", err)) } } func (b *Builder) _reset_to_tags() { defer func() { if recover() != nil { panic(fmt.Errorf("Begotten.lock refers to a missing local commit. " + "Please run 'begot fetch' first.")) } }() for url, ref := range b._all_repos() { wd := b._repo_dir(url) if fi, err := os.Stat(wd); err != nil || !fi.Mode().IsDir() { panic("not directory") } cc(wd, "git", "reset", "-q", "--hard", "tags/"+b._tag_hash(ref)) } } func (b *Builder) clean() { os.RemoveAll(b.dep_wk) os.RemoveAll(b.code_wk) os.Remove(filepath.Join(b.code_root, "bin")) } func get_gopath(env *Env) string { // This duplicates logic in Builder, but we want to just get the GOPATH without // parsing anything. 
for { if _, err := os.Stat(BEGOTTEN); err == nil { break } if wd, err := os.Getwd(); err != nil { panic(err) } else if wd == "/" { panic(fmt.Errorf("Couldn't find %s file", BEGOTTEN)) } if err := os.Chdir(".."); err != nil { panic(err) } } hsh := sha1str(realpath("."))[:8] code_wk := filepath.Join(env.CodeWorkspaceDir, hsh) dep_wk := filepath.Join(env.DepWorkspaceDir, hsh) return code_wk + ":" + dep_wk } var _cache_lock *os.File func lock_cache(env *Env) { os.MkdirAll(env.BegotCache, 0777) _cache_lock, err := os.OpenFile(env.CacheLock, os.O_CREATE|os.O_RDWR, 0666) if err != nil { panic(err) } err = syscall.Flock(int(_cache_lock.Fd()), syscall.LOCK_EX|syscall.LOCK_NB) if err != nil { panic(fmt.Errorf("Can't lock %r", env.BegotCache)) } // Leave file open for lifetime of this process and anything exec'd by this // process. } func print_help(ret int) { fmt.Fprintln(os.Stderr, "FIXME") os.Exit(ret) } func main() { env := EnvNew() defer func() { if err := recover(); err != nil { fmt.Printf("Error: %s\n", err) os.Exit(1) } }() lock_cache(env) if len(os.Args) < 2 { print_help(1) } switch os.Args[1] { case "update": BuilderNew(env, ".", false).setup_repos(true, os.Args[2:]).save_lockfile().tag_repos() case "just_rewrite": BuilderNew(env, ".", false).setup_repos(false, []string{}).save_lockfile().tag_repos() case "fetch": BuilderNew(env, ".", true).setup_repos(false, []string{}).tag_repos() case "build": BuilderNew(env, ".", true).run([]string{"go", "install", "./...", EMPTY_DEP}) case "go": BuilderNew(env, ".", true).run(append([]string{"go"}, os.Args[2:]...)) case "exec": BuilderNew(env, ".", true).run(os.Args[2:]) case "clean": BuilderNew(env, ".", false).clean() case "gopath": fmt.Println(get_gopath(env)) case "help": print_help(0) default: fmt.Fprintf(os.Stderr, "Unknown subcommand %q\n", os.Args[1]) print_help(1) } }
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* fixconfig automatically fixes the prow config to have automatically generated security repo presubmits transformed from the kubernetes presubmits NOTE: this makes a few assumptions - $PWD/prow/config.yaml is where the config lives (unless you supply --config=) - `presubmits:` exists - ` kubernetes-security/kubernetes:` exists in presubmits - some other ` org/repo:` exists in presubmits *after* ` kubernetes-security/kubernetes:` - the original contents around this will be kept, but this section will be automatically rewritten */ package main import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "log" "os" "regexp" "strings" "github.com/ghodss/yaml" flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/test-infra/prow/config" "k8s.io/test-infra/prow/kube" ) var configPath = flag.String("config", "", "path to prow/config.yaml, defaults to $PWD/prow/config.yaml") var configJSONPath = flag.String("config-json", "", "path to jobs/config.json, defaults to $PWD/jobs/config.json") // config.json is the worst but contains useful information :-( type configJSON map[string]map[string]interface{} func (c configJSON) ScenarioForJob(jobName string) string { if scenario, ok := c[jobName]["scenario"]; ok { return scenario.(string) } return "" } func (c configJSON) ArgsForJob(jobName string) []string { res := []string{} if args, ok := c[jobName]["args"]; ok { for _, arg := range args.([]interface{}) { res 
= append(res, arg.(string)) } } return res } func readConfigJSON(path string) (config configJSON, err error) { raw, err := ioutil.ReadFile(path) if err != nil { return nil, err } config = configJSON{} err = json.Unmarshal(raw, &config) if err != nil { return nil, err } return config, nil } func readConfig(path string) (raw []byte, parsed *config.Config, err error) { raw, err = ioutil.ReadFile(path) if err != nil { return nil, nil, err } parsed = &config.Config{} err = yaml.Unmarshal(raw, parsed) if err != nil { return nil, nil, err } return raw, parsed, nil } // get the start/end byte indexes of the security repo presubmits // in the raw config.yaml bytes func getSecurityRepoJobsIndex(configBytes []byte) (start, end int, err error) { // find security-repo config beginning // first find presubmits presubmitIdx := bytes.Index(configBytes, ([]byte)("presubmits:")) // then find k-s/k: startRegex := regexp.MustCompile("(?m)^ kubernetes-security/kubernetes:$") loc := startRegex.FindIndex(configBytes[presubmitIdx:]) if loc == nil { return 0, 0, fmt.Errorf("failed to find start of security repo presubmits") } start = presubmitIdx + loc[1] // must be like ` org/repo:` loc = regexp.MustCompile("(?m)^ [^ #-][^ #]+/.+:$").FindIndex(configBytes[start:]) if loc == nil { return 0, 0, fmt.Errorf("failed to find end of security repo presubmits") } // loc[0] is the beginning of the match end = start + loc[0] return start, end, nil } func volumeIsCacheSSD(v *kube.Volume) bool { return v.HostPath != nil && strings.HasPrefix(v.HostPath.Path, "/mnt/disks/ssd0") } // strip cache ssd related settings func stripCache(j *config.Presubmit) { container := &j.Spec.Containers[0] // strip cache disk related args etc filteredArgs := []string{} for _, arg := range container.Args { if strings.HasPrefix(arg, "--git-cache") { continue } filteredArgs = append(filteredArgs, arg) } container.Args = filteredArgs // filter cache related env filteredEnv := []kube.EnvVar{} for _, env := range container.Env 
{ // don't keep bazel cache directory env if env.Name == "TEST_TMPDIR" { continue } filteredEnv = append(filteredEnv, env) } container.Env = filteredEnv // filter cache disk volumes, swap DIND volume for filteredVolumes := []kube.Volume{} removedVolumeNames := sets.String{} for _, volume := range j.Spec.Volumes { if volumeIsCacheSSD(&volume) { removedVolumeNames.Insert(volume.Name) continue } filteredVolumes = append(filteredVolumes, volume) } j.Spec.Volumes = filteredVolumes // filter out mounts for filtered out volumes filteredVolumeMounts := []kube.VolumeMount{} for _, volumeMount := range container.VolumeMounts { if removedVolumeNames.Has(volumeMount.Name) { continue } filteredVolumeMounts = append(filteredVolumeMounts, volumeMount) } container.VolumeMounts = filteredVolumeMounts // remove """cache port""" container.Ports = []kube.Port{} } // run after stripCache to make sure we still at least mount an emptyDir to // /docker-graph for dind enabled jobs func ensureDockerGraphVolume(j *config.Presubmit) { // make sure this is a docker-in-docker job first dindEnabled := false container := &j.Spec.Containers[0] for _, env := range container.Env { if env.Name == "DOCKER_IN_DOCKER_ENABLED" && env.Value == "true" { dindEnabled = true break } } if !dindEnabled { return } // filter out old /docker-graph volume mounts of any sort const dockerGraphMountPath = "/docker-graph" oldDockerGraphVolumeMount := "" removedVolumeNames := sets.String{} filteredVolumeMounts := []kube.VolumeMount{} for _, volumeMount := range container.VolumeMounts { if volumeMount.MountPath == dockerGraphMountPath { removedVolumeNames.Insert(volumeMount.Name) continue } filteredVolumeMounts = append(filteredVolumeMounts, volumeMount) } container.VolumeMounts = filteredVolumeMounts // remove old volumes associated with old mounts if any if removedVolumeNames.Len() > 0 { filteredVolumes := []kube.Volume{} for _, volume := range j.Spec.Volumes { if volume.Name == oldDockerGraphVolumeMount { continue } 
filteredVolumes = append(filteredVolumes, volume) } j.Spec.Volumes = filteredVolumes } // add new auto generated volume mount const dockerGraphVolumeMount = "auto-generated-docker-graph-volume-mount" container.VolumeMounts = append(container.VolumeMounts, kube.VolumeMount{ Name: dockerGraphVolumeMount, MountPath: dockerGraphMountPath, }) // add matching auto generated emptyDir volume volumeSource := kube.VolumeSource{} volumeSource.EmptyDir = &kube.EmptyDirVolumeSource{} volume := kube.Volume{ Name: dockerGraphVolumeMount, VolumeSource: volumeSource, } j.Spec.Volumes = append(j.Spec.Volumes, volume) } // returns all of the labels for presets that mount the cache SSD volume // as "key: v" func getCacheSSDPresetLabels(c *config.Config) (labels sets.String) { labels = sets.NewString() for _, preset := range c.Presets { for _, volume := range preset.Volumes { if volumeIsCacheSSD(&volume) { for k, v := range preset.Labels { labels.Insert(fmt.Sprintf("%s: %s", k, v)) } break } } } return labels } // convert a kubernetes/kubernetes job to a kubernetes-security/kubernetes job // dropLabels should be a set of "k: v" strings // xref: prow/config/config_test.go replace(...) 
func convertJobToSecurityJob(j *config.Presubmit, dropLabels sets.String, jobsConfig configJSON) { // filter out the unwanted labels if len(j.Labels) > 0 { filteredLabels := make(map[string]string) for k, v := range j.Labels { if !dropLabels.Has(fmt.Sprintf("%s: %s", k, v)) { filteredLabels[k] = v } } j.Labels = filteredLabels } originalName := j.Name // fix name and triggers for all jobs j.Name = strings.Replace(originalName, "pull-kubernetes", "pull-security-kubernetes", -1) j.RerunCommand = strings.Replace(j.RerunCommand, "pull-kubernetes", "pull-security-kubernetes", -1) j.Trigger = strings.Replace(j.Trigger, "pull-kubernetes", "pull-security-kubernetes", -1) j.Context = strings.Replace(j.Context, "pull-kubernetes", "pull-security-kubernetes", -1) // handle k8s job args, volumes etc if j.Agent == "kubernetes" { j.Cluster = "security" container := &j.Spec.Containers[0] // check for args that need hijacking endsWithScenarioArgs := false needGCSFlag := false needGCSSharedFlag := false needStagingFlag := false for i, arg := range container.Args { if arg == "--" { endsWithScenarioArgs = true // handle --repo substitution for main repo } else if strings.HasPrefix(arg, "--repo=k8s.io/kubernetes") || strings.HasPrefix(arg, "--repo=k8s.io/$(REPO_NAME)") { container.Args[i] = strings.Replace(arg, "k8s.io/", "github.com/kubernetes-security/", 1) // handle upload bucket } else if strings.HasPrefix(arg, "--upload=") { container.Args[i] = "--upload=gs://kubernetes-security-prow/pr-logs" // check if we need to change staging artifact location for bazel-build and e2es } else if strings.HasPrefix(arg, "--release") { needGCSFlag = true needGCSSharedFlag = true } else if strings.HasPrefix(arg, "--stage") { needStagingFlag = true } else if strings.HasPrefix(arg, "--use-shared-build") { needGCSSharedFlag = true } } // NOTE: this needs to be before the bare -- and then bootstrap args so we prepend it container.Args = append([]string{"--ssh=/etc/ssh-security/ssh-security"}, 
container.Args...) // check for scenario specific tweaks // NOTE: jobs are remapped to their original name in bootstrap to de-dupe config // check if we need to change staging artifact location for bazel-build and e2es if jobsConfig.ScenarioForJob(originalName) == "kubernetes_bazel" { for _, arg := range jobsConfig.ArgsForJob(originalName) { if strings.HasPrefix(arg, "--release") { needGCSFlag = true needGCSSharedFlag = true break } } } if jobsConfig.ScenarioForJob(originalName) == "kubernetes_e2e" { for _, arg := range jobsConfig.ArgsForJob(originalName) { if strings.HasPrefix(arg, "--stage") { needStagingFlag = true } else if strings.HasPrefix(arg, "--use-shared-build") { needGCSSharedFlag = true } } } // NOTE: these needs to be at the end and after a -- if there is none (it's a scenario arg) if !endsWithScenarioArgs && (needGCSFlag || needGCSSharedFlag || needStagingFlag) { container.Args = append(container.Args, "--") } if needGCSFlag { container.Args = append(container.Args, "--gcs=gs://kubernetes-security-prow/ci/"+j.Name) } if needGCSSharedFlag { container.Args = append(container.Args, "--gcs-shared=gs://kubernetes-security-prow/bazel") } if needStagingFlag { container.Args = append(container.Args, "--stage=gs://kubernetes-security-prow/ci/"+j.Name) } // add ssh key volume / mount container.VolumeMounts = append( container.VolumeMounts, kube.VolumeMount{ Name: "ssh-security", MountPath: "/etc/ssh-security", }, ) defaultMode := int32(0400) j.Spec.Volumes = append( j.Spec.Volumes, kube.Volume{ Name: "ssh-security", VolumeSource: kube.VolumeSource{ Secret: &kube.SecretSource{ SecretName: "ssh-security", DefaultMode: &defaultMode, }, }, }, ) // remove cache-ssd related args stripCache(j) // strip cache may remove the /docker-graph mount if it is on the cache // ssd, make sure we still have an emptyDir instead for dind jobs ensureDockerGraphVolume(j) } // done with this job, check for run_after_success for i := range j.RunAfterSuccess { 
convertJobToSecurityJob(&j.RunAfterSuccess[i], dropLabels, jobsConfig) } } func yamlBytesStripNulls(yamlBytes []byte) []byte { nullRE := regexp.MustCompile("(?m)[\n]+^[^\n]+: null$") return nullRE.ReplaceAll(yamlBytes, []byte{}) } func yamlBytesToEntry(yamlBytes []byte, indent int) []byte { var buff bytes.Buffer // spaces of length indent prefix := bytes.Repeat([]byte{32}, indent) // `- ` before the first field of a yaml entry prefix[len(prefix)-2] = byte(45) buff.Write(prefix) // put back space prefix[len(prefix)-2] = byte(32) for i, b := range yamlBytes { buff.WriteByte(b) // indent after newline, except the last one if b == byte(10) && i+1 != len(yamlBytes) { buff.Write(prefix) } } return buff.Bytes() } func copyFile(srcPath, destPath string) error { // fallback to copying the file instead src, err := os.Open(srcPath) if err != nil { return err } dst, err := os.OpenFile(destPath, os.O_WRONLY, 0666) if err != nil { return err } _, err = io.Copy(dst, src) if err != nil { return err } dst.Sync() dst.Close() src.Close() return nil } func main() { flag.Parse() // default to $PWD/prow/config.yaml pwd, err := os.Getwd() if err != nil { log.Fatalf("Failed to get $PWD: %v", err) } if *configPath == "" { *configPath = pwd + "/prow/config.yaml" } if *configJSONPath == "" { *configJSONPath = pwd + "/jobs/config.json" } // read in current prow config originalBytes, parsed, err := readConfig(*configPath) if err != nil { log.Fatalf("Failed to read config file: %v", err) } // read in jobs config jobsConfig, err := readConfigJSON(*configJSONPath) // find security repo section securityRepoStart, securityRepoEnd, err := getSecurityRepoJobsIndex(originalBytes) if err != nil { log.Fatalf("Failed to find security repo section: %v", err) } // create temp file to write updated config f, err := ioutil.TempFile("", "prow-config") if err != nil { log.Fatalf("Failed to create temp file: %v", err) } defer os.Remove(f.Name()) // write the original bytes before the security repo section _, 
err = f.Write(originalBytes[:securityRepoStart]) if err != nil { log.Fatalf("Failed to write temp file: %v", err) } f.Sync() io.WriteString(f, "\n") // convert each kubernetes/kubernetes presubmit to a // kubernetes-security/kubernetes presubmit and write to the file cacheLabels := getCacheSSDPresetLabels(parsed) for _, job := range parsed.Presubmits["kubernetes/kubernetes"] { convertJobToSecurityJob(&job, cacheLabels, jobsConfig) jobBytes, err := yaml.Marshal(job) if err != nil { log.Fatalf("Failed to marshal job: %v", err) } // write, properly indented, and stripped of `foo: null` jobBytes = yamlBytesStripNulls(jobBytes) f.Write(yamlBytesToEntry(jobBytes, 4)) } // write the original bytes after the security repo section _, err = f.Write(originalBytes[securityRepoEnd:]) if err != nil { log.Fatalf("Failed to write temp file: %v", err) } f.Sync() // move file to replace original f.Close() err = os.Rename(f.Name(), *configPath) if err != nil { // fallback to copying the file instead err = copyFile(f.Name(), *configPath) if err != nil { log.Fatalf("Failed to replace config with updated version: %v", err) } } } use tempfile in the same directory as the output file /* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* fixconfig automatically fixes the prow config to have automatically generated security repo presubmits transformed from the kubernetes presubmits NOTE: this makes a few assumptions - $PWD/prow/config.yaml is where the config lives (unless you supply --config=) - `presubmits:` exists - ` kubernetes-security/kubernetes:` exists in presubmits - some other ` org/repo:` exists in presubmits *after* ` kubernetes-security/kubernetes:` - the original contents around this will be kept, but this section will be automatically rewritten */ package main import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "log" "os" "path/filepath" "regexp" "strings" "github.com/ghodss/yaml" flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/test-infra/prow/config" "k8s.io/test-infra/prow/kube" ) var configPath = flag.String("config", "", "path to prow/config.yaml, defaults to $PWD/prow/config.yaml") var configJSONPath = flag.String("config-json", "", "path to jobs/config.json, defaults to $PWD/jobs/config.json") // config.json is the worst but contains useful information :-( type configJSON map[string]map[string]interface{} func (c configJSON) ScenarioForJob(jobName string) string { if scenario, ok := c[jobName]["scenario"]; ok { return scenario.(string) } return "" } func (c configJSON) ArgsForJob(jobName string) []string { res := []string{} if args, ok := c[jobName]["args"]; ok { for _, arg := range args.([]interface{}) { res = append(res, arg.(string)) } } return res } func readConfigJSON(path string) (config configJSON, err error) { raw, err := ioutil.ReadFile(path) if err != nil { return nil, err } config = configJSON{} err = json.Unmarshal(raw, &config) if err != nil { return nil, err } return config, nil } func readConfig(path string) (raw []byte, parsed *config.Config, err error) { raw, err = ioutil.ReadFile(path) if err != nil { return nil, nil, err } parsed = &config.Config{} err = yaml.Unmarshal(raw, parsed) if err != nil { return nil, nil, err } 
return raw, parsed, nil } // get the start/end byte indexes of the security repo presubmits // in the raw config.yaml bytes func getSecurityRepoJobsIndex(configBytes []byte) (start, end int, err error) { // find security-repo config beginning // first find presubmits presubmitIdx := bytes.Index(configBytes, ([]byte)("presubmits:")) // then find k-s/k: startRegex := regexp.MustCompile("(?m)^ kubernetes-security/kubernetes:$") loc := startRegex.FindIndex(configBytes[presubmitIdx:]) if loc == nil { return 0, 0, fmt.Errorf("failed to find start of security repo presubmits") } start = presubmitIdx + loc[1] // must be like ` org/repo:` loc = regexp.MustCompile("(?m)^ [^ #-][^ #]+/.+:$").FindIndex(configBytes[start:]) if loc == nil { return 0, 0, fmt.Errorf("failed to find end of security repo presubmits") } // loc[0] is the beginning of the match end = start + loc[0] return start, end, nil } func volumeIsCacheSSD(v *kube.Volume) bool { return v.HostPath != nil && strings.HasPrefix(v.HostPath.Path, "/mnt/disks/ssd0") } // strip cache ssd related settings func stripCache(j *config.Presubmit) { container := &j.Spec.Containers[0] // strip cache disk related args etc filteredArgs := []string{} for _, arg := range container.Args { if strings.HasPrefix(arg, "--git-cache") { continue } filteredArgs = append(filteredArgs, arg) } container.Args = filteredArgs // filter cache related env filteredEnv := []kube.EnvVar{} for _, env := range container.Env { // don't keep bazel cache directory env if env.Name == "TEST_TMPDIR" { continue } filteredEnv = append(filteredEnv, env) } container.Env = filteredEnv // filter cache disk volumes, swap DIND volume for filteredVolumes := []kube.Volume{} removedVolumeNames := sets.String{} for _, volume := range j.Spec.Volumes { if volumeIsCacheSSD(&volume) { removedVolumeNames.Insert(volume.Name) continue } filteredVolumes = append(filteredVolumes, volume) } j.Spec.Volumes = filteredVolumes // filter out mounts for filtered out volumes 
filteredVolumeMounts := []kube.VolumeMount{} for _, volumeMount := range container.VolumeMounts { if removedVolumeNames.Has(volumeMount.Name) { continue } filteredVolumeMounts = append(filteredVolumeMounts, volumeMount) } container.VolumeMounts = filteredVolumeMounts // remove """cache port""" container.Ports = []kube.Port{} } // run after stripCache to make sure we still at least mount an emptyDir to // /docker-graph for dind enabled jobs func ensureDockerGraphVolume(j *config.Presubmit) { // make sure this is a docker-in-docker job first dindEnabled := false container := &j.Spec.Containers[0] for _, env := range container.Env { if env.Name == "DOCKER_IN_DOCKER_ENABLED" && env.Value == "true" { dindEnabled = true break } } if !dindEnabled { return } // filter out old /docker-graph volume mounts of any sort const dockerGraphMountPath = "/docker-graph" oldDockerGraphVolumeMount := "" removedVolumeNames := sets.String{} filteredVolumeMounts := []kube.VolumeMount{} for _, volumeMount := range container.VolumeMounts { if volumeMount.MountPath == dockerGraphMountPath { removedVolumeNames.Insert(volumeMount.Name) continue } filteredVolumeMounts = append(filteredVolumeMounts, volumeMount) } container.VolumeMounts = filteredVolumeMounts // remove old volumes associated with old mounts if any if removedVolumeNames.Len() > 0 { filteredVolumes := []kube.Volume{} for _, volume := range j.Spec.Volumes { if volume.Name == oldDockerGraphVolumeMount { continue } filteredVolumes = append(filteredVolumes, volume) } j.Spec.Volumes = filteredVolumes } // add new auto generated volume mount const dockerGraphVolumeMount = "auto-generated-docker-graph-volume-mount" container.VolumeMounts = append(container.VolumeMounts, kube.VolumeMount{ Name: dockerGraphVolumeMount, MountPath: dockerGraphMountPath, }) // add matching auto generated emptyDir volume volumeSource := kube.VolumeSource{} volumeSource.EmptyDir = &kube.EmptyDirVolumeSource{} volume := kube.Volume{ Name: dockerGraphVolumeMount, 
VolumeSource: volumeSource, } j.Spec.Volumes = append(j.Spec.Volumes, volume) } // returns all of the labels for presets that mount the cache SSD volume // as "key: v" func getCacheSSDPresetLabels(c *config.Config) (labels sets.String) { labels = sets.NewString() for _, preset := range c.Presets { for _, volume := range preset.Volumes { if volumeIsCacheSSD(&volume) { for k, v := range preset.Labels { labels.Insert(fmt.Sprintf("%s: %s", k, v)) } break } } } return labels } // convert a kubernetes/kubernetes job to a kubernetes-security/kubernetes job // dropLabels should be a set of "k: v" strings // xref: prow/config/config_test.go replace(...) func convertJobToSecurityJob(j *config.Presubmit, dropLabels sets.String, jobsConfig configJSON) { // filter out the unwanted labels if len(j.Labels) > 0 { filteredLabels := make(map[string]string) for k, v := range j.Labels { if !dropLabels.Has(fmt.Sprintf("%s: %s", k, v)) { filteredLabels[k] = v } } j.Labels = filteredLabels } originalName := j.Name // fix name and triggers for all jobs j.Name = strings.Replace(originalName, "pull-kubernetes", "pull-security-kubernetes", -1) j.RerunCommand = strings.Replace(j.RerunCommand, "pull-kubernetes", "pull-security-kubernetes", -1) j.Trigger = strings.Replace(j.Trigger, "pull-kubernetes", "pull-security-kubernetes", -1) j.Context = strings.Replace(j.Context, "pull-kubernetes", "pull-security-kubernetes", -1) // handle k8s job args, volumes etc if j.Agent == "kubernetes" { j.Cluster = "security" container := &j.Spec.Containers[0] // check for args that need hijacking endsWithScenarioArgs := false needGCSFlag := false needGCSSharedFlag := false needStagingFlag := false for i, arg := range container.Args { if arg == "--" { endsWithScenarioArgs = true // handle --repo substitution for main repo } else if strings.HasPrefix(arg, "--repo=k8s.io/kubernetes") || strings.HasPrefix(arg, "--repo=k8s.io/$(REPO_NAME)") { container.Args[i] = strings.Replace(arg, "k8s.io/", 
"github.com/kubernetes-security/", 1) // handle upload bucket } else if strings.HasPrefix(arg, "--upload=") { container.Args[i] = "--upload=gs://kubernetes-security-prow/pr-logs" // check if we need to change staging artifact location for bazel-build and e2es } else if strings.HasPrefix(arg, "--release") { needGCSFlag = true needGCSSharedFlag = true } else if strings.HasPrefix(arg, "--stage") { needStagingFlag = true } else if strings.HasPrefix(arg, "--use-shared-build") { needGCSSharedFlag = true } } // NOTE: this needs to be before the bare -- and then bootstrap args so we prepend it container.Args = append([]string{"--ssh=/etc/ssh-security/ssh-security"}, container.Args...) // check for scenario specific tweaks // NOTE: jobs are remapped to their original name in bootstrap to de-dupe config // check if we need to change staging artifact location for bazel-build and e2es if jobsConfig.ScenarioForJob(originalName) == "kubernetes_bazel" { for _, arg := range jobsConfig.ArgsForJob(originalName) { if strings.HasPrefix(arg, "--release") { needGCSFlag = true needGCSSharedFlag = true break } } } if jobsConfig.ScenarioForJob(originalName) == "kubernetes_e2e" { for _, arg := range jobsConfig.ArgsForJob(originalName) { if strings.HasPrefix(arg, "--stage") { needStagingFlag = true } else if strings.HasPrefix(arg, "--use-shared-build") { needGCSSharedFlag = true } } } // NOTE: these needs to be at the end and after a -- if there is none (it's a scenario arg) if !endsWithScenarioArgs && (needGCSFlag || needGCSSharedFlag || needStagingFlag) { container.Args = append(container.Args, "--") } if needGCSFlag { container.Args = append(container.Args, "--gcs=gs://kubernetes-security-prow/ci/"+j.Name) } if needGCSSharedFlag { container.Args = append(container.Args, "--gcs-shared=gs://kubernetes-security-prow/bazel") } if needStagingFlag { container.Args = append(container.Args, "--stage=gs://kubernetes-security-prow/ci/"+j.Name) } // add ssh key volume / mount container.VolumeMounts 
= append( container.VolumeMounts, kube.VolumeMount{ Name: "ssh-security", MountPath: "/etc/ssh-security", }, ) defaultMode := int32(0400) j.Spec.Volumes = append( j.Spec.Volumes, kube.Volume{ Name: "ssh-security", VolumeSource: kube.VolumeSource{ Secret: &kube.SecretSource{ SecretName: "ssh-security", DefaultMode: &defaultMode, }, }, }, ) // remove cache-ssd related args stripCache(j) // strip cache may remove the /docker-graph mount if it is on the cache // ssd, make sure we still have an emptyDir instead for dind jobs ensureDockerGraphVolume(j) } // done with this job, check for run_after_success for i := range j.RunAfterSuccess { convertJobToSecurityJob(&j.RunAfterSuccess[i], dropLabels, jobsConfig) } } func yamlBytesStripNulls(yamlBytes []byte) []byte { nullRE := regexp.MustCompile("(?m)[\n]+^[^\n]+: null$") return nullRE.ReplaceAll(yamlBytes, []byte{}) } func yamlBytesToEntry(yamlBytes []byte, indent int) []byte { var buff bytes.Buffer // spaces of length indent prefix := bytes.Repeat([]byte{32}, indent) // `- ` before the first field of a yaml entry prefix[len(prefix)-2] = byte(45) buff.Write(prefix) // put back space prefix[len(prefix)-2] = byte(32) for i, b := range yamlBytes { buff.WriteByte(b) // indent after newline, except the last one if b == byte(10) && i+1 != len(yamlBytes) { buff.Write(prefix) } } return buff.Bytes() } func copyFile(srcPath, destPath string) error { // fallback to copying the file instead src, err := os.Open(srcPath) if err != nil { return err } dst, err := os.OpenFile(destPath, os.O_WRONLY, 0666) if err != nil { return err } _, err = io.Copy(dst, src) if err != nil { return err } dst.Sync() dst.Close() src.Close() return nil } func main() { flag.Parse() // default to $PWD/prow/config.yaml pwd, err := os.Getwd() if err != nil { log.Fatalf("Failed to get $PWD: %v", err) } if *configPath == "" { *configPath = pwd + "/prow/config.yaml" } if *configJSONPath == "" { *configJSONPath = pwd + "/jobs/config.json" } // read in current prow 
config originalBytes, parsed, err := readConfig(*configPath) if err != nil { log.Fatalf("Failed to read config file: %v", err) } // read in jobs config jobsConfig, err := readConfigJSON(*configJSONPath) // find security repo section securityRepoStart, securityRepoEnd, err := getSecurityRepoJobsIndex(originalBytes) if err != nil { log.Fatalf("Failed to find security repo section: %v", err) } // create temp file to write updated config f, err := ioutil.TempFile(filepath.Dir(*configPath), "temp") if err != nil { log.Fatalf("Failed to create temp file: %v", err) } defer os.Remove(f.Name()) // write the original bytes before the security repo section _, err = f.Write(originalBytes[:securityRepoStart]) if err != nil { log.Fatalf("Failed to write temp file: %v", err) } f.Sync() io.WriteString(f, "\n") // convert each kubernetes/kubernetes presubmit to a // kubernetes-security/kubernetes presubmit and write to the file cacheLabels := getCacheSSDPresetLabels(parsed) for _, job := range parsed.Presubmits["kubernetes/kubernetes"] { convertJobToSecurityJob(&job, cacheLabels, jobsConfig) jobBytes, err := yaml.Marshal(job) if err != nil { log.Fatalf("Failed to marshal job: %v", err) } // write, properly indented, and stripped of `foo: null` jobBytes = yamlBytesStripNulls(jobBytes) f.Write(yamlBytesToEntry(jobBytes, 4)) } // write the original bytes after the security repo section _, err = f.Write(originalBytes[securityRepoEnd:]) if err != nil { log.Fatalf("Failed to write temp file: %v", err) } f.Sync() // move file to replace original f.Close() err = os.Rename(f.Name(), *configPath) if err != nil { // fallback to copying the file instead err = copyFile(f.Name(), *configPath) if err != nil { log.Fatalf("Failed to replace config with updated version: %v", err) } } }
// Copyright ©2014 The gonum Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package goblas import ( "fmt" "runtime" "github.com/gonum/blas" ) const ( blockSize = 64 // b x b matrix minParBlock = 4 // minimum number of blocks needed to go parallel buffMul = 4 // how big is the buffer relative to the number of workers ) // Dgemm computes c := beta * C + alpha * A * B. If tA or tB is blas.Trans, // A or B is transposed. // m is the number of rows in A or A transpose // n is the number of columns in B or B transpose // k is the columns of A and rows of B func (Blas) Dgemm(tA, tB blas.Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { var amat, bmat, cmat general if tA == blas.Trans { amat = general{ data: a, rows: k, cols: m, stride: lda, } } else { amat = general{ data: a, rows: m, cols: k, stride: lda, } } err := amat.check() if err != nil { panic(err) } if tB == blas.Trans { bmat = general{ data: b, rows: n, cols: k, stride: ldb, } } else { bmat = general{ data: b, rows: k, cols: n, stride: ldb, } } err = bmat.check() if err != nil { panic(err) } cmat = general{ data: c, rows: m, cols: n, stride: ldc, } err = cmat.check() if err != nil { panic(err) } if tA != blas.Trans && tA != blas.NoTrans { panic(badTranspose) } if tB != blas.Trans && tB != blas.NoTrans { panic(badTranspose) } // scale c if beta != 1 { for i := 0; i < m; i++ { ctmp := cmat.data[i*cmat.stride : i*cmat.stride+cmat.cols] for j := range ctmp { ctmp[j] *= beta } } } dgemmParallel(tA, tB, amat, bmat, cmat, alpha) } func dgemmParallel(tA, tB blas.Transpose, a, b, c general, alpha float64) { // dgemmParallel computes a parallel matrix multiplication by partitioning // a and b into sub-blocks, and updating c with the multiplication of the sub-block // In all cases, // A = [ A_11 A_12 ... A_1j // A_21 A_22 ... A_2j // ... // A_i1 A_i2 ... 
A_ij] // // and same for B. All of the submatrix sizes are blockSize*blockSize except // at the edges. // In all cases, there is one dimension for each matrix along which // C must be updated sequentially. // Cij = \sum_k Aik Bki, (A * B) // Cij = \sum_k Aki Bkj, (A^T * B) // Cij = \sum_k Aik Bjk, (A * B^T) // Cij = \sum_k Aki Bjk, (A^T * B^T) // // This code computes one {i, j} block sequentially along the k dimension, // and computes all of the {i, j} blocks concurrently. This // partitioning allows Cij to be updated in-place without race-conditions. // Instead of launching a goroutine for each possible concurrent computation, // a number of worker goroutines are created and channels are used to pass // available and completed cases. // // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix // multiplies, though this code does not copy matrices to attempt to eliminate // cache misses. aTrans := tA == blas.Trans bTrans := tB == blas.Trans maxKLen, parBlocks := computeNumBlocks(a, b, aTrans, bTrans) if parBlocks < minParBlock { // The matrix multiplication is small in the dimensions where it can be // computed concurrently. Just do it in serial. dgemmSerial(tA, tB, a, b, c, alpha) return } nWorkers := runtime.GOMAXPROCS(0) if parBlocks < nWorkers { nWorkers = parBlocks } // There is a tradeoff between the workers having to wait for work // and a large buffer making operations slow. buf := buffMul * nWorkers if buf > parBlocks { buf = parBlocks } sendChan := make(chan subMul, buf) quitChan := make(chan struct{}, nWorkers) // Launch workers. A worker receives an {i, j} submatrix of c, and computes // A_ik B_ki (or the transposed version) storing the result in c_ij. When the // channel is finally closed, it sends a message that it has finished the final // computation. for i := 0; i < nWorkers; i++ { go func() { // Make local copies of otherwise global variables to reduce shared memory. // This has a noticable effect on benchmarks in some cases. 
alpha := alpha aTrans := aTrans bTrans := bTrans crows := c.rows ccols := c.cols for sub := range sendChan { i := sub.i j := sub.j leni := blockSize if i+leni > crows { leni = crows - i } lenj := blockSize if j+lenj > ccols { lenj = ccols - j } cSub := c.view(i, j, leni, lenj) // Compute A_ik B_kj for all k for k := 0; k < maxKLen; k += blockSize { lenk := blockSize if k+lenk > maxKLen { lenk = maxKLen - k } var aSub, bSub general if aTrans { aSub = a.view(k, i, lenk, leni) } else { aSub = a.view(i, k, leni, lenk) } if bTrans { bSub = b.view(j, k, lenj, lenk) } else { bSub = b.view(k, j, lenk, lenj) } dgemmSerial(tA, tB, aSub, bSub, cSub, alpha) } } quitChan <- struct{}{} }() } // Send out all of the {i, j} subblocks for computation. for i := 0; i < c.rows; i += blockSize { for j := 0; j < c.cols; j += blockSize { sendChan <- subMul{ i: i, j: j, } } } close(sendChan) for i := 0; i < nWorkers; i++ { <-quitChan } return } type subMul struct { i, j int // index of block } // computeNumBlocks says how many blocks there are to compute. maxKLen says the length of the // k dimension, parBlocks is the number of blocks that could be computed in parallel // (the submatrices in i and j). expect is the full number of blocks that will be computed. 
func computeNumBlocks(a, b general, aTrans, bTrans bool) (maxKLen, parBlocks int) { aRowBlocks := a.rows / blockSize if a.rows%blockSize != 0 { aRowBlocks++ } aColBlocks := a.cols / blockSize if a.cols%blockSize != 0 { aColBlocks++ } bRowBlocks := b.rows / blockSize if b.rows%blockSize != 0 { bRowBlocks++ } bColBlocks := b.cols / blockSize if b.cols%blockSize != 0 { bColBlocks++ } switch { case !aTrans && !bTrans: // Cij = \sum_k Aik Bki maxKLen = a.cols parBlocks = aRowBlocks * bColBlocks case aTrans && !bTrans: // Cij = \sum_k Aki Bkj maxKLen = a.rows parBlocks = aColBlocks * bColBlocks case !aTrans && bTrans: // Cij = \sum_k Aik Bjk maxKLen = a.cols parBlocks = aRowBlocks * bRowBlocks case aTrans && bTrans: // Cij = \sum_k Aki Bjk maxKLen = a.rows parBlocks = aColBlocks * bRowBlocks } return } // dgemmSerial is serial matrix multiply func dgemmSerial(tA, tB blas.Transpose, a, b, c general, alpha float64) { switch { case tA == blas.NoTrans && tB == blas.NoTrans: dgemmSerialNotNot(a, b, c, alpha) return case tA == blas.Trans && tB == blas.NoTrans: dgemmSerialTransNot(a, b, c, alpha) return case tA == blas.NoTrans && tB == blas.Trans: dgemmSerialNotTrans(a, b, c, alpha) return case tA == blas.Trans && tB == blas.Trans: dgemmSerialTransTrans(a, b, c, alpha) return default: panic("unreachable") } } // dgemmSerial where neither a nor b are transposed func dgemmSerialNotNot(a, b, c general, alpha float64) { if debug { if a.cols != b.rows { panic("inner dimension mismatch") } if a.rows != c.rows { panic("outer dimension mismatch") } if b.cols != c.cols { panic("outer dimension mismatch") } } // This style is used instead of the literal [i*stride +j]) is used because // approximately 5 times faster as of go 1.3. 
for i := 0; i < a.rows; i++ { ctmp := c.data[i*c.stride : i*c.stride+c.cols] for l, v := range a.data[i*a.stride : i*a.stride+a.cols] { tmp := alpha * v if tmp != 0 { for j, w := range b.data[l*b.stride : l*b.stride+b.cols] { ctmp[j] += tmp * w } } } } } // dgemmSerial where neither a is transposed and b is not func dgemmSerialTransNot(a, b, c general, alpha float64) { if debug { if a.rows != b.rows { fmt.Println(a.rows, b.rows) panic("inner dimension mismatch") } if a.cols != c.rows { panic("outer dimension mismatch") } if b.cols != c.cols { panic("outer dimension mismatch") } } // This style is used instead of the literal [i*stride +j]) is used because // approximately 5 times faster as of go 1.3. for l := 0; l < a.rows; l++ { btmp := b.data[l*b.stride : l*b.stride+b.cols] for i, v := range a.data[l*a.stride : l*a.stride+a.cols] { tmp := alpha * v ctmp := c.data[i*c.stride : i*c.stride+c.cols] if tmp != 0 { for j, w := range btmp { ctmp[j] += tmp * w } } } } } // dgemmSerial where neither a is not transposed and b is func dgemmSerialNotTrans(a, b, c general, alpha float64) { if debug { if a.cols != b.cols { panic("inner dimension mismatch") } if a.rows != c.rows { panic("outer dimension mismatch") } if b.rows != c.cols { panic("outer dimension mismatch") } } // This style is used instead of the literal [i*stride +j]) is used because // approximately 5 times faster as of go 1.3. 
for i := 0; i < a.rows; i++ { atmp := a.data[i*a.stride : i*a.stride+a.cols] ctmp := c.data[i*c.stride : i*c.stride+c.cols] for j := 0; j < b.rows; j++ { var tmp float64 for l, v := range b.data[j*b.stride : j*b.stride+b.cols] { tmp += atmp[l] * v } ctmp[j] += alpha * tmp } } } // dgemmSerial where both are transposed func dgemmSerialTransTrans(a, b, c general, alpha float64) { if debug { if a.rows != b.cols { panic("inner dimension mismatch") } if a.cols != c.rows { panic("outer dimension mismatch") } if b.rows != c.cols { panic("outer dimension mismatch") } } // This style is used instead of the literal [i*stride +j]) is used because // approximately 5 times faster as of go 1.3. for l := 0; l < a.rows; l++ { for i, v := range a.data[l*a.stride : l*a.stride+a.cols] { ctmp := c.data[i*c.stride : i*c.stride+c.cols] if v != 0 { tmp := alpha * v for j := 0; j < b.rows; j++ { ctmp[j] += tmp * b.data[j*b.stride+l] } } } } } Replaced quit channel with a WaitGroup. This better matches what is happening. // Copyright ©2014 The gonum Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package goblas import ( "fmt" "runtime" "sync" "github.com/gonum/blas" ) const ( blockSize = 64 // b x b matrix minParBlock = 4 // minimum number of blocks needed to go parallel buffMul = 4 // how big is the buffer relative to the number of workers ) // Dgemm computes c := beta * C + alpha * A * B. If tA or tB is blas.Trans, // A or B is transposed. 
// m is the number of rows in A or A transpose // n is the number of columns in B or B transpose // k is the columns of A and rows of B func (Blas) Dgemm(tA, tB blas.Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { var amat, bmat, cmat general if tA == blas.Trans { amat = general{ data: a, rows: k, cols: m, stride: lda, } } else { amat = general{ data: a, rows: m, cols: k, stride: lda, } } err := amat.check() if err != nil { panic(err) } if tB == blas.Trans { bmat = general{ data: b, rows: n, cols: k, stride: ldb, } } else { bmat = general{ data: b, rows: k, cols: n, stride: ldb, } } err = bmat.check() if err != nil { panic(err) } cmat = general{ data: c, rows: m, cols: n, stride: ldc, } err = cmat.check() if err != nil { panic(err) } if tA != blas.Trans && tA != blas.NoTrans { panic(badTranspose) } if tB != blas.Trans && tB != blas.NoTrans { panic(badTranspose) } // scale c if beta != 1 { for i := 0; i < m; i++ { ctmp := cmat.data[i*cmat.stride : i*cmat.stride+cmat.cols] for j := range ctmp { ctmp[j] *= beta } } } dgemmParallel(tA, tB, amat, bmat, cmat, alpha) } func dgemmParallel(tA, tB blas.Transpose, a, b, c general, alpha float64) { // dgemmParallel computes a parallel matrix multiplication by partitioning // a and b into sub-blocks, and updating c with the multiplication of the sub-block // In all cases, // A = [ A_11 A_12 ... A_1j // A_21 A_22 ... A_2j // ... // A_i1 A_i2 ... A_ij] // // and same for B. All of the submatrix sizes are blockSize*blockSize except // at the edges. // In all cases, there is one dimension for each matrix along which // C must be updated sequentially. // Cij = \sum_k Aik Bki, (A * B) // Cij = \sum_k Aki Bkj, (A^T * B) // Cij = \sum_k Aik Bjk, (A * B^T) // Cij = \sum_k Aki Bjk, (A^T * B^T) // // This code computes one {i, j} block sequentially along the k dimension, // and computes all of the {i, j} blocks concurrently. 
This // partitioning allows Cij to be updated in-place without race-conditions. // Instead of launching a goroutine for each possible concurrent computation, // a number of worker goroutines are created and channels are used to pass // available and completed cases. // // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix // multiplies, though this code does not copy matrices to attempt to eliminate // cache misses. aTrans := tA == blas.Trans bTrans := tB == blas.Trans maxKLen, parBlocks := computeNumBlocks(a, b, aTrans, bTrans) if parBlocks < minParBlock { // The matrix multiplication is small in the dimensions where it can be // computed concurrently. Just do it in serial. dgemmSerial(tA, tB, a, b, c, alpha) return } nWorkers := runtime.GOMAXPROCS(0) if parBlocks < nWorkers { nWorkers = parBlocks } // There is a tradeoff between the workers having to wait for work // and a large buffer making operations slow. buf := buffMul * nWorkers if buf > parBlocks { buf = parBlocks } sendChan := make(chan subMul, buf) // Launch workers. A worker receives an {i, j} submatrix of c, and computes // A_ik B_ki (or the transposed version) storing the result in c_ij. When the // channel is finally closed, it signals to the waitgroup that it has finished // computing. wg := &sync.WaitGroup{} wg.Add(nWorkers) for i := 0; i < nWorkers; i++ { go func() { defer wg.Done() // Make local copies of otherwise global variables to reduce shared memory. // This has a noticable effect on benchmarks in some cases. 
alpha := alpha aTrans := aTrans bTrans := bTrans crows := c.rows ccols := c.cols for sub := range sendChan { i := sub.i j := sub.j leni := blockSize if i+leni > crows { leni = crows - i } lenj := blockSize if j+lenj > ccols { lenj = ccols - j } cSub := c.view(i, j, leni, lenj) // Compute A_ik B_kj for all k for k := 0; k < maxKLen; k += blockSize { lenk := blockSize if k+lenk > maxKLen { lenk = maxKLen - k } var aSub, bSub general if aTrans { aSub = a.view(k, i, lenk, leni) } else { aSub = a.view(i, k, leni, lenk) } if bTrans { bSub = b.view(j, k, lenj, lenk) } else { bSub = b.view(k, j, lenk, lenj) } dgemmSerial(tA, tB, aSub, bSub, cSub, alpha) } } }() } // Send out all of the {i, j} subblocks for computation. for i := 0; i < c.rows; i += blockSize { for j := 0; j < c.cols; j += blockSize { sendChan <- subMul{ i: i, j: j, } } } close(sendChan) wg.Wait() return } type subMul struct { i, j int // index of block } // computeNumBlocks says how many blocks there are to compute. maxKLen says the length of the // k dimension, parBlocks is the number of blocks that could be computed in parallel // (the submatrices in i and j). expect is the full number of blocks that will be computed. 
// computeNumBlocks determines the block decomposition of the multiply.
// maxKLen is the length of the shared k dimension, and parBlocks is the
// number of {i, j} sub-blocks of C that can be computed in parallel.
func computeNumBlocks(a, b general, aTrans, bTrans bool) (maxKLen, parBlocks int) {
	// Count blockSize-wide blocks along each dimension, rounding up so a
	// partial block at the edge is still counted.
	aRowBlocks := a.rows / blockSize
	if a.rows%blockSize != 0 {
		aRowBlocks++
	}
	aColBlocks := a.cols / blockSize
	if a.cols%blockSize != 0 {
		aColBlocks++
	}
	bRowBlocks := b.rows / blockSize
	if b.rows%blockSize != 0 {
		bRowBlocks++
	}
	bColBlocks := b.cols / blockSize
	if b.cols%blockSize != 0 {
		bColBlocks++
	}
	switch {
	case !aTrans && !bTrans:
		// Cij = \sum_k Aik Bkj
		maxKLen = a.cols
		parBlocks = aRowBlocks * bColBlocks
	case aTrans && !bTrans:
		// Cij = \sum_k Aki Bkj
		maxKLen = a.rows
		parBlocks = aColBlocks * bColBlocks
	case !aTrans && bTrans:
		// Cij = \sum_k Aik Bjk
		maxKLen = a.cols
		parBlocks = aRowBlocks * bRowBlocks
	case aTrans && bTrans:
		// Cij = \sum_k Aki Bjk
		maxKLen = a.rows
		parBlocks = aColBlocks * bRowBlocks
	}
	return
}

// dgemmSerial is a serial matrix multiply, c += alpha * a * b, dispatching
// to the specialized kernel for the given transpose flags.
func dgemmSerial(tA, tB blas.Transpose, a, b, c general, alpha float64) {
	switch {
	case tA == blas.NoTrans && tB == blas.NoTrans:
		dgemmSerialNotNot(a, b, c, alpha)
		return
	case tA == blas.Trans && tB == blas.NoTrans:
		dgemmSerialTransNot(a, b, c, alpha)
		return
	case tA == blas.NoTrans && tB == blas.Trans:
		dgemmSerialNotTrans(a, b, c, alpha)
		return
	case tA == blas.Trans && tB == blas.Trans:
		dgemmSerialTransTrans(a, b, c, alpha)
		return
	default:
		panic("unreachable")
	}
}

// dgemmSerialNotNot handles the case where neither a nor b is transposed.
func dgemmSerialNotNot(a, b, c general, alpha float64) {
	if debug {
		if a.cols != b.rows {
			panic("inner dimension mismatch")
		}
		if a.rows != c.rows {
			panic("outer dimension mismatch")
		}
		if b.cols != c.cols {
			panic("outer dimension mismatch")
		}
	}

	// Row slices are taken once per index instead of the literal
	// [i*stride+j] indexing because it is approximately 5 times faster as
	// of go 1.3.
	for i := 0; i < a.rows; i++ {
		ctmp := c.data[i*c.stride : i*c.stride+c.cols]
		for l, v := range a.data[i*a.stride : i*a.stride+a.cols] {
			tmp := alpha * v
			// Skip the inner loop entirely when the scaled element is zero.
			if tmp != 0 {
				for j, w := range b.data[l*b.stride : l*b.stride+b.cols] {
					ctmp[j] += tmp * w
				}
			}
		}
	}
}

// dgemmSerialTransNot handles the case where a is transposed and b is not.
func dgemmSerialTransNot(a, b, c general, alpha float64) {
	if debug {
		if a.rows != b.rows {
			// NOTE(review): stray debug print left in the panic path;
			// consider removing.
			fmt.Println(a.rows, b.rows)
			panic("inner dimension mismatch")
		}
		if a.cols != c.rows {
			panic("outer dimension mismatch")
		}
		if b.cols != c.cols {
			panic("outer dimension mismatch")
		}
	}

	// Row slices are taken once per index instead of the literal
	// [i*stride+j] indexing because it is approximately 5 times faster as
	// of go 1.3.
	for l := 0; l < a.rows; l++ {
		btmp := b.data[l*b.stride : l*b.stride+b.cols]
		for i, v := range a.data[l*a.stride : l*a.stride+a.cols] {
			tmp := alpha * v
			ctmp := c.data[i*c.stride : i*c.stride+c.cols]
			if tmp != 0 {
				for j, w := range btmp {
					ctmp[j] += tmp * w
				}
			}
		}
	}
}

// dgemmSerialNotTrans handles the case where a is not transposed and b is.
func dgemmSerialNotTrans(a, b, c general, alpha float64) {
	if debug {
		if a.cols != b.cols {
			panic("inner dimension mismatch")
		}
		if a.rows != c.rows {
			panic("outer dimension mismatch")
		}
		if b.rows != c.cols {
			panic("outer dimension mismatch")
		}
	}

	// Row slices are taken once per index instead of the literal
	// [i*stride+j] indexing because it is approximately 5 times faster as
	// of go 1.3.
	for i := 0; i < a.rows; i++ {
		atmp := a.data[i*a.stride : i*a.stride+a.cols]
		ctmp := c.data[i*c.stride : i*c.stride+c.cols]
		for j := 0; j < b.rows; j++ {
			// Each c element is a dot product of a row of a and a row of b.
			var tmp float64
			for l, v := range b.data[j*b.stride : j*b.stride+b.cols] {
				tmp += atmp[l] * v
			}
			ctmp[j] += alpha * tmp
		}
	}
}

// dgemmSerialTransTrans handles the case where both a and b are transposed.
func dgemmSerialTransTrans(a, b, c general, alpha float64) {
	if debug {
		if a.rows != b.cols {
			panic("inner dimension mismatch")
		}
		if a.cols != c.rows {
			panic("outer dimension mismatch")
		}
		if b.rows != c.cols {
			panic("outer dimension mismatch")
		}
	}

	// Row slices are taken once per index instead of the literal
	// [i*stride+j] indexing because it is approximately 5 times faster as
	// of go 1.3.
	for l := 0; l < a.rows; l++ {
		for i, v := range a.data[l*a.stride : l*a.stride+a.cols] {
			ctmp := c.data[i*c.stride : i*c.stride+c.cols]
			if v != 0 {
				tmp := alpha * v
				// b is accessed down a column here, so no row slice helps.
				for j := 0; j < b.rows; j++ {
					ctmp[j] += tmp * b.data[j*b.stride+l]
				}
			}
		}
	}
}
// This Source Code Form is subject to the terms of the Mozilla Public // License, version 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. package gofetch import ( "crypto/sha512" "fmt" "io" "net/http" "net/http/httptest" "os" "path" "path/filepath" "testing" "time" "github.com/hooklift/assert" ) func TestFetchWithoutContentLength(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { file, err := os.Open("./fixtures/test") assert.Ok(t, err) assert.Cond(t, file != nil, "Failed loading fixture file") defer file.Close() _, err = io.Copy(w, file) //assert.Ok(t, err) })) defer ts.Close() progress := make(chan ProgressReport) done := make(chan bool) go func() { _, err := Fetch(Config{ URL: ts.URL, DestDir: os.TempDir(), Progress: progress, }) assert.Ok(t, err) done <- true }() var total int64 for p := range progress { total += p.WrittenBytes assert.Equals(t, int64(-1), p.Total) } assert.Equals(t, int64(10485760), total) <-done // Now we can close the test server and let the deferred function to run. 
} func TestFetchWithContentLength(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { file, err := os.Open("./fixtures/test") assert.Ok(t, err) assert.Cond(t, file != nil, "Failed loading fixture file") defer file.Close() http.ServeContent(w, r, file.Name(), time.Time{}, file) })) defer ts.Close() progress := make(chan ProgressReport) done := make(chan bool) go func() { _, err := Fetch(Config{ URL: ts.URL, DestDir: os.TempDir(), Progress: progress, Concurrency: 50, }) assert.Ok(t, err) done <- true }() var total int64 for p := range progress { //fmt.Printf("%d of %d\n", p.Progress, p.Total) total += p.WrittenBytes } assert.Equals(t, int64(10485760), total) <-done } func TestResume(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { file, err := os.Open("./fixtures/test") assert.Ok(t, err) assert.Cond(t, file != nil, "Failed loading fixture file") defer file.Close() http.ServeContent(w, r, file.Name(), time.Time{}, file) })) defer ts.Close() destDir := os.TempDir() chunksDir := filepath.Join(destDir, path.Base(ts.URL)+".chunks") err := os.MkdirAll(chunksDir, 0760) assert.Ok(t, err) fixtureFile, err := os.Open("./fixtures/test-resume") assert.Ok(t, err) chunkFile, err := os.Create(filepath.Join(chunksDir, "0")) assert.Ok(t, err) _, err = io.Copy(chunkFile, fixtureFile) assert.Ok(t, err) fixtureFile.Close() chunkFile.Close() done := make(chan bool) progress := make(chan ProgressReport) var file *os.File go func() { var err error file, err = Fetch(Config{ URL: ts.URL, DestDir: destDir, Progress: progress, Concurrency: 1, }) assert.Ok(t, err) done <- true }() var total int64 for p := range progress { //fmt.Printf("%d of %d\n", p.Progress, p.Total) total += p.WrittenBytes } // It should only write to disk the remaining bytes assert.Equals(t, int64(10276045), total) <-done // Fetch finished and we can now use the file without causing data races. 
// Checks that the donwloaded file has the same size as the test fixture fi, err := file.Stat() assert.Ok(t, err) defer file.Close() assert.Equals(t, int64(10485760), fi.Size()) // Checks file integrity hasher := sha512.New() _, err = io.Copy(hasher, file) assert.Ok(t, err) result := fmt.Sprintf("%x", hasher.Sum(nil)) assert.Equals(t, "4ff6e159db38d46a665f26e9f82b98134238c0457cc82727a5258b7184773e4967068cc0eecf3928ecd079f3aea6e22aac024847c6d76c0329c4635c4b6ae327", result) file.Close() } Sends body only for GET requests // This Source Code Form is subject to the terms of the Mozilla Public // License, version 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. package gofetch import ( "crypto/sha512" "fmt" "io" "net/http" "net/http/httptest" "os" "path" "path/filepath" "testing" "time" "github.com/hooklift/assert" ) func TestFetchWithoutContentLength(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { return } file, err := os.Open("./fixtures/test") assert.Ok(t, err) assert.Cond(t, file != nil, "Failed loading fixture file") defer file.Close() _, err = io.Copy(w, file) assert.Ok(t, err) })) defer ts.Close() progress := make(chan ProgressReport) done := make(chan bool) go func() { _, err := Fetch(Config{ URL: ts.URL, DestDir: os.TempDir(), Progress: progress, }) assert.Ok(t, err) done <- true }() var total int64 for p := range progress { total += p.WrittenBytes assert.Equals(t, int64(-1), p.Total) } assert.Equals(t, int64(10485760), total) <-done // Now we can close the test server and let the deferred function to run. 
} func TestFetchWithContentLength(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { file, err := os.Open("./fixtures/test") assert.Ok(t, err) assert.Cond(t, file != nil, "Failed loading fixture file") defer file.Close() http.ServeContent(w, r, file.Name(), time.Time{}, file) })) defer ts.Close() progress := make(chan ProgressReport) done := make(chan bool) go func() { _, err := Fetch(Config{ URL: ts.URL, DestDir: os.TempDir(), Progress: progress, Concurrency: 50, }) assert.Ok(t, err) done <- true }() var total int64 for p := range progress { //fmt.Printf("%d of %d\n", p.Progress, p.Total) total += p.WrittenBytes } assert.Equals(t, int64(10485760), total) <-done } func TestResume(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { file, err := os.Open("./fixtures/test") assert.Ok(t, err) assert.Cond(t, file != nil, "Failed loading fixture file") defer file.Close() http.ServeContent(w, r, file.Name(), time.Time{}, file) })) defer ts.Close() destDir := os.TempDir() chunksDir := filepath.Join(destDir, path.Base(ts.URL)+".chunks") err := os.MkdirAll(chunksDir, 0760) assert.Ok(t, err) fixtureFile, err := os.Open("./fixtures/test-resume") assert.Ok(t, err) chunkFile, err := os.Create(filepath.Join(chunksDir, "0")) assert.Ok(t, err) _, err = io.Copy(chunkFile, fixtureFile) assert.Ok(t, err) fixtureFile.Close() chunkFile.Close() done := make(chan bool) progress := make(chan ProgressReport) var file *os.File go func() { var err error file, err = Fetch(Config{ URL: ts.URL, DestDir: destDir, Progress: progress, Concurrency: 1, }) assert.Ok(t, err) done <- true }() var total int64 for p := range progress { //fmt.Printf("%d of %d\n", p.Progress, p.Total) total += p.WrittenBytes } // It should only write to disk the remaining bytes assert.Equals(t, int64(10276045), total) <-done // Fetch finished and we can now use the file without causing data races. 
// Checks that the donwloaded file has the same size as the test fixture fi, err := file.Stat() assert.Ok(t, err) defer file.Close() assert.Equals(t, int64(10485760), fi.Size()) // Checks file integrity hasher := sha512.New() _, err = io.Copy(hasher, file) assert.Ok(t, err) result := fmt.Sprintf("%x", hasher.Sum(nil)) assert.Equals(t, "4ff6e159db38d46a665f26e9f82b98134238c0457cc82727a5258b7184773e4967068cc0eecf3928ecd079f3aea6e22aac024847c6d76c0329c4635c4b6ae327", result) file.Close() }
package main import ( "io/ioutil" "log" "net/http" "path/filepath" "text/template" "github.com/olebedev/go-duktape" ) func readFile(file string) string { js, err := ioutil.ReadFile(file) if err != nil { log.Fatal(err) } return string(js) } func Index(w http.ResponseWriter, r *http.Request) { t, err := template.ParseFiles(filepath.Join("templates", "index.gohtml")) if err != nil { log.Fatal(err) } ctx := duktape.New() // Loading Javascript ctx.EvalString(readFile("static/duktape-polyfill.js")) ctx.EvalString(readFile("static/react.js")) ctx.EvalString(readFile("static/react-dom-server.js")) ctx.EvalString(readFile("static/components.js")) ctx.EvalString(readFile("static/server.js")) // Calling function renderServer ctx.GetGlobalString("renderServer") ctx.PushString("Claudemiro") ctx.Call(1) component := ctx.GetString(-1) t.Execute(w, component) } func main() { http.HandleFunc("/", Index) http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static")))) if err := http.ListenAndServe(":8080", nil); err != nil { log.Fatal(err) } } Added error handling package main import ( "errors" "log" "net/http" "path/filepath" "text/template" "github.com/olebedev/go-duktape" ) func loadJSFile(ctx *duktape.Context, file string) error { ctx.EvalFile(file) result := ctx.GetString(-1) if result != "" { return errors.New(result) } ctx.Pop() return nil } func loadJSFiles(ctx *duktape.Context, files ...string) error { for _, file := range files { err := loadJSFile(ctx, file) if err != nil { return err } } return nil } func renderServer(ctx *duktape.Context, name string) (string, error) { ctx.GetGlobalString("renderServer") if ctx.IsUndefined(-1) { return "", errors.New("Could not find function 'renderServer'") } ctx.PushString(name) ctx.Call(1) result := ctx.GetString(-1) ctx.Pop() return result, nil } func Index(w http.ResponseWriter, r *http.Request) { t, err := template.ParseFiles(filepath.Join("templates", "index.gohtml")) if err != nil { log.Fatal(err) } ctx 
:= duktape.New()

	err = loadJSFiles(ctx,
		"static/duktape-polyfill.js",
		"static/react.js",
		"static/react-dom-server.js",
		"static/components.js",
		"static/server.js",
	)
	if err != nil {
		// BUG FIX: this used log.Fatal, which terminated the whole server on
		// a single bad request. Fail only this request with a 500 instead.
		log.Println(err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}

	component, err := renderServer(ctx, "Claudemiro")
	if err != nil {
		log.Println(err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}

	// BUG FIX: the template execution error was silently dropped; the
	// response may already be partially written, so just log it.
	if err := t.Execute(w, component); err != nil {
		log.Println(err)
	}
}

// main wires up the SSR index handler and the static file server, then
// blocks serving HTTP on :8080.
func main() {
	http.HandleFunc("/", Index)
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))

	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal(err)
	}
}
package openapi_test

import (
	"testing"

	openapi "github.com/nasa9084/go-openapi"
)

// TestInfoValidate checks Info.Validate over a table of candidates; each
// entry is (name, Info value, whether validation is expected to fail).
func TestInfoValidate(t *testing.T) {
	candidates := []candidate{
		// The zero value must fail: an empty Info is expected to be invalid.
		{"empty", openapi.Info{}, true},
		// Title alone is not enough; Version is also expected to be required.
		{"withTitle", openapi.Info{Title: "foo"}, true},
		// Title plus Version is the minimal valid Info.
		{"withTitleAndVersion", openapi.Info{Title: "foo", Version: "1.0"}, false},
		// "foobar" is not a URL, so TermsOfService is expected to be rejected.
		{"withInvalidToS", openapi.Info{Title: "foo", TermsOfService: "foobar", Version: "1.0"}, true},
	}
	testValidater(t, candidates)
}

add test case: Info contains invalid child

package openapi_test

import (
	"testing"

	openapi "github.com/nasa9084/go-openapi"
)

// TestInfoValidate checks Info.Validate over a table of candidates; each
// entry is (name, Info value, whether validation is expected to fail).
func TestInfoValidate(t *testing.T) {
	candidates := []candidate{
		// The zero value must fail: an empty Info is expected to be invalid.
		{"empty", openapi.Info{}, true},
		// Title alone is not enough; Version is also expected to be required.
		{"withTitle", openapi.Info{Title: "foo"}, true},
		// Title plus Version is the minimal valid Info.
		{"withTitleAndVersion", openapi.Info{Title: "foo", Version: "1.0"}, false},
		// "foobar" is not a URL, so TermsOfService is expected to be rejected.
		{"withInvalidToS", openapi.Info{Title: "foo", TermsOfService: "foobar", Version: "1.0"}, true},
		// An invalid child (License with a non-URL) is expected to propagate
		// a validation failure up to Info.
		{"withInvalidLicense", openapi.Info{Title: "foo", Version: "1.0", License: &openapi.License{URL: "foobar"}}, true},
	}
	testValidater(t, candidates)
}
// Command bitmm is a simple market-making bot for Bitfinex. It derives a theo
// price from recent trades, quotes around it, and re-quotes when the theo or
// the position moves enough.
package main

import (
	"bitmm/bitfinex"
	"fmt"
	"log"
	"math"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"time"

	"github.com/grd/stat"
)

// Trade inputs.
const (
	SYMBOL    = "ltcusd" // Instrument to trade
	MINCHANGE = 0.0025   // Minimum change required to update prices
	TRADENUM  = 25       // Number of trades to use in calculations
	MINO      = 0.5      // Min order size
)

var (
	api        = bitfinex.New(os.Getenv("BITFINEX_KEY"), os.Getenv("BITFINEX_SECRET"))
	apiErrors  = false // Set to true on any error
	liveOrders = false // Set to true on any order
	orderTheo  = 0.0   // Theo value on which the live orders are based
	orderPos   = 0.0   // Position on which the live orders are based

	// Fed in as OS args:
	maxPos      float64 // Maximum position size
	minEdge     float64 // Minimum edge for position entry
	stdMult     float64 // Multiplier for standard deviation
	exitPercent float64 // Percent of edge for position exit
)

func main() {
	if len(os.Args) < 5 {
		fmt.Printf("usage: %s <size> <minimum edge> <stdev multiplier> <exit percent edge>\n",
			filepath.Base(os.Args[0]))
		os.Exit(1)
	}
	fmt.Println("\nInitializing...")

	// Get maxPos, minEdge, stdMult, exitPercent from user input.
	getArgs()

	// Check for input to break the loop.
	inputChan := make(chan rune)
	go checkStdin(inputChan)

	// Run the loop until user input is received.
	runMainLoop(inputChan)
}

// getArgs parses the four positional command-line arguments into the
// package-level trading parameters, exiting on any parse failure.
func getArgs() {
	var err error
	if maxPos, err = strconv.ParseFloat(os.Args[1], 64); err != nil {
		log.Fatal(err)
	}
	if minEdge, err = strconv.ParseFloat(os.Args[2], 64); err != nil {
		log.Fatal(err)
	}
	if stdMult, err = strconv.ParseFloat(os.Args[3], 64); err != nil {
		log.Fatal(err)
	}
	if exitPercent, err = strconv.ParseFloat(os.Args[4], 64); err != nil {
		log.Fatal(err)
	}
}

// checkStdin blocks until the user types anything, then signals inputChan.
func checkStdin(inputChan chan<- rune) {
	var ch rune
	fmt.Scanf("%c", &ch)
	inputChan <- ch
}

// runMainLoop polls trades, recomputes theo/stdev/position, and re-quotes
// until anything arrives on inputChan.
func runMainLoop(inputChan <-chan rune) {
	positionChan := make(chan float64)
	var (
		trades    bitfinex.Trades
		orders    bitfinex.Orders
		start     time.Time
		position  float64
		theo      float64
		stdev     float64
		lastTrade int
	)

	for {
		// Record time for each iteration.
		start = time.Now()

		// Cancel orders and exit if anything was entered by the user.
		select {
		case <-inputChan:
			exit()
			return
		default: // Continue if nothing on chan.
		}

		trades = getTrades()
		// BUG FIX: guard len(trades) before indexing trades[0]. An empty
		// (but error-free) API response previously caused an index panic.
		if !apiErrors && len(trades) > 0 && trades[0].TID != lastTrade { // If new trades
			go checkPosition(positionChan)
			// Do calcs on trade data while waiting for position data.
			theo = calculateTheo(trades)
			stdev = calculateStdev(trades)
			position = <-positionChan
			if (math.Abs(theo-orderTheo) >= MINCHANGE ||
				math.Abs(position-orderPos) >= MINO || !liveOrders) && !apiErrors {
				orders = sendOrders(theo, position, stdev)
			}
		}

		if !apiErrors && len(trades) > 0 {
			printResults(orders, position, stdev, theo, start)
			// Reset for next iteration.
			lastTrade = trades[0].TID
		}

		// Reset for next iteration.
		apiErrors = false
	}
}

// sendOrders cancels any live orders and submits a fresh quote set built from
// the current theo, position, and stdev. On success it records the theo and
// position the quotes were based on.
func sendOrders(theo, position, stdev float64) bitfinex.Orders {
	if liveOrders {
		cancelAll()
	}

	// Send new order request to the exchange.
	params := calculateOrderParams(position, theo, stdev)
	orders, err := api.MultipleNewOrders(params)
	checkErr(err)
	if err == nil {
		liveOrders = true
		orderTheo = theo
		orderPos = position
	}
	return orders
}

// calculateOrderParams builds the quote set for the current position:
// full two-sided quotes when flat, exit-only quotes at max position, and a
// mixed entry/exit set for partial positions.
func calculateOrderParams(position, theo, stdev float64) []bitfinex.OrderParams {
	var params []bitfinex.OrderParams

	if math.Abs(position) < MINO { // No position
		params = []bitfinex.OrderParams{
			{SYMBOL, maxPos, theo - math.Max(stdev, minEdge), "bitfinex", "buy", "limit"},
			{SYMBOL, maxPos, theo + math.Max(stdev, minEdge), "bitfinex", "sell", "limit"},
		}
	} else if position < (-1*maxPos)+MINO { // Max short position
		params = []bitfinex.OrderParams{
			{SYMBOL, -1 * position, theo - stdev*exitPercent, "bitfinex", "buy", "limit"},
		}
	} else if position > maxPos-MINO { // Max long position
		params = []bitfinex.OrderParams{
			{SYMBOL, position, theo + stdev*exitPercent, "bitfinex", "sell", "limit"},
		}
	} else if (-1*maxPos)+MINO <= position && position <= -1*MINO { // Partial short
		params = []bitfinex.OrderParams{
			{SYMBOL, maxPos, theo - math.Max(stdev, minEdge), "bitfinex", "buy", "limit"},
			{SYMBOL, -1 * position, theo - stdev*exitPercent, "bitfinex", "buy", "limit"},
			{SYMBOL, maxPos + position, theo + math.Max(stdev, minEdge), "bitfinex", "sell", "limit"},
		}
	} else if MINO <= position && position <= maxPos-MINO { // Partial long
		params = []bitfinex.OrderParams{
			{SYMBOL, maxPos - position, theo - math.Max(stdev, minEdge), "bitfinex", "buy", "limit"},
			{SYMBOL, position, theo + stdev*exitPercent, "bitfinex", "sell", "limit"},
			{SYMBOL, maxPos, theo + math.Max(stdev, minEdge), "bitfinex", "sell", "limit"},
		}
	}

	return params
}

// checkPosition fetches the active position for SYMBOL and sends it (0 when
// flat) on positionChan.
func checkPosition(positionChan chan<- float64) {
	var position float64
	posSlice, err := api.ActivePositions()
	checkErr(err)
	for _, pos := range posSlice {
		if pos.Symbol == SYMBOL {
			position = pos.Amount
		}
	}
	positionChan <- position
}

// getTrades fetches the most recent TRADENUM trades for SYMBOL.
func getTrades() bitfinex.Trades {
	trades, err := api.Trades(SYMBOL, TRADENUM)
	checkErr(err)
	return trades
}

// calculateTheo computes a volume- and time-weighted average of traded
// prices; more recent and larger trades weigh more.
func calculateTheo(trades bitfinex.Trades) float64 {
	weightDuration := 60 // seconds back for a 50% weight relative to most recent
	mostRecent := trades[0].Timestamp
	var weight, timeDivisor, sum, weightTotal float64
	for _, trade := range trades {
		timeDivisor = float64(mostRecent - trade.Timestamp + weightDuration)
		weight = trade.Amount / timeDivisor
		sum += trade.Price * weight
		weightTotal += weight
	}
	// BUG FIX: avoid a NaN theo when all weights cancel or are zero.
	if weightTotal == 0 {
		return trades[0].Price
	}
	return sum / weightTotal
}

// calculateStdev returns stdMult times the standard deviation of successive
// price differences, or 0 when there are too few trades to compute one.
func calculateStdev(trades bitfinex.Trades) float64 {
	// BUG FIX: with fewer than two trades there are no differences; the
	// previous version allocated a zero-length slice and relied on stat.Sd's
	// behavior on empty input.
	if len(trades) < 2 {
		return 0
	}
	x := make(stat.Float64Slice, len(trades)-1)
	for i := 1; i < len(trades); i++ {
		x[i-1] = trades[i-1].Price - trades[i].Price
	}
	return stdMult * stat.Sd(x)
}

// checkErr is called on any API error: it pulls all quotes, logs the error,
// and flags the iteration so stale data is not acted on.
func checkErr(err error) {
	if err != nil {
		cancelAll()
		log.Println(err)
		apiErrors = true
	}
}

// exit cancels all orders before the program terminates.
func exit() {
	cancelAll()
	fmt.Println("\nCancelled all orders.")
}

// cancelAll retries CancelAll until the exchange confirms.
// NOTE(review): this loops forever if the API keeps failing — consider a
// bounded retry with backoff.
func cancelAll() {
	cancelled := false
	for !cancelled {
		cancelled, _ = api.CancelAll()
	}
	liveOrders = false
}

// printResults redraws the terminal with position, stdev, theo, the active
// orders, and the iteration's processing time.
func printResults(orders bitfinex.Orders, position, stdev, theo float64, start time.Time) {
	clearScreen()
	fmt.Printf("\nPosition: %.2f\n", position)
	fmt.Printf("Stdev: %.4f\n", stdev)
	fmt.Printf("Theo: %.4f\n", theo)
	fmt.Println("\nActive orders:")
	for _, order := range orders.Orders {
		fmt.Printf("%7.2f %s @ %6.4f\n", order.Amount, SYMBOL, order.Price)
	}
	fmt.Printf("\n%v processing time...", time.Since(start))
}

// clearScreen clears the terminal between prints.
func clearScreen() {
	c := exec.Command("clear")
	c.Stdout = os.Stdout
	c.Run()
}
// Package gorillamux: ResultsWriter implementations that serialize command
// results to common HTTP response formats.
package gorillamux

import (
	"bytes"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"net/http"
	"reflect"

	command "github.com/ungerik/go-command"
)

// ResultsWriter writes the result values (or the result error) of a command
// invocation to an HTTP response.
type ResultsWriter interface {
	WriteResults(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error
}

// ResultsWriterFunc adapts a plain function to the ResultsWriter interface.
type ResultsWriterFunc func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error

// WriteResults implements ResultsWriter by calling f.
func (f ResultsWriterFunc) WriteResults(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error {
	return f(args, vars, resultVals, resultErr, writer, request)
}

// encodeJSON marshals response as JSON, honoring the package-level
// PrettyPrint / PrettyPrintIndent settings.
func encodeJSON(response interface{}) ([]byte, error) {
	if PrettyPrint {
		return json.MarshalIndent(response, "", PrettyPrintIndent)
	}
	return json.Marshal(response)
}

// RespondJSON writes every result value JSON-encoded to the response.
// NOTE(review): with more than one result value the encodings are simply
// concatenated, which is not a single valid JSON document — confirm callers
// only use this with zero or one result.
var RespondJSON ResultsWriterFunc = func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error {
	if resultErr != nil {
		return resultErr
	}
	var buf []byte
	for _, resultVal := range resultVals {
		b, err := encodeJSON(resultVal.Interface())
		if err != nil {
			return err
		}
		buf = append(buf, b...)
	}
	writer.Header().Set("Content-Type", "application/json; charset=utf-8")
	writer.Write(buf)
	return nil
}

// RespondJSONField returns a ResultsWriter that wraps the first result value
// in a JSON object under fieldName, e.g. {"fieldName": <result>}.
func RespondJSONField(fieldName string) ResultsWriterFunc {
	return func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) (err error) {
		if resultErr != nil {
			return resultErr
		}
		var buf []byte
		m := make(map[string]interface{})
		if len(resultVals) > 0 {
			m[fieldName] = resultVals[0].Interface()
		}
		buf, err = encodeJSON(m)
		if err != nil {
			return err
		}
		writer.Header().Set("Content-Type", "application/json; charset=utf-8")
		writer.Write(buf)
		return nil
	}
}

// encodeXML marshals response as XML, honoring the package-level
// PrettyPrint / PrettyPrintIndent settings.
func encodeXML(response interface{}) ([]byte, error) {
	if PrettyPrint {
		return xml.MarshalIndent(response, "", PrettyPrintIndent)
	}
	return xml.Marshal(response)
}

// RespondXML writes every result value XML-encoded to the response.
var RespondXML ResultsWriterFunc = func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error {
	if resultErr != nil {
		return resultErr
	}
	var buf []byte
	for _, resultVal := range resultVals {
		b, err := encodeXML(resultVal.Interface())
		if err != nil {
			return err
		}
		buf = append(buf, b...)
	}
	writer.Header().Set("Content-Type", "application/xml; charset=utf-8")
	writer.Write(buf)
	return nil
}

// RespondPlaintext writes every result value %s-formatted as text/plain.
var RespondPlaintext ResultsWriterFunc = func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error {
	if resultErr != nil {
		return resultErr
	}
	var buf bytes.Buffer
	for _, resultVal := range resultVals {
		fmt.Fprintf(&buf, "%s", resultVal.Interface())
	}
	// Consistency fix: use Set like the JSON/XML writers so a stale
	// Content-Type set earlier in the chain cannot produce duplicates.
	writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
	writer.Write(buf.Bytes())
	return nil
}

// RespondHTML writes every result value %s-formatted as text/html.
var RespondHTML ResultsWriterFunc = func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error {
	if resultErr != nil {
		return resultErr
	}
	var buf bytes.Buffer
	for _, resultVal := range resultVals {
		fmt.Fprintf(&buf, "%s", resultVal.Interface())
	}
	// Consistency fix: Set instead of Add, matching the other writers.
	writer.Header().Set("Content-Type", "text/html; charset=utf-8")
	writer.Write(buf.Bytes())
	return nil
}

// RespondNothing writes no response body; it only propagates resultErr.
var RespondNothing ResultsWriterFunc = func(args command.Args, vars map[string]string, resultVals []reflect.Value, resultErr error, writer http.ResponseWriter, request *http.Request) error {
	return resultErr
}
// Copyright (c) 2012 Guillermo Estrada. All rights reserved. // Use of this source code is governed by a MIT // license that can be found in the LICENSE file. // Package image implements blending mode functions bewteen images. // // The fundamental part of the library is the type BlendFunc, // the function is applied to each pixel where the top layer (src) // overlaps the bottom layer (dst) of both given 'image' interfaces. // // This library provides many of the widely used Blend Functions // to be used either as 'mode' parameter to the Blend() primary // function, or to use individually providing two 'color' interfaces. // You can implement your own blending modes and pass them into the // Blend() function. // // This is the list of the currently implemented Blend Functions: // // Add, Color, Color Burn, Color Dodge, Darken, Darker Color, Difference, // Divide, Exclusion, Hard Light, Hard Mix, Hue, Lighten, Lighter Color, // Linear Burn, Linear Dodge, Linear Light, Luminosity, Multiply, Overlay, // Phoenix, Pin Light, Reflex, Saturation, Screen, Soft Light, Substract, // Vivid Light. // // Check github for more details: // http://github.com/phrozen/blend package blend import ( "image" "image/color" "math" ) // Constants of max and mid values for uint16 for internal use. // This can be changed to make the algorithms use uint8 instead, // but they are kept this way to provide more acurate calculations // and to support all of the color modes in the 'image' package. 
const ( max = 65535.0 // equals to 0xFFFF uint16 max range of color.Color mid = max / 2.0 ) var ( ADD BlendFunc COLOR BlendFunc COLOR_BURN BlendFunc COLOR_DODGE BlendFunc DARKEN BlendFunc DARKER_COLOR BlendFunc DIFFERENCE BlendFunc DIVIDE BlendFunc EXCLUSION BlendFunc HARD_LIGHT BlendFunc HARD_MIX BlendFunc HUE BlendFunc LIGHTEN BlendFunc LIGHTER_COLOR BlendFunc LINEAR_BURN BlendFunc LINEAR_DODGE BlendFunc LINEAR_LIGHT BlendFunc LUMINOSITY BlendFunc MULTIPLY BlendFunc OVERLAY BlendFunc PHOENIX BlendFunc PIN_LIGHT BlendFunc REFLEX BlendFunc SATURATION BlendFunc SCREEN BlendFunc SOFT_LIGHT BlendFunc SUBSTRACT BlendFunc VIVID_LIGHT BlendFunc ) // Blends src image (top layer) into dst image (bottom layer) using // the BlendFunc provided by mode. BlendFunc is applied to each pixel // where the src image overlaps the dst image and returns the resulting // image or an error in case of failure. func Blend(src, dst image.Image, mode BlendFunc) (image.Image, error) { // Color model check. Needs more testing to see if there is no problem // using the interfaces, to blend images with different color models. if src.ColorModel() != dst.ColorModel() { return nil, BlendError{"Top layer(src) and bot layer(dst) have different color models."} } // Boundary check to see if we can blend all pixels in the top layer // into the bottom layer. Later and intersection will be used. if !src.Bounds().In(dst.Bounds()) { return nil, BlendError{"Top layer(src) does not fit into bottom layer(dst)."} } // Create a new RGBA or RGBA64 image to return the values. img := image.NewRGBA(dst.Bounds()) for x := 0; x < dst.Bounds().Dx(); x++ { for y := 0; y < dst.Bounds().Dy(); y++ { // If src is inside dst, we blend both pixels if p := image.Pt(x, y); p.In(src.Bounds()) { img.Set(x, y, mode(src.At(x, y), dst.At(x, y))) } else { // else we copy dst pixel. 
img.Set(x, y, dst.At(x, y)) } } } return img, nil } type BlendFunc func(src, dst color.Color) color.Color func blend_per_channel(src, dst color.Color, bf func(float64, float64) float64) color.Color { s, d := color2rgbaf64(src), color2rgbaf64(dst) return rgbaf64{bf(s.r, d.r), bf(s.g, d.g), bf(s.b, d.b), d.a} } // Blending modes supported by Photoshop in order. /*-------------------------------------------------------*/ // DARKEN func darken(src, dst color.Color) color.Color { return blend_per_channel(src, dst, darken_per_ch) } func darken_per_ch(s, d float64) float64 { return math.Min(s, d) } // MULTIPLY func multiply(src, dst color.Color) color.Color { return blend_per_channel(src, dst, multiply_per_ch) } func multiply_per_ch(s, d float64) float64 { return s * d / max } // COLOR BURN func color_burn(src, dst color.Color) color.Color { return blend_per_channel(src, dst, color_burn_per_ch) } func color_burn_per_ch(s, d float64) float64 { if s == 0.0 { return s } return math.Max(0.0, max-((max-d)*max/s)) } // LINEAR BURN func linear_burn(src, dst color.Color) color.Color { return blend_per_channel(src, dst, linear_burn_per_ch) } func linear_burn_per_ch(s, d float64) float64 { if (s + d) < max { return 0.0 } return s + d - max } // DARKER COLOR func darker_color(src, dst color.Color) color.Color { s, d := color2rgbaf64(src), color2rgbaf64(dst) if s.r+s.g+s.b > d.r+d.g+d.b { return dst } return src } /*-------------------------------------------------------*/ // LIGHTEN func lighten(src, dst color.Color) color.Color { return blend_per_channel(src, dst, lighten_per_ch) } func lighten_per_ch(s, d float64) float64 { return math.Max(s, d) } // SCREEN func screen(src, dst color.Color) color.Color { return blend_per_channel(src, dst, screen_per_ch) } func screen_per_ch(s, d float64) float64 { return s + d - s*d/max } // COLOR DODGE func color_dodge(src, dst color.Color) color.Color { return blend_per_channel(src, dst, color_dodge_per_ch) } func color_dodge_per_ch(s, d 
float64) float64 { if s == max { return s } return math.Min(max, (d * max / (max - s))) } // LINEAR DODGE func linear_dodge(src, dst color.Color) color.Color { return blend_per_channel(src, dst, linear_dodge_per_ch) } func linear_dodge_per_ch(s, d float64) float64 { return math.Min(s+d, max) } // LIGHTER COLOR func lighter_color(src, dst color.Color) color.Color { s, d := color2rgbaf64(src), color2rgbaf64(dst) if s.r+s.g+s.b > d.r+d.g+d.b { return src } return dst } /*-------------------------------------------------------*/ // OVERLAY func overlay(src, dst color.Color) color.Color { return blend_per_channel(src, dst, overlay_per_ch) } func overlay_per_ch(s, d float64) float64 { if d < mid { return 2 * s * d / max } return max - 2*(max-s)*(max-d)/max } // SOFT LIGHT func soft_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, soft_light_per_ch) } func soft_light_per_ch(s, d float64) float64 { return (d / max) * (d + (2*s/max)*(max-d)) } // HARD LIGHT func hard_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, hard_light_per_ch) } func hard_light_per_ch(s, d float64) float64 { if s > mid { return d + (max-d)*((s-mid)/mid) } return d * s / mid } // VIVID LIGHT (check) func vivid_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, vivid_light_per_ch) } func vivid_light_per_ch(s, d float64) float64 { if s < mid { return color_burn_per_ch(d, (2 * s)) } return color_dodge_per_ch(d, (2 * (s - mid))) } // LINEAR LIGHT func linear_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, linear_light_per_ch) } func linear_light_per_ch(s, d float64) float64 { if s < mid { return linear_burn_per_ch(d, (2 * s)) } return linear_dodge_per_ch(d, (2 * (s - mid))) } // PIN LIGHT func pin_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, pin_light_per_ch) } func pin_light_per_ch(s, d float64) float64 { if s < mid { return darken_per_ch(d, (2 * s)) } return 
lighten_per_ch(d, (2 * (s - mid))) } // HARD MIX (check) func hard_mix(src, dst color.Color) color.Color { return blend_per_channel(src, dst, hard_mix_per_ch) } func hard_mix_per_ch(s, d float64) float64 { if vivid_light_per_ch(s, d) < mid { return 0.0 } return max } /*-------------------------------------------------------*/ // DIFFERENCE func difference(src, dst color.Color) color.Color { return blend_per_channel(src, dst, difference_per_ch) } func difference_per_ch(s, d float64) float64 { return math.Abs(s - d) } // EXCLUSION func exclusion(src, dst color.Color) color.Color { return blend_per_channel(src, dst, exclusion_per_ch) } func exclusion_per_ch(s, d float64) float64 { return s + d - s*d/mid } // SUBSTRACT func substract(src, dst color.Color) color.Color { return blend_per_channel(src, dst, substract_per_ch) } func substract_per_ch(s, d float64) float64 { if d-s < 0.0 { return 0.0 } return d - s } // DIVIDE func divide(src, dst color.Color) color.Color { return blend_per_channel(src, dst, divide_per_ch) } func divide_per_ch(s, d float64) float64 { return (d*max)/s + 1.0 } // Blending modes that use HSL color model transformations. /*-------------------------------------------------------*/ // HUE func hue(src, dst color.Color) color.Color { s := rgb2hsl(src) if s.s == 0.0 { return dst } d := rgb2hsl(dst) return hsl2rgb(s.h, d.s, d.l) } // SATURATION func saturation(src, dst color.Color) color.Color { s := rgb2hsl(src) d := rgb2hsl(dst) return hsl2rgb(d.h, s.s, d.l) } // COLOR "added _ to avoid namespace conflict with 'color' package" func color_(src, dst color.Color) color.Color { s := rgb2hsl(src) d := rgb2hsl(dst) return hsl2rgb(s.h, s.s, d.l) } // LUMINOSITY func luminosity(src, dst color.Color) color.Color { s := rgb2hsl(src) d := rgb2hsl(dst) return hsl2rgb(d.h, d.s, s.l) } // This blending modes are not implemented in Photoshop // or GIMP at the moment, but produced their desired results. 
/*-------------------------------------------------------*/ // ADD func add(src, dst color.Color) color.Color { return blend_per_channel(src, dst, add_per_ch) } func add_per_ch(s, d float64) float64 { if s+d > max { return max } return s + d } // REFLEX (a.k.a GLOW) func reflex(src, dst color.Color) color.Color { return blend_per_channel(src, dst, reflex_per_ch) } func reflex_per_ch(s, d float64) float64 { if s == max { return s } return math.Min(max, (d * d / (max - s))) } // PHOENIX func phoenix(src, dst color.Color) color.Color { return blend_per_channel(src, dst, phoenix_per_ch) } func phoenix_per_ch(s, d float64) float64 { return math.Min(s, d) - math.Max(s, d) + max } // Init function maps the blendingmode functions. func init() { DARKEN = darken MULTIPLY = multiply COLOR_BURN = color_burn LINEAR_BURN = linear_burn DARKER_COLOR = darker_color LIGHTEN = lighten SCREEN = screen COLOR_DODGE = color_dodge LINEAR_DODGE = linear_dodge LIGHTER_COLOR = lighter_color OVERLAY = overlay SOFT_LIGHT = soft_light HARD_LIGHT = hard_light VIVID_LIGHT = vivid_light LINEAR_LIGHT = linear_light PIN_LIGHT = pin_light HARD_MIX = hard_mix DIFFERENCE = difference EXCLUSION = exclusion SUBSTRACT = substract DIVIDE = divide HUE = hue SATURATION = saturation COLOR = color_ LUMINOSITY = luminosity ADD = add REFLEX = reflex PHOENIX = phoenix } Modified code documentation. // Copyright (c) 2012 Guillermo Estrada. All rights reserved. // Use of this source code is governed by a MIT // license that can be found in the LICENSE file. // Package image implements blending mode functions bewteen images. // // The fundamental part of the library is the type BlendFunc, // the function is applied to each pixel where the top layer (src) // overlaps the bottom layer (dst) of both given 'image' interfaces. 
// // This library provides many of the widely used blending functions // to be used either as 'mode' parameter to the Blend() primary // function, or to be used individually providing two 'color' interfaces. // You can implement your own blending modes and pass them into the // Blend() function. // // This is the list of the currently implemented blending modes: // // Add, Color, Color Burn, Color Dodge, Darken, Darker Color, Difference, // Divide, Exclusion, Hard Light, Hard Mix, Hue, Lighten, Lighter Color, // Linear Burn, Linear Dodge, Linear Light, Luminosity, Multiply, Overlay, // Phoenix, Pin Light, Reflex, Saturation, Screen, Soft Light, Substract, // Vivid Light. // // Check github for more details: // http://github.com/phrozen/blend package blend import ( "image" "image/color" "math" ) // Constants of max and mid values for uint16 for internal use. // This can be changed to make the algorithms use uint8 instead, // but they are kept this way to provide more acurate calculations // and to support all of the color modes in the 'image' package. const ( max = 65535.0 // equals to 0xFFFF uint16 max range of color.Color mid = max / 2.0 ) var ( ADD BlendFunc COLOR BlendFunc COLOR_BURN BlendFunc COLOR_DODGE BlendFunc DARKEN BlendFunc DARKER_COLOR BlendFunc DIFFERENCE BlendFunc DIVIDE BlendFunc EXCLUSION BlendFunc HARD_LIGHT BlendFunc HARD_MIX BlendFunc HUE BlendFunc LIGHTEN BlendFunc LIGHTER_COLOR BlendFunc LINEAR_BURN BlendFunc LINEAR_DODGE BlendFunc LINEAR_LIGHT BlendFunc LUMINOSITY BlendFunc MULTIPLY BlendFunc OVERLAY BlendFunc PHOENIX BlendFunc PIN_LIGHT BlendFunc REFLEX BlendFunc SATURATION BlendFunc SCREEN BlendFunc SOFT_LIGHT BlendFunc SUBSTRACT BlendFunc VIVID_LIGHT BlendFunc ) // Blends src image (top layer) into dst image (bottom layer) using // the BlendFunc provided by mode. BlendFunc is applied to each pixel // where the src image overlaps the dst image and returns the resulting // image or an error in case of failure. 
func Blend(src, dst image.Image, mode BlendFunc) (image.Image, error) { // Color model check. Needs more testing to see if there is no problem // using the interfaces, to blend images with different color models. if src.ColorModel() != dst.ColorModel() { return nil, BlendError{"Top layer(src) and bot layer(dst) have different color models."} } // Boundary check to see if we can blend all pixels in the top layer // into the bottom layer. Later and intersection will be used. if !src.Bounds().In(dst.Bounds()) { return nil, BlendError{"Top layer(src) does not fit into bottom layer(dst)."} } // Create a new RGBA or RGBA64 image to return the values. img := image.NewRGBA(dst.Bounds()) for x := 0; x < dst.Bounds().Dx(); x++ { for y := 0; y < dst.Bounds().Dy(); y++ { // If src is inside dst, we blend both pixels if p := image.Pt(x, y); p.In(src.Bounds()) { img.Set(x, y, mode(src.At(x, y), dst.At(x, y))) } else { // else we copy dst pixel. img.Set(x, y, dst.At(x, y)) } } } return img, nil } type BlendFunc func(src, dst color.Color) color.Color func blend_per_channel(src, dst color.Color, bf func(float64, float64) float64) color.Color { s, d := color2rgbaf64(src), color2rgbaf64(dst) return rgbaf64{bf(s.r, d.r), bf(s.g, d.g), bf(s.b, d.b), d.a} } // Blending modes supported by Photoshop in order. 
/*-------------------------------------------------------*/ // DARKEN func darken(src, dst color.Color) color.Color { return blend_per_channel(src, dst, darken_per_ch) } func darken_per_ch(s, d float64) float64 { return math.Min(s, d) } // MULTIPLY func multiply(src, dst color.Color) color.Color { return blend_per_channel(src, dst, multiply_per_ch) } func multiply_per_ch(s, d float64) float64 { return s * d / max } // COLOR BURN func color_burn(src, dst color.Color) color.Color { return blend_per_channel(src, dst, color_burn_per_ch) } func color_burn_per_ch(s, d float64) float64 { if s == 0.0 { return s } return math.Max(0.0, max-((max-d)*max/s)) } // LINEAR BURN func linear_burn(src, dst color.Color) color.Color { return blend_per_channel(src, dst, linear_burn_per_ch) } func linear_burn_per_ch(s, d float64) float64 { if (s + d) < max { return 0.0 } return s + d - max } // DARKER COLOR func darker_color(src, dst color.Color) color.Color { s, d := color2rgbaf64(src), color2rgbaf64(dst) if s.r+s.g+s.b > d.r+d.g+d.b { return dst } return src } /*-------------------------------------------------------*/ // LIGHTEN func lighten(src, dst color.Color) color.Color { return blend_per_channel(src, dst, lighten_per_ch) } func lighten_per_ch(s, d float64) float64 { return math.Max(s, d) } // SCREEN func screen(src, dst color.Color) color.Color { return blend_per_channel(src, dst, screen_per_ch) } func screen_per_ch(s, d float64) float64 { return s + d - s*d/max } // COLOR DODGE func color_dodge(src, dst color.Color) color.Color { return blend_per_channel(src, dst, color_dodge_per_ch) } func color_dodge_per_ch(s, d float64) float64 { if s == max { return s } return math.Min(max, (d * max / (max - s))) } // LINEAR DODGE func linear_dodge(src, dst color.Color) color.Color { return blend_per_channel(src, dst, linear_dodge_per_ch) } func linear_dodge_per_ch(s, d float64) float64 { return math.Min(s+d, max) } // LIGHTER COLOR func lighter_color(src, dst color.Color) color.Color { s, 
d := color2rgbaf64(src), color2rgbaf64(dst) if s.r+s.g+s.b > d.r+d.g+d.b { return src } return dst } /*-------------------------------------------------------*/ // OVERLAY func overlay(src, dst color.Color) color.Color { return blend_per_channel(src, dst, overlay_per_ch) } func overlay_per_ch(s, d float64) float64 { if d < mid { return 2 * s * d / max } return max - 2*(max-s)*(max-d)/max } // SOFT LIGHT func soft_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, soft_light_per_ch) } func soft_light_per_ch(s, d float64) float64 { return (d / max) * (d + (2*s/max)*(max-d)) } // HARD LIGHT func hard_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, hard_light_per_ch) } func hard_light_per_ch(s, d float64) float64 { if s > mid { return d + (max-d)*((s-mid)/mid) } return d * s / mid } // VIVID LIGHT (check) func vivid_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, vivid_light_per_ch) } func vivid_light_per_ch(s, d float64) float64 { if s < mid { return color_burn_per_ch(d, (2 * s)) } return color_dodge_per_ch(d, (2 * (s - mid))) } // LINEAR LIGHT func linear_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, linear_light_per_ch) } func linear_light_per_ch(s, d float64) float64 { if s < mid { return linear_burn_per_ch(d, (2 * s)) } return linear_dodge_per_ch(d, (2 * (s - mid))) } // PIN LIGHT func pin_light(src, dst color.Color) color.Color { return blend_per_channel(src, dst, pin_light_per_ch) } func pin_light_per_ch(s, d float64) float64 { if s < mid { return darken_per_ch(d, (2 * s)) } return lighten_per_ch(d, (2 * (s - mid))) } // HARD MIX (check) func hard_mix(src, dst color.Color) color.Color { return blend_per_channel(src, dst, hard_mix_per_ch) } func hard_mix_per_ch(s, d float64) float64 { if vivid_light_per_ch(s, d) < mid { return 0.0 } return max } /*-------------------------------------------------------*/ // DIFFERENCE func difference(src, dst 
color.Color) color.Color { return blend_per_channel(src, dst, difference_per_ch) } func difference_per_ch(s, d float64) float64 { return math.Abs(s - d) } // EXCLUSION func exclusion(src, dst color.Color) color.Color { return blend_per_channel(src, dst, exclusion_per_ch) } func exclusion_per_ch(s, d float64) float64 { return s + d - s*d/mid } // SUBSTRACT func substract(src, dst color.Color) color.Color { return blend_per_channel(src, dst, substract_per_ch) } func substract_per_ch(s, d float64) float64 { if d-s < 0.0 { return 0.0 } return d - s } // DIVIDE func divide(src, dst color.Color) color.Color { return blend_per_channel(src, dst, divide_per_ch) } func divide_per_ch(s, d float64) float64 { return (d*max)/s + 1.0 } // Blending modes that use HSL color model transformations. /*-------------------------------------------------------*/ // HUE func hue(src, dst color.Color) color.Color { s := rgb2hsl(src) if s.s == 0.0 { return dst } d := rgb2hsl(dst) return hsl2rgb(s.h, d.s, d.l) } // SATURATION func saturation(src, dst color.Color) color.Color { s := rgb2hsl(src) d := rgb2hsl(dst) return hsl2rgb(d.h, s.s, d.l) } // COLOR "added _ to avoid namespace conflict with 'color' package" func color_(src, dst color.Color) color.Color { s := rgb2hsl(src) d := rgb2hsl(dst) return hsl2rgb(s.h, s.s, d.l) } // LUMINOSITY func luminosity(src, dst color.Color) color.Color { s := rgb2hsl(src) d := rgb2hsl(dst) return hsl2rgb(d.h, d.s, s.l) } // This blending modes are not implemented in Photoshop // or GIMP at the moment, but produced their desired results. 
/*-------------------------------------------------------*/ // ADD func add(src, dst color.Color) color.Color { return blend_per_channel(src, dst, add_per_ch) } func add_per_ch(s, d float64) float64 { if s+d > max { return max } return s + d } // REFLEX (a.k.a GLOW) func reflex(src, dst color.Color) color.Color { return blend_per_channel(src, dst, reflex_per_ch) } func reflex_per_ch(s, d float64) float64 { if s == max { return s } return math.Min(max, (d * d / (max - s))) } // PHOENIX func phoenix(src, dst color.Color) color.Color { return blend_per_channel(src, dst, phoenix_per_ch) } func phoenix_per_ch(s, d float64) float64 { return math.Min(s, d) - math.Max(s, d) + max } // Init function maps the blendingmode functions. func init() { DARKEN = darken MULTIPLY = multiply COLOR_BURN = color_burn LINEAR_BURN = linear_burn DARKER_COLOR = darker_color LIGHTEN = lighten SCREEN = screen COLOR_DODGE = color_dodge LINEAR_DODGE = linear_dodge LIGHTER_COLOR = lighter_color OVERLAY = overlay SOFT_LIGHT = soft_light HARD_LIGHT = hard_light VIVID_LIGHT = vivid_light LINEAR_LIGHT = linear_light PIN_LIGHT = pin_light HARD_MIX = hard_mix DIFFERENCE = difference EXCLUSION = exclusion SUBSTRACT = substract DIVIDE = divide HUE = hue SATURATION = saturation COLOR = color_ LUMINOSITY = luminosity ADD = add REFLEX = reflex PHOENIX = phoenix }
package gui import ( "fmt" "glop/gin" "glop/sprite" "os" "image" "image/draw" _ "image/png" _ "image/jpeg" "gl" "gl/glu" "math" "github.com/arbaal/mathgl" ) func init() { fmt.Printf("") } type staticCellData struct { // Number of AP required to move into this square, move_cost < 0 is impassable move_cost int } type dynamicCellData struct { s *sprite.Sprite } type cell struct { staticCellData dynamicCellData } // TODO: change to float32 everywhere since that's what mathgl uses type Terrain struct { Childless EmbeddedWidget BasicZone NonThinker // Length of the side of block in the source image. block_size int // All events received by the terrain are passed to the handler handler gin.EventHandler // Focus, in map coordinates fx,fy float32 // The viewing angle, 0 means the map is viewed head-on, 90 means the map is viewed // on its edge (i.e. it would not be visible) angle float32 // Zoom factor, 1.0 is standard zoom float32 // The modelview matrix that is sent to opengl. Updated any time focus, zoom, or viewing // angle changes mat mathgl.Mat4 // Inverse of mat imat mathgl.Mat4 // All drawables that will be drawn parallel to the window upright_drawables []sprite.ZDrawable upright_positions []mathgl.Vec3 // All drawables that will be drawn on the surface of the terrain flattened_drawables []sprite.ZDrawable flattened_positions []mathgl.Vec3 // Don't need to keep the image around once it's loaded into texture memory, // only need to keep around the dimensions bg image.Image texture gl.Texture } func (t *Terrain) String() string { return "terrain" } func (t *Terrain) AddUprightDrawable(x,y float32, zd sprite.ZDrawable) { t.upright_drawables = append(t.upright_drawables, zd) t.upright_positions = append(t.upright_positions, mathgl.Vec3{ x, y, 0 }) } // x,y: board coordinates that the drawable should be drawn at. 
// zd: drawable that will be rendered after the terrain has been rendered, it will be rendered // with the same modelview matrix as the rest of the terrain func (t *Terrain) AddFlattenedDrawable(x,y float32, zd sprite.ZDrawable) { t.flattened_drawables = append(t.flattened_drawables, zd) t.flattened_positions = append(t.flattened_positions, mathgl.Vec3{ x, y, 0 }) } func MakeTerrain(bg_path string, block_size,dx,dy int, angle float32) (*Terrain, os.Error) { var t Terrain t.EmbeddedWidget = &BasicWidget{ CoreWidget : &t } f,err := os.Open(bg_path) if err != nil { return nil, err } defer f.Close() t.bg,_,err = image.Decode(f) if err != nil { return nil, err } t.block_size = block_size t.angle = angle rgba := image.NewRGBA(t.bg.Bounds().Dx(), t.bg.Bounds().Dy()) draw.Draw(rgba, t.bg.Bounds(), t.bg, image.Point{0,0}, draw.Over) gl.Enable(gl.TEXTURE_2D) t.texture = gl.GenTexture() t.texture.Bind(gl.TEXTURE_2D) gl.TexEnvf(gl.TEXTURE_ENV, gl.TEXTURE_ENV_MODE, gl.MODULATE) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT) glu.Build2DMipmaps(gl.TEXTURE_2D, 4, t.bg.Bounds().Dx(), t.bg.Bounds().Dy(), gl.RGBA, rgba.Pix) if err != nil { return nil,err } t.zoom = 1.0 t.makeMat() t.Request_dims.Dx = 100 t.Request_dims.Dy = 100 t.Ex = true t.Ey = true return &t, nil } func (t *Terrain) makeMat() { var m mathgl.Mat4 t.mat.Translation(float32(t.Render_region.Dx/2 + t.Render_region.X), float32(t.Render_region.Dy/2 + t.Render_region.Y), 0) m.RotationZ(45 * math.Pi / 180) t.mat.Multiply(&m) m.RotationAxisAngle(mathgl.Vec3{ X : -1, Y : 1}, -float32(t.angle) * math.Pi / 180) t.mat.Multiply(&m) s := float32(t.zoom) m.Scaling(s, s, s) t.mat.Multiply(&m) // Move the terrain so that (t.fx,t.fy) is at the origin, and hence becomes centered // in the window xoff := (t.fx + 0.5) * 
float32(t.block_size) yoff := (t.fy + 0.5) * float32(t.block_size) m.Translation(-xoff, -yoff, 0) t.mat.Multiply(&m) t.imat.Assign(&t.mat) t.imat.Inverse() } // Transforms a cursor position in window coordinates to board coordinates. Does not check // to make sure that the values returned represent a valid position on the board. func (t *Terrain) WindowToBoard(wx,wy int) (float32, float32) { mx := float32(wx) my := float32(wy) return t.modelviewToBoard(mx, my) } func (t *Terrain) modelviewToBoard(mx,my float32) (float32,float32) { mz := (my - float32(t.Render_region.Y + t.Render_region.Dy/2)) * float32(math.Tan(float64(t.angle * math.Pi / 180))) v := mathgl.Vec4{ X : mx, Y : my, Z : mz, W : 1 } v.Transform(&t.imat) return v.X / float32(t.block_size), v.Y / float32(t.block_size) } func (t *Terrain) boardToModelview(mx,my float32) (x,y,z float32) { v := mathgl.Vec4{ X : mx * float32(t.block_size), Y : my * float32(t.block_size), W : 1 } v.Transform(&t.mat) x,y,z = v.X, v.Y, v.Z return } func clamp(f,min,max float32) float32 { if f < min { return min } if f > max { return max } return f } // The change in x and y screen coordinates to apply to point on the terrain the is in // focus. These coordinates will be scaled by the current zoom. 
func (t *Terrain) Move(dx,dy float64) { if dx == 0 && dy == 0 { return } dy /= math.Sin(float64(t.angle) * math.Pi / 180) dx,dy = dy+dx, dy-dx t.fx += float32(dx) / t.zoom t.fy += float32(dy) / t.zoom t.fx = clamp(t.fx, 0, float32(t.bg.Bounds().Dx() / t.block_size)) t.fy = clamp(t.fy, 0, float32(t.bg.Bounds().Dy() / t.block_size)) t.makeMat() } // Changes the current zoom from e^(zoom) to e^(zoom+dz) func (t *Terrain) Zoom(dz float64) { if dz == 0 { return } exp := math.Log(float64(t.zoom)) + dz exp = float64(clamp(float32(exp), -1.25, 1.25)) t.zoom = float32(math.Exp(exp)) t.makeMat() } func (t *Terrain) Draw(region Region) { region.PushClipPlanes() defer region.PopClipPlanes() if t.Render_region.X != region.X || t.Render_region.Y != region.Y || t.Render_region.Dx != region.Dx || t.Render_region.Dy != region.Dy { t.Render_region = region t.makeMat() } gl.MatrixMode(gl.MODELVIEW) gl.PushMatrix() gl.LoadIdentity() gl.MultMatrixf(&t.mat[0]) defer gl.PopMatrix() gl.Disable(gl.DEPTH_TEST) gl.Disable(gl.TEXTURE_2D) gl.PolygonMode(gl.FRONT_AND_BACK, gl.FILL) gl.Color3d(1, 0, 0) gl.Enable(gl.BLEND) gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA) fdx := float32(t.bg.Bounds().Dx()) fdy := float32(t.bg.Bounds().Dy()) // Draw a simple border around the terrain gl.Color4d(1,.3,.3,1) gl.Begin(gl.QUADS) fbs := float32(t.block_size) gl.Vertex2f( -fbs, -fbs) gl.Vertex2f( -fbs, fdy+fbs) gl.Vertex2f(fdx+fbs, fdy+fbs) gl.Vertex2f(fdx+fbs, -fbs) gl.End() gl.Enable(gl.TEXTURE_2D) t.texture.Bind(gl.TEXTURE_2D) gl.Color4d(1.0, 1.0, 1.0, 1.0) gl.Begin(gl.QUADS) gl.TexCoord2f(0, 0) gl.Vertex2f(0, 0) gl.TexCoord2f(0, -1) gl.Vertex2f(0, fdy) gl.TexCoord2f(1, -1) gl.Vertex2f(fdx, fdy) gl.TexCoord2f(1, 0) gl.Vertex2f(fdx, 0) gl.End() gl.Disable(gl.TEXTURE_2D) gl.Color4f(0,0,0, 0.5) gl.Begin(gl.LINES) for i := float32(0); i < float32(t.bg.Bounds().Dx()); i += float32(t.block_size) { gl.Vertex2f(i, 0) gl.Vertex2f(i, float32(t.bg.Bounds().Dy())) } for j := float32(0); j < 
float32(t.bg.Bounds().Dy()); j += float32(t.block_size) { gl.Vertex2f(0, j) gl.Vertex2f(float32(t.bg.Bounds().Dx()), j) } gl.End() for i := range t.flattened_positions { v := t.flattened_positions[i] t.flattened_drawables[i].Render(v.X, v.Y, 0, float32(t.block_size)) } t.flattened_positions = t.flattened_positions[0:0] t.flattened_drawables = t.flattened_drawables[0:0] for i := range t.upright_positions { vx,vy,vz := t.boardToModelview(t.upright_positions[i].X, t.upright_positions[i].Y) t.upright_positions[i] = mathgl.Vec3{ vx, vy, vz } } sprite.ZSort(t.upright_positions, t.upright_drawables) gl.Disable(gl.TEXTURE_2D) gl.PushMatrix() gl.LoadIdentity() for i := range t.upright_positions { v := t.upright_positions[i] t.upright_drawables[i].Render(v.X, v.Y, v.Z, float32(t.zoom)) } t.upright_positions = t.upright_positions[0:0] t.upright_drawables = t.upright_drawables[0:0] gl.PopMatrix() } func (t *Terrain) SetEventHandler(handler gin.EventHandler) { t.handler = handler } func (t *Terrain) DoRespond(event_group EventGroup) (bool,bool) { if t.handler != nil { t.handler.HandleEventGroup(event_group.EventGroup) } return false,false } No longer keep around terrain texture in normal memory package gui import ( "fmt" "glop/gin" "glop/sprite" "os" "image" "image/draw" _ "image/png" _ "image/jpeg" "gl" "gl/glu" "math" "github.com/arbaal/mathgl" ) func init() { fmt.Printf("") } type staticCellData struct { // Number of AP required to move into this square, move_cost < 0 is impassable move_cost int } type dynamicCellData struct { s *sprite.Sprite } type cell struct { staticCellData dynamicCellData } // TODO: change to float32 everywhere since that's what mathgl uses type Terrain struct { Childless EmbeddedWidget BasicZone NonThinker // Length of the side of block in the source image. 
block_size int // All events received by the terrain are passed to the handler handler gin.EventHandler // Focus, in map coordinates fx,fy float32 // The viewing angle, 0 means the map is viewed head-on, 90 means the map is viewed // on its edge (i.e. it would not be visible) angle float32 // Zoom factor, 1.0 is standard zoom float32 // The modelview matrix that is sent to opengl. Updated any time focus, zoom, or viewing // angle changes mat mathgl.Mat4 // Inverse of mat imat mathgl.Mat4 // All drawables that will be drawn parallel to the window upright_drawables []sprite.ZDrawable upright_positions []mathgl.Vec3 // All drawables that will be drawn on the surface of the terrain flattened_drawables []sprite.ZDrawable flattened_positions []mathgl.Vec3 // Don't need to keep the image around once it's loaded into texture memory, // only need to keep around the dimensions bg_dims Dims texture gl.Texture } func (t *Terrain) String() string { return "terrain" } func (t *Terrain) AddUprightDrawable(x,y float32, zd sprite.ZDrawable) { t.upright_drawables = append(t.upright_drawables, zd) t.upright_positions = append(t.upright_positions, mathgl.Vec3{ x, y, 0 }) } // x,y: board coordinates that the drawable should be drawn at. 
// zd: drawable that will be rendered after the terrain has been rendered, it will be rendered // with the same modelview matrix as the rest of the terrain func (t *Terrain) AddFlattenedDrawable(x,y float32, zd sprite.ZDrawable) { t.flattened_drawables = append(t.flattened_drawables, zd) t.flattened_positions = append(t.flattened_positions, mathgl.Vec3{ x, y, 0 }) } func MakeTerrain(bg_path string, block_size,dx,dy int, angle float32) (*Terrain, os.Error) { var t Terrain t.EmbeddedWidget = &BasicWidget{ CoreWidget : &t } f,err := os.Open(bg_path) if err != nil { return nil, err } defer f.Close() bg,_,err := image.Decode(f) if err != nil { return nil, err } t.block_size = block_size t.angle = angle t.bg_dims.Dx = bg.Bounds().Dx() t.bg_dims.Dy = bg.Bounds().Dy() rgba := image.NewRGBA(t.bg_dims.Dx, t.bg_dims.Dy) draw.Draw(rgba, bg.Bounds(), bg, image.Point{0,0}, draw.Over) gl.Enable(gl.TEXTURE_2D) t.texture = gl.GenTexture() t.texture.Bind(gl.TEXTURE_2D) gl.TexEnvf(gl.TEXTURE_ENV, gl.TEXTURE_ENV_MODE, gl.MODULATE) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT) gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT) glu.Build2DMipmaps(gl.TEXTURE_2D, 4, t.bg_dims.Dx, t.bg_dims.Dy, gl.RGBA, rgba.Pix) if err != nil { return nil,err } t.zoom = 1.0 t.makeMat() t.Request_dims.Dx = 100 t.Request_dims.Dy = 100 t.Ex = true t.Ey = true return &t, nil } func (t *Terrain) makeMat() { var m mathgl.Mat4 t.mat.Translation(float32(t.Render_region.Dx/2 + t.Render_region.X), float32(t.Render_region.Dy/2 + t.Render_region.Y), 0) m.RotationZ(45 * math.Pi / 180) t.mat.Multiply(&m) m.RotationAxisAngle(mathgl.Vec3{ X : -1, Y : 1}, -float32(t.angle) * math.Pi / 180) t.mat.Multiply(&m) s := float32(t.zoom) m.Scaling(s, s, s) t.mat.Multiply(&m) // Move the terrain so that (t.fx,t.fy) is at the origin, and hence becomes centered // in the 
window xoff := (t.fx + 0.5) * float32(t.block_size) yoff := (t.fy + 0.5) * float32(t.block_size) m.Translation(-xoff, -yoff, 0) t.mat.Multiply(&m) t.imat.Assign(&t.mat) t.imat.Inverse() } // Transforms a cursor position in window coordinates to board coordinates. Does not check // to make sure that the values returned represent a valid position on the board. func (t *Terrain) WindowToBoard(wx,wy int) (float32, float32) { mx := float32(wx) my := float32(wy) return t.modelviewToBoard(mx, my) } func (t *Terrain) modelviewToBoard(mx,my float32) (float32,float32) { mz := (my - float32(t.Render_region.Y + t.Render_region.Dy/2)) * float32(math.Tan(float64(t.angle * math.Pi / 180))) v := mathgl.Vec4{ X : mx, Y : my, Z : mz, W : 1 } v.Transform(&t.imat) return v.X / float32(t.block_size), v.Y / float32(t.block_size) } func (t *Terrain) boardToModelview(mx,my float32) (x,y,z float32) { v := mathgl.Vec4{ X : mx * float32(t.block_size), Y : my * float32(t.block_size), W : 1 } v.Transform(&t.mat) x,y,z = v.X, v.Y, v.Z return } func clamp(f,min,max float32) float32 { if f < min { return min } if f > max { return max } return f } // The change in x and y screen coordinates to apply to point on the terrain the is in // focus. These coordinates will be scaled by the current zoom. 
func (t *Terrain) Move(dx,dy float64) { if dx == 0 && dy == 0 { return } dy /= math.Sin(float64(t.angle) * math.Pi / 180) dx,dy = dy+dx, dy-dx t.fx += float32(dx) / t.zoom t.fy += float32(dy) / t.zoom t.fx = clamp(t.fx, 0, float32(t.bg_dims.Dx / t.block_size)) t.fy = clamp(t.fy, 0, float32(t.bg_dims.Dy / t.block_size)) t.makeMat() } // Changes the current zoom from e^(zoom) to e^(zoom+dz) func (t *Terrain) Zoom(dz float64) { if dz == 0 { return } exp := math.Log(float64(t.zoom)) + dz exp = float64(clamp(float32(exp), -1.25, 1.25)) t.zoom = float32(math.Exp(exp)) t.makeMat() } func (t *Terrain) Draw(region Region) { region.PushClipPlanes() defer region.PopClipPlanes() if t.Render_region.X != region.X || t.Render_region.Y != region.Y || t.Render_region.Dx != region.Dx || t.Render_region.Dy != region.Dy { t.Render_region = region t.makeMat() } gl.MatrixMode(gl.MODELVIEW) gl.PushMatrix() gl.LoadIdentity() gl.MultMatrixf(&t.mat[0]) defer gl.PopMatrix() gl.Disable(gl.DEPTH_TEST) gl.Disable(gl.TEXTURE_2D) gl.PolygonMode(gl.FRONT_AND_BACK, gl.FILL) gl.Color3d(1, 0, 0) gl.Enable(gl.BLEND) gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA) fdx := float32(t.bg_dims.Dx) fdy := float32(t.bg_dims.Dy) // Draw a simple border around the terrain gl.Color4d(1,.3,.3,1) gl.Begin(gl.QUADS) fbs := float32(t.block_size) gl.Vertex2f( -fbs, -fbs) gl.Vertex2f( -fbs, fdy+fbs) gl.Vertex2f(fdx+fbs, fdy+fbs) gl.Vertex2f(fdx+fbs, -fbs) gl.End() gl.Enable(gl.TEXTURE_2D) t.texture.Bind(gl.TEXTURE_2D) gl.Color4d(1.0, 1.0, 1.0, 1.0) gl.Begin(gl.QUADS) gl.TexCoord2f(0, 0) gl.Vertex2f(0, 0) gl.TexCoord2f(0, -1) gl.Vertex2f(0, fdy) gl.TexCoord2f(1, -1) gl.Vertex2f(fdx, fdy) gl.TexCoord2f(1, 0) gl.Vertex2f(fdx, 0) gl.End() gl.Disable(gl.TEXTURE_2D) gl.Color4f(0,0,0, 0.5) gl.Begin(gl.LINES) for i := float32(0); i < float32(t.bg_dims.Dx); i += float32(t.block_size) { gl.Vertex2f(i, 0) gl.Vertex2f(i, float32(t.bg_dims.Dy)) } for j := float32(0); j < float32(t.bg_dims.Dy); j += float32(t.block_size) { 
gl.Vertex2f(0, j) gl.Vertex2f(float32(t.bg_dims.Dx), j) } gl.End() for i := range t.flattened_positions { v := t.flattened_positions[i] t.flattened_drawables[i].Render(v.X, v.Y, 0, float32(t.block_size)) } t.flattened_positions = t.flattened_positions[0:0] t.flattened_drawables = t.flattened_drawables[0:0] for i := range t.upright_positions { vx,vy,vz := t.boardToModelview(t.upright_positions[i].X, t.upright_positions[i].Y) t.upright_positions[i] = mathgl.Vec3{ vx, vy, vz } } sprite.ZSort(t.upright_positions, t.upright_drawables) gl.Disable(gl.TEXTURE_2D) gl.PushMatrix() gl.LoadIdentity() for i := range t.upright_positions { v := t.upright_positions[i] t.upright_drawables[i].Render(v.X, v.Y, v.Z, float32(t.zoom)) } t.upright_positions = t.upright_positions[0:0] t.upright_drawables = t.upright_drawables[0:0] gl.PopMatrix() } func (t *Terrain) SetEventHandler(handler gin.EventHandler) { t.handler = handler } func (t *Terrain) DoRespond(event_group EventGroup) (bool,bool) { if t.handler != nil { t.handler.HandleEventGroup(event_group.EventGroup) } return false,false }
/****************************************************************************\ `colorize` is a simple package which returns an ascii colorized string version of an input string \****************************************************************************/ package colorize import "testing" func TestColorizeWithStringColor(test *testing.T) { // Define our test cases for the colorize with string color cases cases := []struct { input, color, expected string }{ // Non-standard cases { "Test", "BLACK", "\x1b[30mTest\x1b[0m" }, { "Test", "Black", "\x1b[30mTest\x1b[0m" }, { "Test", "INVALID", "Test" }, { "Test", "", "Test" }, // Typical usage { "Test", "black", "\x1b[30mTest\x1b[0m" }, { "Test", "red", "\x1b[31mTest\x1b[0m" }, { "Test", "green", "\x1b[32mTest\x1b[0m" }, { "Test", "yellow", "\x1b[33mTest\x1b[0m" }, { "Test", "blue", "\x1b[34mTest\x1b[0m" }, { "Test", "magenta", "\x1b[35mTest\x1b[0m" }, { "Test", "cyan", "\x1b[36mTest\x1b[0m" }, { "Test", "white", "\x1b[37mTest\x1b[0m" }, } // Run tests for _, tc := range cases { actual := Colorize(tc.input, tc.color) if actual != tc.expected { test.Errorf("Colorize(%q, %q) == %q, expected %q", tc.input, tc.color, actual, tc.expected) } } } minor prettification for struct decl /****************************************************************************\ `colorize` is a simple package which returns an ascii colorized string version of an input string \****************************************************************************/ package colorize import "testing" func TestColorizeWithStringColor(test *testing.T) { // Define our test cases for the colorize with string color cases cases := [] struct { input, color, expected string }{ // Non-standard cases { "Test", "BLACK", "\x1b[30mTest\x1b[0m" }, { "Test", "Black", "\x1b[30mTest\x1b[0m" }, { "Test", "INVALID", "Test" }, { "Test", "", "Test" }, // Typical usage { "Test", "black", "\x1b[30mTest\x1b[0m" }, { "Test", "red", "\x1b[31mTest\x1b[0m" }, { "Test", "green", 
"\x1b[32mTest\x1b[0m" }, { "Test", "yellow", "\x1b[33mTest\x1b[0m" }, { "Test", "blue", "\x1b[34mTest\x1b[0m" }, { "Test", "magenta", "\x1b[35mTest\x1b[0m" }, { "Test", "cyan", "\x1b[36mTest\x1b[0m" }, { "Test", "white", "\x1b[37mTest\x1b[0m" }, } // Run tests for _, tc := range cases { actual := Colorize(tc.input, tc.color) if actual != tc.expected { test.Errorf("Colorize(%q, %q) == %q, expected %q", tc.input, tc.color, actual, tc.expected) } } }
package command import ( "fmt" "strconv" "strings" "github.com/dustin/go-humanize" "github.com/hashicorp/nomad/api" ) type StatsCommand struct { Meta } func (f *StatsCommand) Help() string { return "Dispalys stats of an allocation or a task running on a nomad client" } func (f *StatsCommand) Synopsis() string { return "Dispalys stats of an allocation or a task running on a nomad client" } func (f *StatsCommand) Run(args []string) int { var verbose bool flags := f.Meta.FlagSet("fs-list", FlagSetClient) flags.BoolVar(&verbose, "verbose", false, "") flags.Usage = func() { f.Ui.Output(f.Help()) } if err := flags.Parse(args); err != nil { return 1 } args = flags.Args() if len(args) < 1 { f.Ui.Error("allocation id is a required parameter") return 1 } client, err := f.Meta.Client() if err != nil { f.Ui.Error(fmt.Sprintf("Error initializing client: %v", err)) return 1 } var allocID, task string allocID = strings.TrimSpace(args[0]) if len(args) == 2 { task = strings.TrimSpace(args[1]) } // Truncate the id unless full length is requested length := shortId if verbose { length = fullId } // Query the allocation info if len(allocID) == 1 { f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters.")) return 1 } if len(allocID)%2 == 1 { // Identifiers must be of even length, so we strip off the last byte // to provide a consistent user experience. 
allocID = allocID[:len(allocID)-1] } allocs, _, err := client.Allocations().PrefixList(allocID) if err != nil { f.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err)) return 1 } if len(allocs) == 0 { f.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID)) return 1 } if len(allocs) > 1 { // Format the allocs out := make([]string, len(allocs)+1) out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status" for i, alloc := range allocs { out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s", limit(alloc.ID, length), limit(alloc.EvalID, length), alloc.JobID, alloc.TaskGroup, alloc.DesiredStatus, alloc.ClientStatus, ) } f.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out))) return 0 } // Prefix lookup matched a single allocation alloc, _, err := client.Allocations().Info(allocs[0].ID, nil) if err != nil { f.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err)) return 1 } stats, err := client.Allocations().Stats(alloc, nil) if err != nil { f.Ui.Error(fmt.Sprintf("unable to get stats: %v", err)) return 1 } if task == "" { f.printAllocResourceUsage(alloc, stats) } else { f.printTaskResourceUsage(task, stats) } return 0 } func (f *StatsCommand) printTaskResourceUsage(task string, resourceUsage map[string]*api.TaskResourceUsage) { tu, ok := resourceUsage[task] if !ok { return } memoryStats := tu.ResourceUsage.MemoryStats cpuStats := tu.ResourceUsage.CpuStats f.Ui.Output(fmt.Sprintf("===> Task: %q", task)) f.Ui.Output("Memory Stats") out := make([]string, 2) out[0] = "RSS|Cache|Swap|Max Usage|Kernel Usage|KernelMaxUsage" out[1] = fmt.Sprintf("%v|%v|%v|%v|%v|%v", humanize.Bytes(memoryStats.RSS), humanize.Bytes(memoryStats.Cache), humanize.Bytes(memoryStats.Swap), humanize.Bytes(memoryStats.MaxUsage), humanize.Bytes(memoryStats.KernelUsage), humanize.Bytes(memoryStats.KernelMaxUsage), ) f.Ui.Output(formatList(out)) f.Ui.Output("") f.Ui.Output("CPU Stats") out = make([]string, 2) out[0] = "Percent|Throttled 
Periods|Throttled Time" percent := strconv.FormatFloat(cpuStats.Percent, 'f', 2, 64) out[1] = fmt.Sprintf("%v|%v|%v", percent, cpuStats.ThrottledPeriods, cpuStats.ThrottledTime) f.Ui.Output(formatList(out)) } func (f *StatsCommand) printAllocResourceUsage(alloc *api.Allocation, resourceUsage map[string]*api.TaskResourceUsage) { f.Ui.Output(fmt.Sprintf("Resource Usage of Tasks running in Allocation %q", alloc.ID)) for task, _ := range alloc.TaskStates { f.printTaskResourceUsage(task, resourceUsage) } } Making task a flag in the stats command package command import ( "fmt" "strconv" "strings" "github.com/dustin/go-humanize" "github.com/hashicorp/nomad/api" ) type StatsCommand struct { Meta } func (f *StatsCommand) Help() string { helpText := ` Usage: nomad node-status [options] <alloc-id> Displays statistics related to resource usage of tasks in an allocation. Use the -task flag to query statistics of an individual task running in an allocation. General Options: ` + generalOptionsUsage() + ` Node Stats Options: -task Display statistics for a specific task in an allocation. 
` return strings.TrimSpace(helpText) } func (f *StatsCommand) Synopsis() string { return "Dispalys stats of an allocation or a task running on a nomad client" } func (f *StatsCommand) Run(args []string) int { var verbose bool var task string flags := f.Meta.FlagSet("fs-list", FlagSetClient) flags.BoolVar(&verbose, "verbose", false, "") flags.StringVar(&task, "task", "", "") flags.Usage = func() { f.Ui.Output(f.Help()) } if err := flags.Parse(args); err != nil { return 1 } args = flags.Args() if len(args) < 1 { f.Ui.Error("allocation id is a required parameter") return 1 } client, err := f.Meta.Client() if err != nil { f.Ui.Error(fmt.Sprintf("Error initializing client: %v", err)) return 1 } var allocID string allocID = strings.TrimSpace(args[0]) // Truncate the id unless full length is requested length := shortId if verbose { length = fullId } // Query the allocation info if len(allocID) == 1 { f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters.")) return 1 } if len(allocID)%2 == 1 { // Identifiers must be of even length, so we strip off the last byte // to provide a consistent user experience. 
allocID = allocID[:len(allocID)-1] } allocs, _, err := client.Allocations().PrefixList(allocID) if err != nil { f.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err)) return 1 } if len(allocs) == 0 { f.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID)) return 1 } if len(allocs) > 1 { // Format the allocs out := make([]string, len(allocs)+1) out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status" for i, alloc := range allocs { out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s", limit(alloc.ID, length), limit(alloc.EvalID, length), alloc.JobID, alloc.TaskGroup, alloc.DesiredStatus, alloc.ClientStatus, ) } f.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out))) return 0 } // Prefix lookup matched a single allocation alloc, _, err := client.Allocations().Info(allocs[0].ID, nil) if err != nil { f.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err)) return 1 } stats, err := client.Allocations().Stats(alloc, nil) if err != nil { f.Ui.Error(fmt.Sprintf("unable to get stats: %v", err)) return 1 } if task == "" { f.printAllocResourceUsage(alloc, stats) } else { f.printTaskResourceUsage(task, stats) } return 0 } func (f *StatsCommand) printTaskResourceUsage(task string, resourceUsage map[string]*api.TaskResourceUsage) { tu, ok := resourceUsage[task] if !ok { return } memoryStats := tu.ResourceUsage.MemoryStats cpuStats := tu.ResourceUsage.CpuStats f.Ui.Output(fmt.Sprintf("===> Task: %q", task)) f.Ui.Output("Memory Stats") out := make([]string, 2) out[0] = "RSS|Cache|Swap|Max Usage|Kernel Usage|KernelMaxUsage" out[1] = fmt.Sprintf("%v|%v|%v|%v|%v|%v", humanize.Bytes(memoryStats.RSS), humanize.Bytes(memoryStats.Cache), humanize.Bytes(memoryStats.Swap), humanize.Bytes(memoryStats.MaxUsage), humanize.Bytes(memoryStats.KernelUsage), humanize.Bytes(memoryStats.KernelMaxUsage), ) f.Ui.Output(formatList(out)) f.Ui.Output("") f.Ui.Output("CPU Stats") out = make([]string, 2) out[0] = "Percent|Throttled 
Periods|Throttled Time" percent := strconv.FormatFloat(cpuStats.Percent, 'f', 2, 64) out[1] = fmt.Sprintf("%v|%v|%v", percent, cpuStats.ThrottledPeriods, cpuStats.ThrottledTime) f.Ui.Output(formatList(out)) } func (f *StatsCommand) printAllocResourceUsage(alloc *api.Allocation, resourceUsage map[string]*api.TaskResourceUsage) { f.Ui.Output(fmt.Sprintf("Resource Usage of Tasks running in Allocation %q", alloc.ID)) for task, _ := range alloc.TaskStates { f.printTaskResourceUsage(task, resourceUsage) } }
package bloom

import (
	"github.com/pmylund/go-bitset"

	"encoding/binary"
	"fmt"
	"hash"
	"hash/fnv"
	"math"
)

// filter holds the parameters shared by all bloom filter variants:
// m bits, k probe positions, and the 64-bit FNV hash used to derive them.
type filter struct {
	m uint32
	k uint32
	h hash.Hash64
}

// bits returns the k bit positions for data. Both 32-bit halves of a single
// 64-bit FNV hash are combined as a + b*i (double hashing) to simulate k
// independent hash functions.
func (f *filter) bits(data []byte) []uint32 {
	f.h.Reset()
	f.h.Write(data)
	d := f.h.Sum(nil)
	a := binary.BigEndian.Uint32(d[4:8])
	b := binary.BigEndian.Uint32(d[0:4])
	is := make([]uint32, f.k)
	for i := uint32(0); i < f.k; i++ {
		is[i] = (a + b*i) % f.m
	}
	return is
}

// newFilter builds the shared parameter struct with a fresh FNV-1a hasher.
func newFilter(m, k uint32) *filter {
	return &filter{
		m: m,
		k: k,
		h: fnv.New64(),
	}
}

// estimates returns the optimal bit count m and hash count k for n expected
// items at false positive rate p, panicking when the result cannot be held
// by the 32-bit implementation.
func estimates(n uint32, p float64) (uint32, uint32) {
	nf := float64(n)
	log2 := math.Log(2)
	m := -1 * nf * math.Log(p) / math.Pow(log2, 2)
	k := math.Ceil(log2 * m / nf)
	// BUG FIX: the original `m + 31>>5` parsed as `m + (31>>5)` == m, because
	// shift binds tighter than addition in Go. The intended value is the
	// number of 32-bit words, i.e. ceil(m/32).
	words := (m + 31) / 32
	if words >= math.MaxInt32 {
		panic(fmt.Sprintf("A 32-bit bloom filter with n %d and p %f requires a slice of %f 32-bit words, but slices cannot contain more than %d elements. Please use the equivalent 64-bit bloom filter, e.g. New64(), instead.", n, p, words, math.MaxInt32-1))
	} else if m > math.MaxUint32 {
		// Also guard the uint32(m) conversion below against overflow.
		panic(fmt.Sprintf("A 32-bit bloom filter with n %d and p %f requires %.0f bits, but this number overflows an uint32. Please use the equivalent 64-bit bloom filter, e.g. New64(), instead.", n, p, m))
	}
	return uint32(m), uint32(k)
}

// A standard bloom filter using the 64-bit FNV-1a hash function.
type Filter struct {
	*filter
	b *bitset.Bitset32
}

// Check whether data was previously added to the filter. Returns true if
// yes, with a false positive chance near the ratio specified upon creation
// of the filter. The result cannot be falsely negative.
func (f *Filter) Test(data []byte) bool {
	for _, i := range f.bits(data) {
		if !f.b.Test(i) {
			return false
		}
	}
	return true
}

// Add data to the filter.
func (f *Filter) Add(data []byte) {
	for _, i := range f.bits(data) {
		f.b.Set(i)
	}
}

// Resets the filter.
func (f *Filter) Reset() {
	f.b.Reset()
}

// Create a bloom filter with an expected n number of items, and an acceptable
// false positive rate of p, e.g. 0.01.
func New(n int, p float64) *Filter {
	m, k := estimates(uint32(n), p)
	f := &Filter{
		newFilter(m, k),
		bitset.New32(m),
	}
	return f
}

// A counting bloom filter using the 64-bit FNV-1a hash function. Supports
// removing items from the filter.
type CountingFilter struct {
	*filter
	// b is a stack of bitset layers: an item added c times has its bits set
	// in the first c layers. Layer 0 therefore covers everything added at
	// least once.
	b []*bitset.Bitset32
}

// Checks whether data was previously added to the filter. Returns true if
// yes, with a false positive chance near the ratio specified upon creation
// of the filter. The result cannot be falsely negative (unless one
// has removed an item that wasn't actually added to the filter previously.)
func (f *CountingFilter) Test(data []byte) bool {
	// Only the first layer needs checking; see the b field comment.
	b := f.b[0]
	for _, v := range f.bits(data) {
		if !b.Test(v) {
			return false
		}
	}
	return true
}

// Adds data to the filter.
func (f *CountingFilter) Add(data []byte) {
	for _, v := range f.bits(data) {
		done := false
		// Set the bit in the lowest layer that does not have it yet.
		for _, ov := range f.b {
			if !ov.Test(v) {
				done = true
				ov.Set(v)
				break
			}
		}
		if !done {
			// Every existing layer already has this bit; grow a new layer.
			nb := bitset.New32(f.b[0].Len())
			f.b = append(f.b, nb)
			nb.Set(v)
		}
	}
}

// Removes data from the filter. This exact data must have been previously added
// to the filter, or future results will be inconsistent.
func (f *CountingFilter) Remove(data []byte) {
	last := len(f.b) - 1
	for _, v := range f.bits(data) {
		// Clear the bit in the highest layer that has it set (the inverse of Add).
		for oi := last; oi >= 0; oi-- {
			ov := f.b[oi]
			if ov.Test(v) {
				ov.Clear(v)
				break
			}
		}
	}
}

// Resets the filter.
func (f *CountingFilter) Reset() {
	// Keep only the first layer and clear it; spare layers are dropped.
	f.b = f.b[:1]
	f.b[0].Reset()
}

// Create a counting bloom filter with an expected n number of items, and an
// acceptable false positive rate of p. Counting bloom filters support
// the removal of items from the filter.
func NewCounting(n int, p float64) *CountingFilter {
	m, k := estimates(uint32(n), p)
	f := &CountingFilter{
		newFilter(m, k),
		[]*bitset.Bitset32{bitset.New32(m)},
	}
	return f
}

// A layered bloom filter using the 64-bit FNV-1a hash function.
type LayeredFilter struct {
	*filter
	b []*bitset.Bitset32
}

// Checks whether data was previously added to the filter. Returns the number of
// the last layer where the data was added, e.g. 1 for the first layer, and a
// boolean indicating whether the data was added to the filter at all.
The check
// has a false positive chance near the ratio specified upon creation of the
// filter. The result cannot be falsely negative.
func (f *LayeredFilter) Test(data []byte) (int, bool) {
	is := f.bits(data)
	// Scan layers from the top down; the first (highest) layer in which every
	// probe bit is set determines the reported layer number.
	for i := len(f.b) - 1; i >= 0; i-- {
		v := f.b[i]
		last := len(is) - 1
		for oi, ov := range is {
			if !v.Test(ov) {
				break
			}
			if oi == last {
				// Every test was positive at this layer
				return i + 1, true
			}
		}
	}
	return 0, false
}

// Adds data to the filter. Returns the number of the layer where the data
// was added, e.g. 1 for the first layer.
func (f *LayeredFilter) Add(data []byte) int {
	is := f.bits(data)
	var (
		i int
		v *bitset.Bitset32
	)
	for i, v = range f.b {
		here := false
		// Once any probe bit is found missing at this layer ("here"), set
		// all remaining probe bits at the same layer.
		for _, ov := range is {
			if here {
				v.Set(ov)
			} else if !v.Test(ov) {
				here = true
				v.Set(ov)
			}
		}
		if here {
			return i + 1
		}
	}
	// All existing layers already contained the item; append a fresh layer.
	nb := bitset.New32(f.b[0].Len())
	f.b = append(f.b, nb)
	for _, v := range is {
		nb.Set(v)
	}
	return i + 2
}

// Resets the filter.
func (f *LayeredFilter) Reset() {
	// Keep only the first layer and clear it.
	f.b = f.b[:1]
	f.b[0].Reset()
}

// Create a layered bloom filter with an expected n number of items, and an
// acceptable false positive rate of p. Layered bloom filters can be used
// to keep track of a certain, arbitrary count of items, e.g. to check if some
// given data was added to the filter 10 times or less.
func NewLayered(n int, p float64) *LayeredFilter { m, k := estimates(uint32(n), p) f := &LayeredFilter{ newFilter(m, k), []*bitset.Bitset32{bitset.New32(m)}, } return f } Also panic if m overflows uint32 package bloom import ( "github.com/pmylund/go-bitset" "encoding/binary" "fmt" "hash" "hash/fnv" "math" ) type filter struct { m uint32 k uint32 h hash.Hash64 } func (f *filter) bits(data []byte) []uint32 { f.h.Reset() f.h.Write(data) d := f.h.Sum(nil) a := binary.BigEndian.Uint32(d[4:8]) b := binary.BigEndian.Uint32(d[0:4]) is := make([]uint32, f.k) for i := uint32(0); i < f.k; i++ { is[i] = (a + b*i) % f.m } return is } func newFilter(m, k uint32) *filter { return &filter{ m: m, k: k, h: fnv.New64(), } } func estimates(n uint32, p float64) (uint32, uint32) { nf := float64(n) log2 := math.Log(2) m := -1 * nf * math.Log(p) / math.Pow(log2, 2) k := math.Ceil(log2 * m / nf) words := m + 31>>5 if words >= math.MaxInt32 { panic(fmt.Sprintf("A 32-bit bloom filter with n %d and p %f requires a 32-bit bitset with a slice of %f words, but slices cannot contain more than %d elements. Please use the equivalent 64-bit bloom filter, e.g. New64(), instead.", n, p, words, math.MaxInt32-1)) } else if m > math.MaxUint32 { panic(fmt.Sprintf("A 32-bit bloom filter with n %d and p %f requires a 32-bit bitset with %d bits, but this number overflows an uint32. Please use the equivalent 64-bit bloom filter, e.g. New64(), instead.", n, p, m)) } return uint32(m), uint32(k) } // A standard bloom filter using the 64-bit FNV-1a hash function. type Filter struct { *filter b *bitset.Bitset32 } // Check whether data was previously added to the filter. Returns true if // yes, with a false positive chance near the ratio specified upon creation // of the filter. The result cannot be falsely negative. func (f *Filter) Test(data []byte) bool { for _, i := range f.bits(data) { if !f.b.Test(i) { return false } } return true } // Add data to the filter. 
func (f *Filter) Add(data []byte) {
	for _, i := range f.bits(data) {
		f.b.Set(i)
	}
}

// Resets the filter.
func (f *Filter) Reset() {
	f.b.Reset()
}

// Create a bloom filter with an expected n number of items, and an acceptable
// false positive rate of p, e.g. 0.01.
func New(n int, p float64) *Filter {
	m, k := estimates(uint32(n), p)
	f := &Filter{
		newFilter(m, k),
		bitset.New32(m),
	}
	return f
}

// A counting bloom filter using the 64-bit FNV-1a hash function. Supports
// removing items from the filter.
type CountingFilter struct {
	*filter
	// b is a stack of bitset layers: an item added c times has its bits set
	// in the first c layers.
	b []*bitset.Bitset32
}

// Checks whether data was previously added to the filter. Returns true if
// yes, with a false positive chance near the ratio specified upon creation
// of the filter. The result cannot be falsely negative (unless one
// has removed an item that wasn't actually added to the filter previously.)
func (f *CountingFilter) Test(data []byte) bool {
	// Layer 0 covers everything added at least once, so it alone is checked.
	b := f.b[0]
	for _, v := range f.bits(data) {
		if !b.Test(v) {
			return false
		}
	}
	return true
}

// Adds data to the filter.
func (f *CountingFilter) Add(data []byte) {
	for _, v := range f.bits(data) {
		done := false
		// Set the bit in the lowest layer that does not have it yet.
		for _, ov := range f.b {
			if !ov.Test(v) {
				done = true
				ov.Set(v)
				break
			}
		}
		if !done {
			// Every existing layer already has this bit; grow a new layer.
			nb := bitset.New32(f.b[0].Len())
			f.b = append(f.b, nb)
			nb.Set(v)
		}
	}
}

// Removes data from the filter. This exact data must have been previously added
// to the filter, or future results will be inconsistent.
func (f *CountingFilter) Remove(data []byte) {
	last := len(f.b) - 1
	for _, v := range f.bits(data) {
		// Clear the bit in the highest layer that has it set (inverse of Add).
		for oi := last; oi >= 0; oi-- {
			ov := f.b[oi]
			if ov.Test(v) {
				ov.Clear(v)
				break
			}
		}
	}
}

// Resets the filter.
func (f *CountingFilter) Reset() {
	f.b = f.b[:1]
	f.b[0].Reset()
}

// Create a counting bloom filter with an expected n number of items, and an
// acceptable false positive rate of p. Counting bloom filters support
// the removal of items from the filter.
func NewCounting(n int, p float64) *CountingFilter {
	m, k := estimates(uint32(n), p)
	f := &CountingFilter{
		newFilter(m, k),
		[]*bitset.Bitset32{bitset.New32(m)},
	}
	return f
}

// A layered bloom filter using the 64-bit FNV-1a hash function.
type LayeredFilter struct {
	*filter
	b []*bitset.Bitset32
}

// Checks whether data was previously added to the filter. Returns the number of
// the last layer where the data was added, e.g. 1 for the first layer, and a
// boolean indicating whether the data was added to the filter at all. The check
// has a false positive chance near the ratio specified upon creation of the
// filter. The result cannot be falsely negative.
func (f *LayeredFilter) Test(data []byte) (int, bool) {
	is := f.bits(data)
	// Scan layers from the top down; the highest layer in which every probe
	// bit is set determines the reported layer number.
	for i := len(f.b) - 1; i >= 0; i-- {
		v := f.b[i]
		last := len(is) - 1
		for oi, ov := range is {
			if !v.Test(ov) {
				break
			}
			if oi == last {
				// Every test was positive at this layer
				return i + 1, true
			}
		}
	}
	return 0, false
}

// Adds data to the filter. Returns the number of the layer where the data
// was added, e.g. 1 for the first layer.
func (f *LayeredFilter) Add(data []byte) int {
	is := f.bits(data)
	var (
		i int
		v *bitset.Bitset32
	)
	for i, v = range f.b {
		here := false
		// Once any probe bit is found missing at this layer ("here"), set all
		// remaining probe bits at the same layer.
		for _, ov := range is {
			if here {
				v.Set(ov)
			} else if !v.Test(ov) {
				here = true
				v.Set(ov)
			}
		}
		if here {
			return i + 1
		}
	}
	// All existing layers already contained the item; append a fresh layer.
	nb := bitset.New32(f.b[0].Len())
	f.b = append(f.b, nb)
	for _, v := range is {
		nb.Set(v)
	}
	return i + 2
}

// Resets the filter.
func (f *LayeredFilter) Reset() {
	f.b = f.b[:1]
	f.b[0].Reset()
}

// Create a layered bloom filter with an expected n number of items, and an
// acceptable false positive rate of p. Layered bloom filters can be used
// to keep track of a certain, arbitrary count of items, e.g. to check if some
// given data was added to the filter 10 times or less.
func NewLayered(n int, p float64) *LayeredFilter {
	m, k := estimates(uint32(n), p)
	f := &LayeredFilter{
		newFilter(m, k),
		[]*bitset.Bitset32{bitset.New32(m)},
	}
	return f
}
package command

import (
	"strings"

	pb "gopkg.in/cheggaaa/pb.v1"

	"github.com/Sirupsen/logrus"
	"github.com/mitchellh/cli"
	"github.com/nerdalize/nerd/nerd/client"
	"github.com/nerdalize/nerd/nerd/client/credentials"
	"github.com/nerdalize/nerd/nerd/client/credentials/provider"
	"github.com/nerdalize/nerd/nerd/conf"
	"github.com/pkg/errors"
)

// stdoutkw is a key writer that reports keys via the logger.
type stdoutkw struct{}

//Write writes a key to stdout.
func (kw *stdoutkw) Write(k string) (err error) {
	// _, err = fmt.Fprintf(os.Stdout, "%v\n", k)
	// NOTE(review): despite the name, this logs via logrus rather than
	// writing to os.Stdout directly (see the commented-out line above).
	logrus.Info(k)
	return nil
}

//NewClient creates a new NerdAPIClient with two credential providers.
func NewClient(ui cli.Ui) (*client.NerdAPIClient, error) {
	c, err := conf.Read()
	if err != nil {
		return nil, errors.Wrap(err, "failed to read config")
	}
	key, err := credentials.ParseECDSAPublicKeyFromPemBytes([]byte(c.Auth.PublicKey))
	if err != nil {
		return nil, errors.Wrap(err, "ECDSA Public Key is invalid")
	}
	// Credentials are resolved in order: environment, config file, then an
	// interactive username/password prompt against the auth API.
	return client.NewNerdAPI(client.NerdAPIConfig{
		Credentials: provider.NewChainCredentials(
			key,
			provider.NewEnv(),
			provider.NewConfig(),
			provider.NewAuthAPI(UserPassProvider(ui), client.NewAuthAPI(c.Auth.APIEndpoint)),
		),
		URL:       c.NerdAPIEndpoint,
		ProjectID: c.CurrentProject,
	})
}

//UserPassProvider prompts the username and password on stdin.
func UserPassProvider(ui cli.Ui) func() (string, string, error) {
	return func() (string, string, error) {
		ui.Info("Please enter your Nerdalize username and password.")
		user, err := ui.Ask("Username: ")
		if err != nil {
			return "", "", errors.Wrap(err, "failed to read username")
		}
		pass, err := ui.AskSecret("Password: ")
		if err != nil {
			return "", "", errors.Wrap(err, "failed to read password")
		}
		return user, pass, nil
	}
}

//ErrorCauser returns the error that is one level up in the error chain.
func ErrorCauser(err error) error {
	type causer interface {
		Cause() error
	}
	if err2, ok := err.(causer); ok {
		err = err2.Cause()
	}
	return err
}

//printUserFacing will try to get the user facing error message from the error chain and print it.
func printUserFacing(err error, verbose bool) { cause := errors.Cause(err) type userFacing interface { UserFacingMsg() string Underlying() error } if uerr, ok := cause.(userFacing); ok { logrus.Info(uerr.UserFacingMsg()) logrus.Debugf("Underlying error: %v", uerr.Underlying()) logrus.Exit(-1) } } //HandleError handles the way errors are presented to the user. func HandleError(err error, verbose bool) { printUserFacing(err, verbose) // when there's are more than 1 message on the message stack, only print the top one for user friendlyness. if errors.Cause(err) != nil { logrus.Info(strings.Replace(err.Error(), ": "+ErrorCauser(ErrorCauser(err)).Error(), "", 1)) } logrus.Debugf("Underlying error: %+v", err) logrus.Exit(-1) } //ProgressBar creates a new CLI progess bar and adds input from the progressCh to the bar. func ProgressBar(total int64, progressCh <-chan int64, doneCh chan<- struct{}) { bar := pb.New64(total).Start() for elem := range progressCh { bar.Add64(elem) } bar.Finish() doneCh <- struct{}{} } Add byte units to progress bar package command import ( "strings" pb "gopkg.in/cheggaaa/pb.v1" "github.com/Sirupsen/logrus" "github.com/mitchellh/cli" "github.com/nerdalize/nerd/nerd/client" "github.com/nerdalize/nerd/nerd/client/credentials" "github.com/nerdalize/nerd/nerd/client/credentials/provider" "github.com/nerdalize/nerd/nerd/conf" "github.com/pkg/errors" ) type stdoutkw struct{} //Write writes a key to stdout. func (kw *stdoutkw) Write(k string) (err error) { // _, err = fmt.Fprintf(os.Stdout, "%v\n", k) logrus.Info(k) return nil } //NewClient creates a new NerdAPIClient with two credential providers. 
func NewClient(ui cli.Ui) (*client.NerdAPIClient, error) {
	c, err := conf.Read()
	if err != nil {
		return nil, errors.Wrap(err, "failed to read config")
	}
	key, err := credentials.ParseECDSAPublicKeyFromPemBytes([]byte(c.Auth.PublicKey))
	if err != nil {
		return nil, errors.Wrap(err, "ECDSA Public Key is invalid")
	}
	// Credentials are resolved in order: environment, config file, then an
	// interactive username/password prompt against the auth API.
	return client.NewNerdAPI(client.NerdAPIConfig{
		Credentials: provider.NewChainCredentials(
			key,
			provider.NewEnv(),
			provider.NewConfig(),
			provider.NewAuthAPI(UserPassProvider(ui), client.NewAuthAPI(c.Auth.APIEndpoint)),
		),
		URL:       c.NerdAPIEndpoint,
		ProjectID: c.CurrentProject,
	})
}

//UserPassProvider prompts the username and password on stdin.
func UserPassProvider(ui cli.Ui) func() (string, string, error) {
	return func() (string, string, error) {
		ui.Info("Please enter your Nerdalize username and password.")
		user, err := ui.Ask("Username: ")
		if err != nil {
			return "", "", errors.Wrap(err, "failed to read username")
		}
		pass, err := ui.AskSecret("Password: ")
		if err != nil {
			return "", "", errors.Wrap(err, "failed to read password")
		}
		return user, pass, nil
	}
}

//ErrorCauser returns the error that is one level up in the error chain.
func ErrorCauser(err error) error {
	type causer interface {
		Cause() error
	}
	if err2, ok := err.(causer); ok {
		err = err2.Cause()
	}
	return err
}

//printUserFacing will try to get the user facing error message from the error chain and print it.
// It exits the process (logrus.Exit) when such a message is found.
func printUserFacing(err error, verbose bool) {
	cause := errors.Cause(err)
	type userFacing interface {
		UserFacingMsg() string
		Underlying() error
	}
	if uerr, ok := cause.(userFacing); ok {
		logrus.Info(uerr.UserFacingMsg())
		logrus.Debugf("Underlying error: %v", uerr.Underlying())
		logrus.Exit(-1)
	}
}

//HandleError handles the way errors are presented to the user.
func HandleError(err error, verbose bool) {
	printUserFacing(err, verbose)
	// when there is more than 1 message on the message stack, only print the top one for user friendliness.
	if errors.Cause(err) != nil {
		// Strip the two-levels-up cause text so only the top message remains.
		logrus.Info(strings.Replace(err.Error(), ": "+ErrorCauser(ErrorCauser(err)).Error(), "", 1))
	}
	logrus.Debugf("Underlying error: %+v", err)
	logrus.Exit(-1)
}

//ProgressBar creates a new CLI progress bar and adds input from the progressCh to the bar.
// It blocks until progressCh is closed, then signals completion on doneCh.
func ProgressBar(total int64, progressCh <-chan int64, doneCh chan<- struct{}) {
	bar := pb.New64(total).Start()
	// Progress values are byte counts, so render them with byte units.
	bar.SetUnits(pb.U_BYTES)
	for elem := range progressCh {
		bar.Add64(elem)
	}
	bar.Finish()
	doneCh <- struct{}{}
}
package commands

import (
	"fmt"
	"os"
	"reflect"

	"github.com/github/hub/github"
	"github.com/github/hub/utils"
)

var cmdFork = &Command{
	Run:   fork,
	Usage: "fork [--no-remote]",
	Short: "Make a fork of a remote repository on GitHub and add as remote",
	Long: `Forks the original project (referenced by "origin" remote) on GitHub and
adds a new remote for it under your username.
`,
}

var flagForkNoRemote bool

func init() {
	cmdFork.Flag.BoolVar(&flagForkNoRemote, "no-remote", false, "")
	CmdRunner.Use(cmdFork)
}

/*
$ gh fork
[ repo forked on GitHub ]
> git remote add -f YOUR_USER git@github.com:YOUR_USER/CURRENT_REPO.git

$ gh fork --no-remote
[ repo forked on GitHub ]
*/
// fork creates a fork of the current repository's origin project on GitHub
// and (unless --no-remote) adds a remote named after the fork owner.
func fork(cmd *Command, args *Args) {
	localRepo := github.LocalRepo()
	project, err := localRepo.MainProject()
	// FIX: the raw MainProject error is not actionable for users; report a
	// clear message instead of passing the raw error to utils.Check.
	if err != nil {
		utils.Check(fmt.Errorf("Error: repository under 'origin' remote is not a GitHub project"))
	}

	configs := github.CurrentConfigs()
	host, err := configs.PromptForHost(project.Host)
	if err != nil {
		utils.Check(github.FormatError("forking repository", err))
	}

	forkProject := github.NewProject(host.User, project.Name, project.Host)

	client := github.NewClient(project.Host)
	existingRepo, err := client.Repository(forkProject)
	if err == nil {
		// The fork name is taken; only accept it if it is actually a fork of
		// this project (parent matches).
		var parentURL *github.URL
		if parent := existingRepo.Parent; parent != nil {
			parentURL, _ = github.ParseURL(parent.HTMLURL)
		}
		if parentURL == nil || !reflect.DeepEqual(parentURL.Project, project) {
			err = fmt.Errorf("Error creating fork: %s already exists on %s", forkProject, forkProject.Host)
			utils.Check(err)
		}
	} else {
		if !args.Noop {
			_, err := client.ForkRepository(project)
			utils.Check(err)
		}
	}

	if flagForkNoRemote {
		os.Exit(0)
	} else {
		originRemote, _ := localRepo.OriginRemote()
		originURL := originRemote.URL.String()
		url := forkProject.GitURL("", "", true)
		// Add the remote pointing at origin first (for a fast fetch), then
		// repoint it at the fork.
		args.Replace("git", "remote", "add", "-f", forkProject.Owner, originURL)
		args.After("git", "remote", "set-url", forkProject.Owner, url)
		args.After("echo", fmt.Sprintf("new remote: %s", forkProject.Owner))
	}
}

Fix error message when checking main project in fork

package commands

import (
	"fmt"
	"os"
	"reflect"

	"github.com/github/hub/github"
	"github.com/github/hub/utils"
)

var cmdFork = &Command{
	Run:   fork,
	Usage: "fork [--no-remote]",
	Short: "Make a fork of a remote repository on GitHub and add as remote",
	Long: `Forks the original project (referenced by "origin" remote) on GitHub and
adds a new remote for it under your username.
`,
}

var flagForkNoRemote bool

func init() {
	cmdFork.Flag.BoolVar(&flagForkNoRemote, "no-remote", false, "")
	CmdRunner.Use(cmdFork)
}

/*
$ gh fork
[ repo forked on GitHub ]
> git remote add -f YOUR_USER git@github.com:YOUR_USER/CURRENT_REPO.git

$ gh fork --no-remote
[ repo forked on GitHub ]
*/
// fork creates a fork of the current repository's origin project on GitHub
// and (unless --no-remote) adds a remote named after the fork owner.
func fork(cmd *Command, args *Args) {
	localRepo := github.LocalRepo()
	project, err := localRepo.MainProject()
	if err != nil {
		// Report a user-facing message instead of the raw MainProject error.
		utils.Check(fmt.Errorf("Error: repository under 'origin' remote is not a GitHub project"))
	}

	configs := github.CurrentConfigs()
	host, err := configs.PromptForHost(project.Host)
	if err != nil {
		utils.Check(github.FormatError("forking repository", err))
	}

	forkProject := github.NewProject(host.User, project.Name, project.Host)

	client := github.NewClient(project.Host)
	existingRepo, err := client.Repository(forkProject)
	if err == nil {
		// The fork name is taken; only accept it if it is actually a fork of
		// this project (parent matches).
		var parentURL *github.URL
		if parent := existingRepo.Parent; parent != nil {
			parentURL, _ = github.ParseURL(parent.HTMLURL)
		}
		if parentURL == nil || !reflect.DeepEqual(parentURL.Project, project) {
			err = fmt.Errorf("Error creating fork: %s already exists on %s", forkProject, forkProject.Host)
			utils.Check(err)
		}
	} else {
		if !args.Noop {
			_, err := client.ForkRepository(project)
			utils.Check(err)
		}
	}

	if flagForkNoRemote {
		os.Exit(0)
	} else {
		originRemote, _ := localRepo.OriginRemote()
		originURL := originRemote.URL.String()
		url := forkProject.GitURL("", "", true)
		// Add the remote pointing at origin first (for a fast fetch), then
		// repoint it at the fork.
		args.Replace("git", "remote", "add", "-f", forkProject.Owner, originURL)
		args.After("git", "remote", "set-url", forkProject.Owner, url)
		args.After("echo", fmt.Sprintf("new remote: %s", forkProject.Owner))
	}
}
package main_test

import (
	"database/sql"
	"flag"
	"github.com/adrianmacneil/dbmate"
	"github.com/codegangsta/cli"
	"github.com/stretchr/testify/require"
	"io"
	"net/url"
	"os"
	"path/filepath"
	"testing"
)

// stubsDir caches the absolute path to the test stub migrations directory.
var stubsDir string

// testContext builds a cli.Context for the app with DATABASE_URL pointing at
// the test database, chdir'd into the stubs directory.
func testContext(t *testing.T) *cli.Context {
	var err error
	if stubsDir == "" {
		stubsDir, err = filepath.Abs("./stubs")
		require.Nil(t, err)
	}
	err = os.Chdir(stubsDir)
	require.Nil(t, err)

	u := testURL(t)
	err = os.Setenv("DATABASE_URL", u.String())
	require.Nil(t, err)

	app := main.NewApp()
	flagset := flag.NewFlagSet(app.Name, flag.ContinueOnError)
	for _, f := range app.Flags {
		f.Apply(flagset)
	}
	return cli.NewContext(app, flagset, nil)
}

// testURL derives the postgres connection URL from the POSTGRES_PORT
// environment variable (docker-style "tcp://host:port" link value).
func testURL(t *testing.T) *url.URL {
	str := os.Getenv("POSTGRES_PORT")
	require.NotEmpty(t, str, "missing POSTGRES_PORT environment variable")

	u, err := url.Parse(str)
	require.Nil(t, err)
	u.Scheme = "postgres"
	u.User = url.User("postgres")
	u.Path = "/dbmate"
	u.RawQuery = "sslmode=disable"
	return u
}

// mustClose closes c and panics on failure (test helper).
func mustClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

func TestGetDatabaseUrl(t *testing.T) {
	ctx := testContext(t)

	err := os.Setenv("DATABASE_URL", "postgres://example.org/db")
	require.Nil(t, err)

	u, err := main.GetDatabaseURL(ctx)
	require.Nil(t, err)
	require.Equal(t, "postgres", u.Scheme)
	require.Equal(t, "example.org", u.Host)
	require.Equal(t, "/db", u.Path)
}

func TestMigrateCommand(t *testing.T) {
	ctx := testContext(t)

	// drop and recreate database
	err := main.DropCommand(ctx)
	require.Nil(t, err)
	err = main.CreateCommand(ctx)
	require.Nil(t, err)

	// migrate
	err = main.MigrateCommand(ctx)
	require.Nil(t, err)

	// verify results
	u := testURL(t)
	db, err := sql.Open("postgres", u.String())
	require.Nil(t, err)
	defer mustClose(db)

	count := 0
	err = db.QueryRow(`select count(*) from schema_migrations where version = '20151129054053'`).Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)

	err = db.QueryRow("select count(*) from users").Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)
}

func
TestUpCommand(t *testing.T) {
	ctx := testContext(t)

	// drop database
	err := main.DropCommand(ctx)
	require.Nil(t, err)

	// create and migrate
	err = main.UpCommand(ctx)
	require.Nil(t, err)

	// verify results
	u := testURL(t)
	db, err := sql.Open("postgres", u.String())
	require.Nil(t, err)
	defer mustClose(db)

	count := 0
	err = db.QueryRow(`select count(*) from schema_migrations where version = '20151129054053'`).Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)

	err = db.QueryRow("select count(*) from users").Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)
}

func TestRollbackCommand(t *testing.T) {
	ctx := testContext(t)

	// drop, recreate, and migrate database
	err := main.DropCommand(ctx)
	require.Nil(t, err)
	err = main.CreateCommand(ctx)
	require.Nil(t, err)
	err = main.MigrateCommand(ctx)
	require.Nil(t, err)

	// verify migration
	u := testURL(t)
	db, err := sql.Open("postgres", u.String())
	require.Nil(t, err)
	defer mustClose(db)

	count := 0
	err = db.QueryRow(`select count(*) from schema_migrations where version = '20151129054053'`).Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)

	// rollback
	err = main.RollbackCommand(ctx)
	require.Nil(t, err)

	// verify rollback
	err = db.QueryRow("select count(*) from schema_migrations").Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 0, count)

	err = db.QueryRow("select count(*) from users").Scan(&count)
	require.Equal(t, "pq: relation \"users\" does not exist", err.Error())
}

Abstract command tests to support multiple databases

package main_test

import (
	"database/sql"
	"flag"
	"github.com/adrianmacneil/dbmate"
	"github.com/codegangsta/cli"
	"github.com/stretchr/testify/require"
	"io"
	"net/url"
	"os"
	"path/filepath"
	"testing"
)

// stubsDir caches the absolute path to the test stub migrations directory.
var stubsDir string

// testContext builds a cli.Context for the app, pointing DATABASE_URL at u.
func testContext(t *testing.T, u *url.URL) *cli.Context {
	var err error
	if stubsDir == "" {
		stubsDir, err = filepath.Abs("./stubs")
		require.Nil(t, err)
	}
	err = os.Chdir(stubsDir)
	require.Nil(t, err)

	err = os.Setenv("DATABASE_URL", u.String())
	require.Nil(t, err)

	app :=
main.NewApp()
	flagset := flag.NewFlagSet(app.Name, flag.ContinueOnError)
	for _, f := range app.Flags {
		f.Apply(flagset)
	}
	return cli.NewContext(app, flagset, nil)
}

// postgresTestURL derives the postgres connection URL from the POSTGRES_PORT
// environment variable (docker-style "tcp://host:port" link value).
func postgresTestURL(t *testing.T) *url.URL {
	str := os.Getenv("POSTGRES_PORT")
	require.NotEmpty(t, str, "missing POSTGRES_PORT environment variable")

	u, err := url.Parse(str)
	require.Nil(t, err)
	u.Scheme = "postgres"
	u.User = url.User("postgres")
	u.Path = "/dbmate"
	u.RawQuery = "sslmode=disable"
	return u
}

// testURLs lists one database URL per supported backend; each command test
// runs once per URL.
func testURLs(t *testing.T) []*url.URL {
	return []*url.URL{
		postgresTestURL(t),
	}
}

// mustClose closes c and panics on failure (test helper).
func mustClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

func TestGetDatabaseUrl(t *testing.T) {
	envURL, err := url.Parse("foo://example.org/db")
	require.Nil(t, err)

	ctx := testContext(t, envURL)

	u, err := main.GetDatabaseURL(ctx)
	require.Nil(t, err)
	require.Equal(t, "foo", u.Scheme)
	require.Equal(t, "example.org", u.Host)
	require.Equal(t, "/db", u.Path)
}

// testMigrateCommandURL exercises drop/create/migrate against one database URL.
func testMigrateCommandURL(t *testing.T, u *url.URL) {
	ctx := testContext(t, u)

	// drop and recreate database
	err := main.DropCommand(ctx)
	require.Nil(t, err)
	err = main.CreateCommand(ctx)
	require.Nil(t, err)

	// migrate
	err = main.MigrateCommand(ctx)
	require.Nil(t, err)

	// verify results
	db, err := sql.Open(u.Scheme, u.String())
	require.Nil(t, err)
	defer mustClose(db)

	count := 0
	err = db.QueryRow(`select count(*) from schema_migrations where version = '20151129054053'`).Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)

	err = db.QueryRow("select count(*) from users").Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)
}

func TestMigrateCommand(t *testing.T) {
	for _, u := range testURLs(t) {
		testMigrateCommandURL(t, u)
	}
}

// testUpCommandURL exercises drop + up (create and migrate) against one URL.
func testUpCommandURL(t *testing.T, u *url.URL) {
	ctx := testContext(t, u)

	// drop database
	err := main.DropCommand(ctx)
	require.Nil(t, err)

	// create and migrate
	err = main.UpCommand(ctx)
	require.Nil(t, err)

	// verify results
	db, err := sql.Open(u.Scheme, u.String())
	require.Nil(t, err)
	defer mustClose(db)
	count := 0
	err = db.QueryRow(`select count(*) from schema_migrations where version = '20151129054053'`).Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)

	err = db.QueryRow("select count(*) from users").Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)
}

func TestUpCommand(t *testing.T) {
	for _, u := range testURLs(t) {
		testUpCommandURL(t, u)
	}
}

// testRollbackCommandURL migrates then rolls back one migration against one URL.
func testRollbackCommandURL(t *testing.T, u *url.URL) {
	ctx := testContext(t, u)

	// drop, recreate, and migrate database
	err := main.DropCommand(ctx)
	require.Nil(t, err)
	err = main.CreateCommand(ctx)
	require.Nil(t, err)
	err = main.MigrateCommand(ctx)
	require.Nil(t, err)

	// verify migration
	db, err := sql.Open(u.Scheme, u.String())
	require.Nil(t, err)
	defer mustClose(db)

	count := 0
	err = db.QueryRow(`select count(*) from schema_migrations where version = '20151129054053'`).Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 1, count)

	// rollback
	err = main.RollbackCommand(ctx)
	require.Nil(t, err)

	// verify rollback
	err = db.QueryRow("select count(*) from schema_migrations").Scan(&count)
	require.Nil(t, err)
	require.Equal(t, 0, count)

	err = db.QueryRow("select count(*) from users").Scan(&count)
	require.Equal(t, "pq: relation \"users\" does not exist", err.Error())
}

func TestRollbackCommand(t *testing.T) {
	for _, u := range testURLs(t) {
		testRollbackCommandURL(t, u)
	}
}
/* parquet-go tag model: Tag mirrors the parquet struct-tag annotations, with Key... and Value... variants used for map key/value columns. StringToTag parses the comma-separated "key=value" tag string and panics on unknown keys or malformed ints. */ package common import ( "bytes" "encoding/gob" "fmt" "reflect" "strconv" "strings" "github.com/xitongsys/parquet-go/parquet" ) // `parquet:"name=Name, type=FIXED_LEN_BYTE_ARRAY, length=12"` type Tag struct { InName string ExName string Type string KeyType string ValueType string BaseType string KeyBaseType string ValueBaseType string Length int32 KeyLength int32 ValueLength int32 Scale int32 KeyScale int32 ValueScale int32 Precision int32 KeyPrecision int32 ValuePrecision int32 FieldID int32 KeyFieldID int32 ValueFieldID int32 Encoding parquet.Encoding KeyEncoding parquet.Encoding ValueEncoding parquet.Encoding RepetitionType parquet.FieldRepetitionType KeyRepetitionType parquet.FieldRepetitionType ValueRepetitionType parquet.FieldRepetitionType } func NewTag() *Tag { return &Tag{} } func StringToTag(tag string) *Tag { mp := NewTag() tagStr := strings.Replace(tag, "\t", "", -1) tags := strings.Split(tagStr, ",") for _, tag := range tags { tag = strings.TrimSpace(tag) kv := strings.Split(tag, "=") key := kv[0] key = strings.ToLower(key) key = strings.TrimSpace(key) val := kv[1] val = strings.TrimSpace(val) valInt32 := func() int32 { valInt, err := strconv.Atoi(val) if err != nil { panic(err) } return int32(valInt) } switch key { case "type": mp.Type = val case "keytype": mp.KeyType = val case "valuetype": mp.ValueType = val case "basetype": mp.BaseType = val case "keybasetype": mp.KeyBaseType = val case "valuebasetype": mp.ValueBaseType = val case "length": mp.Length = valInt32() case "keylength": mp.KeyLength = valInt32() case "valuelength": mp.ValueLength = valInt32() case "scale": mp.Scale = valInt32() case "keyscale": mp.KeyScale = valInt32() case "valuescale": mp.ValueScale = valInt32() case "precision": mp.Precision = valInt32() case "keyprecision": mp.KeyPrecision = valInt32() case "valueprecision": mp.ValuePrecision = valInt32() case "fieldid": mp.FieldID = valInt32() case "keyfieldid": mp.KeyFieldID = valInt32() case "valuefieldid": mp.ValueFieldID = 
valInt32() case "name": if mp.InName == "" { mp.InName = HeadToUpper(val) } mp.ExName = val case "inname": mp.InName = val case "repetitiontype": switch strings.ToLower(val) { case "repeated": mp.RepetitionType = parquet.FieldRepetitionType_REPEATED case "required": mp.RepetitionType = parquet.FieldRepetitionType_REQUIRED case "optional": mp.RepetitionType = parquet.FieldRepetitionType_OPTIONAL default: panic(fmt.Errorf("Unknown repetitiontype: '%v'", val)) } case "keyrepetitiontype": switch strings.ToLower(val) { case "repeated": mp.KeyRepetitionType = parquet.FieldRepetitionType_REPEATED case "required": mp.KeyRepetitionType = parquet.FieldRepetitionType_REQUIRED case "optional": mp.KeyRepetitionType = parquet.FieldRepetitionType_OPTIONAL default: panic(fmt.Errorf("Unknown keyrepetitiontype: '%v'", val)) } case "valuerepetitiontype": switch strings.ToLower(val) { case "repeated": mp.ValueRepetitionType = parquet.FieldRepetitionType_REPEATED case "required": mp.ValueRepetitionType = parquet.FieldRepetitionType_REQUIRED case "optional": mp.ValueRepetitionType = parquet.FieldRepetitionType_OPTIONAL default: panic(fmt.Errorf("Unknown valuerepetitiontype: '%v'", val)) } case "encoding": switch strings.ToLower(val) { case "plain": mp.Encoding = parquet.Encoding_PLAIN case "rle": mp.Encoding = parquet.Encoding_RLE case "delta_binary_packed": mp.Encoding = parquet.Encoding_DELTA_BINARY_PACKED case "delta_length_byte_array": mp.Encoding = parquet.Encoding_DELTA_LENGTH_BYTE_ARRAY case "delta_byte_array": mp.Encoding = parquet.Encoding_DELTA_BYTE_ARRAY case "plain_dictionary": mp.Encoding = parquet.Encoding_PLAIN_DICTIONARY case "rle_dictionary": mp.Encoding = parquet.Encoding_RLE_DICTIONARY default: panic(fmt.Errorf("Unknown encoding type: '%v'", val)) } case "keyencoding": switch strings.ToLower(val) { case "rle": mp.KeyEncoding = parquet.Encoding_RLE case "delta_binary_packed": mp.KeyEncoding = parquet.Encoding_DELTA_BINARY_PACKED case "delta_length_byte_array": 
mp.KeyEncoding = parquet.Encoding_DELTA_LENGTH_BYTE_ARRAY case "delta_byte_array": mp.KeyEncoding = parquet.Encoding_DELTA_BYTE_ARRAY case "plain_dictionary": mp.KeyEncoding = parquet.Encoding_PLAIN_DICTIONARY default: panic(fmt.Errorf("Unknown keyencoding type: '%v'", val)) } case "valueencoding": switch strings.ToLower(val) { case "rle": mp.ValueEncoding = parquet.Encoding_RLE case "delta_binary_packed": mp.ValueEncoding = parquet.Encoding_DELTA_BINARY_PACKED case "delta_length_byte_array": mp.ValueEncoding = parquet.Encoding_DELTA_LENGTH_BYTE_ARRAY case "delta_byte_array": mp.ValueEncoding = parquet.Encoding_DELTA_BYTE_ARRAY case "plain_dictionary": mp.ValueEncoding = parquet.Encoding_PLAIN_DICTIONARY default: panic(fmt.Errorf("Unknown valueencoding type: '%v'", val)) } default: panic(fmt.Errorf("Unrecognized tag '%v'", key)) } } return mp } /* NewSchemaElementFromTagMap builds a parquet SchemaElement from a parsed Tag: primitive types are used directly; converted (logical) types get a physical type chosen per the parquet mapping below. */ func NewSchemaElementFromTagMap(info *Tag) *parquet.SchemaElement { schema := parquet.NewSchemaElement() schema.Name = info.InName schema.TypeLength = &info.Length schema.Scale = &info.Scale schema.Precision = &info.Precision schema.FieldID = &info.FieldID schema.RepetitionType = &info.RepetitionType schema.NumChildren = nil typeName := info.Type if t, err := parquet.TypeFromString(typeName); err == nil { schema.Type = &t } else { ct, _ := parquet.ConvertedTypeFromString(typeName) schema.ConvertedType = &ct if typeName == "INT_8" || typeName == "INT_16" || typeName == "INT_32" || typeName == "UINT_8" || typeName == "UINT_16" || typeName == "UINT_32" || typeName == "DATE" || typeName == "TIME_MILLIS" { schema.Type = parquet.TypePtr(parquet.Type_INT32) } else if typeName == "INT_64" || typeName == "UINT_64" || typeName == "TIME_MICROS" || typeName == "TIMESTAMP_MICROS" || typeName == "TIMESTAMP_MILLIS" { schema.Type = parquet.TypePtr(parquet.Type_INT64) } else if typeName == "UTF8" { schema.Type = parquet.TypePtr(parquet.Type_BYTE_ARRAY) } else if typeName == "INTERVAL" { schema.Type = 
/* INTERVAL is a fixed 12-byte value per the parquet spec; DECIMAL falls back to the tag's declared BaseType. CmpIntBinary below compares arbitrary-width big-endian or little-endian integers encoded as strings, with optional two's-complement sign handling. */ parquet.TypePtr(parquet.Type_FIXED_LEN_BYTE_ARRAY) var ln int32 = 12 schema.TypeLength = &ln } else if typeName == "DECIMAL" { t, _ = parquet.TypeFromString(info.BaseType) schema.Type = &t } } return schema } func DeepCopy(src, dst interface{}) { var buf bytes.Buffer gob.NewEncoder(&buf).Encode(src) gob.NewDecoder(bytes.NewBuffer(buf.Bytes())).Decode(dst) return } //Get key tag map for map func GetKeyTagMap(src *Tag) *Tag { res := NewTag() res.InName = "Key" res.ExName = "key" res.Type = src.KeyType res.BaseType = src.KeyBaseType res.Length = src.KeyLength res.Scale = src.KeyScale res.Precision = src.KeyPrecision res.FieldID = src.KeyFieldID res.Encoding = src.KeyEncoding res.RepetitionType = parquet.FieldRepetitionType_REQUIRED return res } //Get value tag map for map func GetValueTagMap(src *Tag) *Tag { res := NewTag() res.InName = "Value" res.ExName = "value" res.Type = src.ValueType res.BaseType = src.ValueBaseType res.Length = src.ValueLength res.Scale = src.ValueScale res.Precision = src.ValuePrecision res.FieldID = src.ValueFieldID res.Encoding = src.ValueEncoding res.RepetitionType = src.ValueRepetitionType return res } //Convert the first letter of a string to uppercase func HeadToUpper(str string) string { ln := len(str) if ln <= 0 { return str } return strings.ToUpper(str[0:1]) + str[1:] } //Get the number of bits needed by the num; 0 needs 0, 1 need 1, 2 need 2, 3 need 2 .... func BitNum(num uint64) uint64 { var bitn uint64 = 0 for ; num != 0; num >>= 1 { bitn++ } return bitn } func CmpIntBinary(as string, bs string, order string, signed bool) bool { abs, bbs := []byte(as), []byte(bs) la, lb := len(abs), len(bbs) if order == "LittleEndian" { for i, j := 0, len(abs)-1; i < j; i, j = i+1, j-1 { abs[i], abs[j] = abs[j], abs[i] } for i, j := 0, len(bbs)-1; i < j; i, j = i+1, j-1 { bbs[i], bbs[j] = bbs[j], bbs[i] } } if !signed { if la < lb { abs = append(make([]byte, lb-la), abs...) } else if lb < la { bbs = append(make([]byte, la-lb), bbs...) 
} } else { if la < lb { sb := (abs[0] >> 7) & 1 pre := make([]byte, lb-la) if sb == 1 { for i := 0; i < lb-la; i++ { pre[i] = byte(0xFF) } } abs = append(pre, abs...) } else if la > lb { sb := (bbs[0] >> 7) & 1 pre := make([]byte, la-lb) if sb == 1 { for i := 0; i < la-lb; i++ { pre[i] = byte(0xFF) } } bbs = append(pre, bbs...) } asb, bsb := (abs[0]>>7)&1, (bbs[0]>>7)&1 if asb < bsb { return false } else if asb > bsb { return true } } for i := 0; i < len(abs); i++ { if abs[i] < bbs[i] { return true } else if abs[i] > bbs[i] { return false } } return false } //Compare two values: //a<b return true //a>=b return false func Cmp(ai interface{}, bi interface{}, pT *parquet.Type, cT *parquet.ConvertedType) bool { if ai == nil && bi != nil { return true } else if ai == nil && bi == nil { return false } else if ai != nil && bi == nil { return false } if cT == nil { if *pT == parquet.Type_BOOLEAN { a, b := ai.(bool), bi.(bool) if !a && b { return true } return false } else if *pT == parquet.Type_INT32 { return ai.(int32) < bi.(int32) } else if *pT == parquet.Type_INT64 { return ai.(int64) < bi.(int64) } else if *pT == parquet.Type_INT96 { a, b := []byte(ai.(string)), []byte(bi.(string)) fa, fb := a[11]>>7, b[11]>>7 if fa > fb { return true } else if fa < fb { return false } for i := 11; i >= 0; i-- { if a[i] < b[i] { return true } else if a[i] > b[i] { return false } } return false } else if *pT == parquet.Type_FLOAT { return ai.(float32) < bi.(float32) } else if *pT == parquet.Type_DOUBLE { return ai.(float64) < bi.(float64) } else if *pT == parquet.Type_BYTE_ARRAY { return ai.(string) < bi.(string) } else if *pT == parquet.Type_FIXED_LEN_BYTE_ARRAY { return ai.(string) < bi.(string) } } if *cT == parquet.ConvertedType_UTF8 { return ai.(string) < bi.(string) } else if *cT == parquet.ConvertedType_INT_8 || *cT == parquet.ConvertedType_INT_16 || *cT == parquet.ConvertedType_INT_32 || *cT == parquet.ConvertedType_DATE || *cT == parquet.ConvertedType_TIME_MILLIS { return 
ai.(int32) < bi.(int32) } else if *cT == parquet.ConvertedType_UINT_8 || *cT == parquet.ConvertedType_UINT_16 || *cT == parquet.ConvertedType_UINT_32 { return uint32(ai.(int32)) < uint32(bi.(int32)) } else if *cT == parquet.ConvertedType_INT_64 || *cT == parquet.ConvertedType_TIME_MICROS || *cT == parquet.ConvertedType_TIMESTAMP_MILLIS || *cT == parquet.ConvertedType_TIMESTAMP_MICROS { return ai.(int64) < bi.(int64) } else if *cT == parquet.ConvertedType_UINT_64 { return uint64(ai.(int64)) < uint64(bi.(int64)) } else if *cT == parquet.ConvertedType_INTERVAL { a, b := []byte(ai.(string)), []byte(bi.(string)) for i := 11; i >= 0; i-- { if a[i] > b[i] { return false } else if a[i] < b[i] { return true } } return false } else if *cT == parquet.ConvertedType_DECIMAL { if *pT == parquet.Type_BYTE_ARRAY { as, bs := ai.(string), bi.(string) return CmpIntBinary(as, bs, "BigEndian", true) } else if *pT == parquet.Type_FIXED_LEN_BYTE_ARRAY { as, bs := ai.(string), bi.(string) return CmpIntBinary(as, bs, "BigEndian", true) } else if *pT == parquet.Type_INT32 { return ai.(int32) < bi.(int32) } else if *pT == parquet.Type_INT64 { return ai.(int64) < bi.(int64) } } return false } //Get the maximum of two parquet values func Max(a interface{}, b interface{}, pT *parquet.Type, cT *parquet.ConvertedType) interface{} { if a == nil { return b } if b == nil { return a } if Cmp(a, b, pT, cT) { return b } return a } //Get the minimum of two parquet values func Min(a interface{}, b interface{}, pT *parquet.Type, cT *parquet.ConvertedType) interface{} { if a == nil { return b } if b == nil { return a } if Cmp(a, b, pT, cT) { return a } return b } //Get the size of a parquet value func SizeOf(val reflect.Value) int64 { var size int64 switch val.Type().Kind() { case reflect.Ptr: if val.IsNil() { return 0 } return SizeOf(val.Elem()) case reflect.Slice: for i := 0; i < val.Len(); i++ { size += SizeOf(val.Index(i)) } return size case reflect.Struct: for i := 0; i < val.Type().NumField(); i++ { 
/* NOTE(review): SizeOf/path helpers end here; the stray text 'support JSON/BSON' below reads like a commit message, and a second, newer copy of this entire file follows it — this looks like a concatenation artifact; confirm and deduplicate. */ size += SizeOf(val.Field(i)) } return size case reflect.Map: keys := val.MapKeys() for i := 0; i < len(keys); i++ { size += SizeOf(keys[i]) size += SizeOf(val.MapIndex(keys[i])) } return size } switch val.Type().Name() { case "bool": return 1 case "int32": return 4 case "int64": return 8 case "string": return int64(val.Len()) case "float32": return 4 case "float64": return 8 } return 4 } //Convert path slice to string func PathToStr(path []string) string { return strings.Join(path, ".") } //Convert string to path slice func StrToPath(str string) []string { return strings.Split(str, ".") } //Get the pathStr index in a path func PathStrIndex(str string) int { return len(strings.Split(str, ".")) } support JSON/BSON package common import ( "bytes" "encoding/gob" "fmt" "reflect" "strconv" "strings" "github.com/xitongsys/parquet-go/parquet" ) // `parquet:"name=Name, type=FIXED_LEN_BYTE_ARRAY, length=12"` type Tag struct { InName string ExName string Type string KeyType string ValueType string BaseType string KeyBaseType string ValueBaseType string Length int32 KeyLength int32 ValueLength int32 Scale int32 KeyScale int32 ValueScale int32 Precision int32 KeyPrecision int32 ValuePrecision int32 FieldID int32 KeyFieldID int32 ValueFieldID int32 Encoding parquet.Encoding KeyEncoding parquet.Encoding ValueEncoding parquet.Encoding RepetitionType parquet.FieldRepetitionType KeyRepetitionType parquet.FieldRepetitionType ValueRepetitionType parquet.FieldRepetitionType } func NewTag() *Tag { return &Tag{} } func StringToTag(tag string) *Tag { mp := NewTag() tagStr := strings.Replace(tag, "\t", "", -1) tags := strings.Split(tagStr, ",") for _, tag := range tags { tag = strings.TrimSpace(tag) kv := strings.Split(tag, "=") key := kv[0] key = strings.ToLower(key) key = strings.TrimSpace(key) val := kv[1] val = strings.TrimSpace(val) valInt32 := func() int32 { valInt, err := strconv.Atoi(val) if err != nil { panic(err) } return int32(valInt) } switch key { case "type": mp.Type = val 
case "keytype": mp.KeyType = val case "valuetype": mp.ValueType = val case "basetype": mp.BaseType = val case "keybasetype": mp.KeyBaseType = val case "valuebasetype": mp.ValueBaseType = val case "length": mp.Length = valInt32() case "keylength": mp.KeyLength = valInt32() case "valuelength": mp.ValueLength = valInt32() case "scale": mp.Scale = valInt32() case "keyscale": mp.KeyScale = valInt32() case "valuescale": mp.ValueScale = valInt32() case "precision": mp.Precision = valInt32() case "keyprecision": mp.KeyPrecision = valInt32() case "valueprecision": mp.ValuePrecision = valInt32() case "fieldid": mp.FieldID = valInt32() case "keyfieldid": mp.KeyFieldID = valInt32() case "valuefieldid": mp.ValueFieldID = valInt32() case "name": if mp.InName == "" { mp.InName = HeadToUpper(val) } mp.ExName = val case "inname": mp.InName = val case "repetitiontype": switch strings.ToLower(val) { case "repeated": mp.RepetitionType = parquet.FieldRepetitionType_REPEATED case "required": mp.RepetitionType = parquet.FieldRepetitionType_REQUIRED case "optional": mp.RepetitionType = parquet.FieldRepetitionType_OPTIONAL default: panic(fmt.Errorf("Unknown repetitiontype: '%v'", val)) } case "keyrepetitiontype": switch strings.ToLower(val) { case "repeated": mp.KeyRepetitionType = parquet.FieldRepetitionType_REPEATED case "required": mp.KeyRepetitionType = parquet.FieldRepetitionType_REQUIRED case "optional": mp.KeyRepetitionType = parquet.FieldRepetitionType_OPTIONAL default: panic(fmt.Errorf("Unknown keyrepetitiontype: '%v'", val)) } case "valuerepetitiontype": switch strings.ToLower(val) { case "repeated": mp.ValueRepetitionType = parquet.FieldRepetitionType_REPEATED case "required": mp.ValueRepetitionType = parquet.FieldRepetitionType_REQUIRED case "optional": mp.ValueRepetitionType = parquet.FieldRepetitionType_OPTIONAL default: panic(fmt.Errorf("Unknown valuerepetitiontype: '%v'", val)) } case "encoding": switch strings.ToLower(val) { case "plain": mp.Encoding = 
parquet.Encoding_PLAIN case "rle": mp.Encoding = parquet.Encoding_RLE case "delta_binary_packed": mp.Encoding = parquet.Encoding_DELTA_BINARY_PACKED case "delta_length_byte_array": mp.Encoding = parquet.Encoding_DELTA_LENGTH_BYTE_ARRAY case "delta_byte_array": mp.Encoding = parquet.Encoding_DELTA_BYTE_ARRAY case "plain_dictionary": mp.Encoding = parquet.Encoding_PLAIN_DICTIONARY case "rle_dictionary": mp.Encoding = parquet.Encoding_RLE_DICTIONARY default: panic(fmt.Errorf("Unknown encoding type: '%v'", val)) } case "keyencoding": switch strings.ToLower(val) { case "rle": mp.KeyEncoding = parquet.Encoding_RLE case "delta_binary_packed": mp.KeyEncoding = parquet.Encoding_DELTA_BINARY_PACKED case "delta_length_byte_array": mp.KeyEncoding = parquet.Encoding_DELTA_LENGTH_BYTE_ARRAY case "delta_byte_array": mp.KeyEncoding = parquet.Encoding_DELTA_BYTE_ARRAY case "plain_dictionary": mp.KeyEncoding = parquet.Encoding_PLAIN_DICTIONARY default: panic(fmt.Errorf("Unknown keyencoding type: '%v'", val)) } case "valueencoding": switch strings.ToLower(val) { case "rle": mp.ValueEncoding = parquet.Encoding_RLE case "delta_binary_packed": mp.ValueEncoding = parquet.Encoding_DELTA_BINARY_PACKED case "delta_length_byte_array": mp.ValueEncoding = parquet.Encoding_DELTA_LENGTH_BYTE_ARRAY case "delta_byte_array": mp.ValueEncoding = parquet.Encoding_DELTA_BYTE_ARRAY case "plain_dictionary": mp.ValueEncoding = parquet.Encoding_PLAIN_DICTIONARY default: panic(fmt.Errorf("Unknown valueencoding type: '%v'", val)) } default: panic(fmt.Errorf("Unrecognized tag '%v'", key)) } } return mp } func NewSchemaElementFromTagMap(info *Tag) *parquet.SchemaElement { schema := parquet.NewSchemaElement() schema.Name = info.InName schema.TypeLength = &info.Length schema.Scale = &info.Scale schema.Precision = &info.Precision schema.FieldID = &info.FieldID schema.RepetitionType = &info.RepetitionType schema.NumChildren = nil typeName := info.Type if t, err := parquet.TypeFromString(typeName); err == nil { 
/* Duplicate revision continues: the visible difference from the earlier copy is that UTF8, JSON and BSON all map to the BYTE_ARRAY physical type here. */ schema.Type = &t } else { ct, _ := parquet.ConvertedTypeFromString(typeName) schema.ConvertedType = &ct if typeName == "INT_8" || typeName == "INT_16" || typeName == "INT_32" || typeName == "UINT_8" || typeName == "UINT_16" || typeName == "UINT_32" || typeName == "DATE" || typeName == "TIME_MILLIS" { schema.Type = parquet.TypePtr(parquet.Type_INT32) } else if typeName == "INT_64" || typeName == "UINT_64" || typeName == "TIME_MICROS" || typeName == "TIMESTAMP_MICROS" || typeName == "TIMESTAMP_MILLIS" { schema.Type = parquet.TypePtr(parquet.Type_INT64) } else if typeName == "UTF8" || typeName == "JSON" || typeName == "BSON" { schema.Type = parquet.TypePtr(parquet.Type_BYTE_ARRAY) } else if typeName == "INTERVAL" { schema.Type = parquet.TypePtr(parquet.Type_FIXED_LEN_BYTE_ARRAY) var ln int32 = 12 schema.TypeLength = &ln } else if typeName == "DECIMAL" { t, _ = parquet.TypeFromString(info.BaseType) schema.Type = &t } } return schema } func DeepCopy(src, dst interface{}) { var buf bytes.Buffer gob.NewEncoder(&buf).Encode(src) gob.NewDecoder(bytes.NewBuffer(buf.Bytes())).Decode(dst) return } //Get key tag map for map func GetKeyTagMap(src *Tag) *Tag { res := NewTag() res.InName = "Key" res.ExName = "key" res.Type = src.KeyType res.BaseType = src.KeyBaseType res.Length = src.KeyLength res.Scale = src.KeyScale res.Precision = src.KeyPrecision res.FieldID = src.KeyFieldID res.Encoding = src.KeyEncoding res.RepetitionType = parquet.FieldRepetitionType_REQUIRED return res } //Get value tag map for map func GetValueTagMap(src *Tag) *Tag { res := NewTag() res.InName = "Value" res.ExName = "value" res.Type = src.ValueType res.BaseType = src.ValueBaseType res.Length = src.ValueLength res.Scale = src.ValueScale res.Precision = src.ValuePrecision res.FieldID = src.ValueFieldID res.Encoding = src.ValueEncoding res.RepetitionType = src.ValueRepetitionType return res } //Convert the first letter of a string to uppercase func HeadToUpper(str string) string { ln := len(str) if ln <= 0 { 
return str } return strings.ToUpper(str[0:1]) + str[1:] } //Get the number of bits needed by the num; 0 needs 0, 1 need 1, 2 need 2, 3 need 2 .... func BitNum(num uint64) uint64 { var bitn uint64 = 0 for ; num != 0; num >>= 1 { bitn++ } return bitn } func CmpIntBinary(as string, bs string, order string, signed bool) bool { abs, bbs := []byte(as), []byte(bs) la, lb := len(abs), len(bbs) if order == "LittleEndian" { for i, j := 0, len(abs)-1; i < j; i, j = i+1, j-1 { abs[i], abs[j] = abs[j], abs[i] } for i, j := 0, len(bbs)-1; i < j; i, j = i+1, j-1 { bbs[i], bbs[j] = bbs[j], bbs[i] } } if !signed { if la < lb { abs = append(make([]byte, lb-la), abs...) } else if lb < la { bbs = append(make([]byte, la-lb), bbs...) } } else { if la < lb { sb := (abs[0] >> 7) & 1 pre := make([]byte, lb-la) if sb == 1 { for i := 0; i < lb-la; i++ { pre[i] = byte(0xFF) } } abs = append(pre, abs...) } else if la > lb { sb := (bbs[0] >> 7) & 1 pre := make([]byte, la-lb) if sb == 1 { for i := 0; i < la-lb; i++ { pre[i] = byte(0xFF) } } bbs = append(pre, bbs...) 
} asb, bsb := (abs[0]>>7)&1, (bbs[0]>>7)&1 if asb < bsb { return false } else if asb > bsb { return true } } for i := 0; i < len(abs); i++ { if abs[i] < bbs[i] { return true } else if abs[i] > bbs[i] { return false } } return false } //Compare two values: //a<b return true //a>=b return false func Cmp(ai interface{}, bi interface{}, pT *parquet.Type, cT *parquet.ConvertedType) bool { if ai == nil && bi != nil { return true } else if ai == nil && bi == nil { return false } else if ai != nil && bi == nil { return false } if cT == nil { if *pT == parquet.Type_BOOLEAN { a, b := ai.(bool), bi.(bool) if !a && b { return true } return false } else if *pT == parquet.Type_INT32 { return ai.(int32) < bi.(int32) } else if *pT == parquet.Type_INT64 { return ai.(int64) < bi.(int64) } else if *pT == parquet.Type_INT96 { a, b := []byte(ai.(string)), []byte(bi.(string)) fa, fb := a[11]>>7, b[11]>>7 if fa > fb { return true } else if fa < fb { return false } for i := 11; i >= 0; i-- { if a[i] < b[i] { return true } else if a[i] > b[i] { return false } } return false } else if *pT == parquet.Type_FLOAT { return ai.(float32) < bi.(float32) } else if *pT == parquet.Type_DOUBLE { return ai.(float64) < bi.(float64) } else if *pT == parquet.Type_BYTE_ARRAY { return ai.(string) < bi.(string) } else if *pT == parquet.Type_FIXED_LEN_BYTE_ARRAY { return ai.(string) < bi.(string) } } if *cT == parquet.ConvertedType_UTF8 { return ai.(string) < bi.(string) } else if *cT == parquet.ConvertedType_INT_8 || *cT == parquet.ConvertedType_INT_16 || *cT == parquet.ConvertedType_INT_32 || *cT == parquet.ConvertedType_DATE || *cT == parquet.ConvertedType_TIME_MILLIS { return ai.(int32) < bi.(int32) } else if *cT == parquet.ConvertedType_UINT_8 || *cT == parquet.ConvertedType_UINT_16 || *cT == parquet.ConvertedType_UINT_32 { return uint32(ai.(int32)) < uint32(bi.(int32)) } else if *cT == parquet.ConvertedType_INT_64 || *cT == parquet.ConvertedType_TIME_MICROS || *cT == 
/* Duplicate revision continues: converted-type ordering for Cmp, then Max/Min, reflective SizeOf, and dotted-path helpers. */ parquet.ConvertedType_TIMESTAMP_MILLIS || *cT == parquet.ConvertedType_TIMESTAMP_MICROS { return ai.(int64) < bi.(int64) } else if *cT == parquet.ConvertedType_UINT_64 { return uint64(ai.(int64)) < uint64(bi.(int64)) } else if *cT == parquet.ConvertedType_INTERVAL { a, b := []byte(ai.(string)), []byte(bi.(string)) for i := 11; i >= 0; i-- { if a[i] > b[i] { return false } else if a[i] < b[i] { return true } } return false } else if *cT == parquet.ConvertedType_DECIMAL { if *pT == parquet.Type_BYTE_ARRAY { as, bs := ai.(string), bi.(string) return CmpIntBinary(as, bs, "BigEndian", true) } else if *pT == parquet.Type_FIXED_LEN_BYTE_ARRAY { as, bs := ai.(string), bi.(string) return CmpIntBinary(as, bs, "BigEndian", true) } else if *pT == parquet.Type_INT32 { return ai.(int32) < bi.(int32) } else if *pT == parquet.Type_INT64 { return ai.(int64) < bi.(int64) } } return false } //Get the maximum of two parquet values func Max(a interface{}, b interface{}, pT *parquet.Type, cT *parquet.ConvertedType) interface{} { if a == nil { return b } if b == nil { return a } if Cmp(a, b, pT, cT) { return b } return a } //Get the minimum of two parquet values func Min(a interface{}, b interface{}, pT *parquet.Type, cT *parquet.ConvertedType) interface{} { if a == nil { return b } if b == nil { return a } if Cmp(a, b, pT, cT) { return a } return b } //Get the size of a parquet value func SizeOf(val reflect.Value) int64 { var size int64 switch val.Type().Kind() { case reflect.Ptr: if val.IsNil() { return 0 } return SizeOf(val.Elem()) case reflect.Slice: for i := 0; i < val.Len(); i++ { size += SizeOf(val.Index(i)) } return size case reflect.Struct: for i := 0; i < val.Type().NumField(); i++ { size += SizeOf(val.Field(i)) } return size case reflect.Map: keys := val.MapKeys() for i := 0; i < len(keys); i++ { size += SizeOf(keys[i]) size += SizeOf(val.MapIndex(keys[i])) } return size } switch val.Type().Name() { case "bool": return 1 case "int32": return 4 case "int64": return 8 case 
"string": return int64(val.Len()) case "float32": return 4 case "float64": return 8 } return 4 } //Convert path slice to string func PathToStr(path []string) string { return strings.Join(path, ".") } //Convert string to path slice func StrToPath(str string) []string { return strings.Split(str, ".") } //Get the pathStr index in a path func PathStrIndex(str string) int { return len(strings.Split(str, ".")) }
/* Chat protocol types, first revision: Message routes by *User pointers with a free-form messageType; the trailing prose is a commit note describing the rewrite that follows in the next chunk. */ package common import "log" const ( TEXT = 1 BINARY = 2 ) type DataType int type Message struct { from *User to *User // Possible values: // control // message // file messageType string dataType DataType data []byte } /* NewMessage assembles a Message value; String/Binary below guard on dataType and log (rather than error) on a mismatch. */ func NewMessage(from *User, to *User, messageType string, dataType DataType, data []byte) Message { return Message{ from: from, to: to, messageType: messageType, dataType: dataType, data: data, } } func (m Message) String() string { if m.dataType != TEXT { log.Println("Message data is not a text type") return "" } return string(m.data) } func (m Message) Binary() []byte { if m.dataType != BINARY { log.Println("Message data is not binary type") return []byte{} } return m.data } func (m Message) From() *User { return m.from } func (m Message) To() *User { return m.to } func (m Message) DataType() DataType { return m.dataType } func (m Message) Raw() []byte { return m.data } type User struct { Id string In chan Message Out chan Message } Rewrite common types to coresponds to protocol description Rewrite Message struct to use userIds for 'from' and 'to'. Add expire date for temporary messages. 
Use channels with pointer to Message struct package common import ( "log" "time" ) const ( TEXT = 1 BINARY = 2 ) type DataType int type Message struct { from string to string cmdType string cmd string expireDate time.Time dataType DataType data []byte } func NewMessage(from string, to string, cmdType string, cmd string, expireDate time.Time, dataType DataType, data []byte) Message { return Message{ from: from, to: to, cmdType: cmdType, cmd: cmd, expireDate: expireDate, dataType: dataType, data: data, } } func (m Message) String() string { if m.dataType != TEXT { log.Println("Message data is not a text type") return "" } return string(m.data) } func (m Message) Binary() []byte { if m.dataType != BINARY { log.Println("Message data is not binary type") return []byte{} } return m.data } func (m Message) From() string { return m.from } func (m Message) To() string { return m.to } func (m Message) CmdType() string { return m.cmdType } func (m Message) Cmd() string { return m.cmd } func (m Message) ExpireDate() time.Time { return m.expireDate } func (m Message) DataType() DataType { return m.dataType } func (m Message) Raw() []byte { return m.data } type User struct { Id string In chan *Message Out chan *Message }
/* Argo CD shared constants: default service addresses, config map and secret names, listener ports, pod file-system paths, sync retry defaults, application/Dex/OIDC identifiers, resource labels and annotations, and tuning/debugging environment variable names. */ package common import ( "os" "strconv" "time" ) // Default service addresses and URLS of Argo CD internal services const ( // DefaultRepoServerAddr is the gRPC address of the Argo CD repo server DefaultRepoServerAddr = "argocd-repo-server:8081" // DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against DefaultDexServerAddr = "http://argocd-dex-server:5556" // DefaultRedisAddr is the default redis address DefaultRedisAddr = "argocd-redis:6379" ) // Kubernetes ConfigMap and Secret resource names which hold Argo CD settings const ( ArgoCDConfigMapName = "argocd-cm" ArgoCDSecretName = "argocd-secret" ArgoCDRBACConfigMapName = "argocd-rbac-cm" // Contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods ArgoCDKnownHostsConfigMapName = "argocd-ssh-known-hosts-cm" // Contains TLS certificate data for connecting repositories. Will get mounted as volume to pods ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm" ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm" ) // Some default configurables const ( DefaultSystemNamespace = "kube-system" DefaultRepoType = "git" ) // Default listener ports for ArgoCD components const ( DefaultPortAPIServer = 8080 DefaultPortRepoServer = 8081 DefaultPortArgoCDMetrics = 8082 DefaultPortArgoCDAPIServerMetrics = 8083 DefaultPortRepoServerMetrics = 8084 ) // Default paths on the pod's file system const ( // The default path where TLS certificates for repositories are located DefaultPathTLSConfig = "/app/config/tls" // The default path where SSH known hosts are stored DefaultPathSSHConfig = "/app/config/ssh" // Default name for the SSH known hosts file DefaultSSHKnownHostsName = "ssh_known_hosts" // Default path to GnuPG home directory DefaultGnuPgHomePath = "/app/config/gpg/keys" ) const ( DefaultSyncRetryDuration = 5 * time.Second DefaultSyncRetryMaxDuration = 3 * time.Minute DefaultSyncRetryFactor = int64(2) ) // Argo CD application related constants const ( // 
KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc" // DefaultAppProjectName contains name of 'default' app project, which is available in every Argo CD installation DefaultAppProjectName = "default" // ArgoCDAdminUsername is the username of the 'admin' user ArgoCDAdminUsername = "admin" // ArgoCDUserAgentName is the default user-agent name used by the gRPC API client library and grpc-gateway ArgoCDUserAgentName = "argocd-client" // AuthCookieName is the HTTP cookie name where we store our auth token AuthCookieName = "argocd.token" // RevisionHistoryLimit is the max number of successful sync to keep in history RevisionHistoryLimit = 10 // ChangePasswordSSOTokenMaxAge is the max token age for password change operation ChangePasswordSSOTokenMaxAge = time.Minute * 5 ) // Dex related constants const ( // DexAPIEndpoint is the endpoint where we serve the Dex API server DexAPIEndpoint = "/api/dex" // LoginEndpoint is Argo CD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page LoginEndpoint = "/auth/login" // CallbackEndpoint is Argo CD's final callback endpoint we reach after OAuth 2.0 login flow has been completed CallbackEndpoint = "/auth/callback" // DexCallbackEndpoint is Argo CD's final callback endpoint when Dex is configured DexCallbackEndpoint = "/api/dex/callback" // ArgoCDClientAppName is name of the Oauth client app used when registering our web app to dex ArgoCDClientAppName = "Argo CD" // ArgoCDClientAppID is the Oauth client ID we will use when registering our app to dex ArgoCDClientAppID = "argo-cd" // ArgoCDCLIClientAppName is name of the Oauth client app used when registering our CLI to dex ArgoCDCLIClientAppName = "Argo CD CLI" // ArgoCDCLIClientAppID is the Oauth client ID we will use when registering our CLI to dex ArgoCDCLIClientAppID = "argo-cd-cli" ) // Resource metadata labels and annotations 
(keys and values) used by Argo CD components const ( // LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application // The Argo CD application name is used as the instance name LabelKeyAppInstance = "app.kubernetes.io/instance" // LegacyLabelApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance' LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name" // LabelKeySecretType contains the type of argocd secret (currently: 'cluster') LabelKeySecretType = "argocd.argoproj.io/secret-type" // LabelValueSecretTypeCluster indicates a secret type of cluster LabelValueSecretTypeCluster = "cluster" // AnnotationCompareOptions is a comma-separated list of options for comparison AnnotationCompareOptions = "argocd.argoproj.io/compare-options" // AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed. // Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh. AnnotationKeyRefresh = "argocd.argoproj.io/refresh" // AnnotationKeyManagedBy is annotation name which indicates that k8s resource is managed by an application. AnnotationKeyManagedBy = "managed-by" // AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD AnnotationValueManagedByArgoCD = "argocd.argoproj.io" // ResourcesFinalizerName the finalizer value which we inject to finalize deletion of an application ResourcesFinalizerName = "resources-finalizer.argocd.argoproj.io" // AnnotationKeyLinkPrefix tells the UI to add an external link icon to the application node // that links to the value given in the annotation. // The annotation key must be followed by a unique identifier. Ex: link.argocd.argoproj.io/dashboard // It's valid to have multiple annotations that match the prefix. 
// Values can simply be a url or they can have // an optional link title separated by a "|" // Ex: "http://grafana.example.com/d/yu5UH4MMz/deployments" // Ex: "Go to Dashboard|http://grafana.example.com/d/yu5UH4MMz/deployments" AnnotationKeyLinkPrefix = "link.argocd.argoproj.io/" ) // Environment variables for tuning and debugging Argo CD const ( // EnvVarSSODebug is an environment variable to enable additional OAuth debugging in the API server EnvVarSSODebug = "ARGOCD_SSO_DEBUG" // EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG" // EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using // the current kubectl context (for development purposes) EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER" // Overrides the location where SSH known hosts for repo access data is stored EnvVarSSHDataPath = "ARGOCD_SSH_DATA_PATH" // Overrides the location where TLS certificate for repo access data is stored EnvVarTLSDataPath = "ARGOCD_TLS_DATA_PATH" // Specifies number of git remote operations attempts count EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT" // Overrides git submodule support, true by default EnvGitSubmoduleEnabled = "ARGOCD_GIT_MODULES_ENABLED" // EnvK8sClientQPS is the QPS value used for the kubernetes client (default: 50) EnvK8sClientQPS = "ARGOCD_K8S_CLIENT_QPS" // EnvK8sClientBurst is the burst value used for the kubernetes client (default: twice the client QPS) EnvK8sClientBurst = "ARGOCD_K8S_CLIENT_BURST" // EnvClusterCacheResyncDuration is the env variable that holds cluster cache re-sync duration EnvClusterCacheResyncDuration = "ARGOCD_CLUSTER_CACHE_RESYNC_DURATION" // EnvK8sClientMaxIdleConnections is the number of max idle connections in K8s REST client HTTP transport (default: 500) EnvK8sClientMaxIdleConnections = "ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS" // EnvGnuPGHome is the path to ArgoCD's GnuPG keyring for signature 
verification EnvGnuPGHome = "ARGOCD_GNUPGHOME" // EnvWatchAPIBufferSize is the buffer size used to transfer K8S watch events to watch API consumer EnvWatchAPIBufferSize = "ARGOCD_WATCH_API_BUFFER_SIZE" // EnvPauseGenerationAfterFailedAttempts will pause manifest generation after the specified number of failed generation attempts EnvPauseGenerationAfterFailedAttempts = "ARGOCD_PAUSE_GEN_AFTER_FAILED_ATTEMPTS" // EnvPauseGenerationMinutes pauses manifest generation for the specified number of minutes, after sufficient manifest generation failures EnvPauseGenerationMinutes = "ARGOCD_PAUSE_GEN_MINUTES" // EnvPauseGenerationRequests pauses manifest generation for the specified number of requests, after sufficient manifest generation failures EnvPauseGenerationRequests = "ARGOCD_PAUSE_GEN_REQUESTS" // EnvControllerReplicas is the number of controller replicas EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS" // EnvControllerShard is the shard number that should be handled by controller EnvControllerShard = "ARGOCD_CONTROLLER_SHARD" ) const ( // MinClientVersion is the minimum client version that can interface with this API server. // When introducing breaking changes to the API or datastructures, this number should be bumped. // The value here may be lower than the current value in VERSION MinClientVersion = "1.4.0" // CacheVersion is a objects version cached using util/cache/cache.go. // Number should be bumped in case of backward incompatible change to make sure cache is invalidated after upgrade. 
CacheVersion = "1.8.0" ) // GetGnuPGHomePath retrieves the path to use for GnuPG home directory, which is either taken from GNUPGHOME environment or a default value func GetGnuPGHomePath() string { if gnuPgHome := os.Getenv(EnvGnuPGHome); gnuPgHome == "" { return DefaultGnuPgHomePath } else { return gnuPgHome } } var ( // K8sClientConfigQPS controls the QPS to be used in K8s REST client configs K8sClientConfigQPS float32 = 50 // K8sClientConfigBurst controls the burst to be used in K8s REST client configs K8sClientConfigBurst int = 100 // K8sMaxIdleConnections controls the number of max idle connections in K8s REST client HTTP transport K8sMaxIdleConnections = 500 // K8sMaxIdleConnections controls the duration of cluster cache refresh K8SClusterResyncDuration = 12 * time.Hour ) func init() { if envQPS := os.Getenv(EnvK8sClientQPS); envQPS != "" { if qps, err := strconv.ParseFloat(envQPS, 32); err != nil { K8sClientConfigQPS = float32(qps) } } if envBurst := os.Getenv(EnvK8sClientBurst); envBurst != "" { if burst, err := strconv.Atoi(envBurst); err != nil { K8sClientConfigBurst = burst } } else { K8sClientConfigBurst = 2 * int(K8sClientConfigQPS) } if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" { if maxConn, err := strconv.Atoi(envMaxConn); err != nil { K8sMaxIdleConnections = maxConn } } if clusterResyncDurationStr := os.Getenv(EnvClusterCacheResyncDuration); clusterResyncDurationStr != "" { if duration, err := time.ParseDuration(clusterResyncDurationStr); err == nil { K8SClusterResyncDuration = duration } } } fix: bump cache version to avoid nil pointer error (#4525) package common import ( "os" "strconv" "time" ) // Default service addresses and URLS of Argo CD internal services const ( // DefaultRepoServerAddr is the gRPC address of the Argo CD repo server DefaultRepoServerAddr = "argocd-repo-server:8081" // DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against DefaultDexServerAddr = 
"http://argocd-dex-server:5556" // DefaultRedisAddr is the default redis address DefaultRedisAddr = "argocd-redis:6379" ) // Kubernetes ConfigMap and Secret resource names which hold Argo CD settings const ( ArgoCDConfigMapName = "argocd-cm" ArgoCDSecretName = "argocd-secret" ArgoCDRBACConfigMapName = "argocd-rbac-cm" // Contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods ArgoCDKnownHostsConfigMapName = "argocd-ssh-known-hosts-cm" // Contains TLS certificate data for connecting repositories. Will get mounted as volume to pods ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm" ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm" ) // Some default configurables const ( DefaultSystemNamespace = "kube-system" DefaultRepoType = "git" ) // Default listener ports for ArgoCD components const ( DefaultPortAPIServer = 8080 DefaultPortRepoServer = 8081 DefaultPortArgoCDMetrics = 8082 DefaultPortArgoCDAPIServerMetrics = 8083 DefaultPortRepoServerMetrics = 8084 ) // Default paths on the pod's file system const ( // The default path where TLS certificates for repositories are located DefaultPathTLSConfig = "/app/config/tls" // The default path where SSH known hosts are stored DefaultPathSSHConfig = "/app/config/ssh" // Default name for the SSH known hosts file DefaultSSHKnownHostsName = "ssh_known_hosts" // Default path to GnuPG home directory DefaultGnuPgHomePath = "/app/config/gpg/keys" ) const ( DefaultSyncRetryDuration = 5 * time.Second DefaultSyncRetryMaxDuration = 3 * time.Minute DefaultSyncRetryFactor = int64(2) ) // Argo CD application related constants const ( // KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc" // DefaultAppProjectName contains name of 'default' app project, which is available in every Argo CD installation DefaultAppProjectName = "default" // ArgoCDAdminUsername is the username of the 'admin' 
user ArgoCDAdminUsername = "admin" // ArgoCDUserAgentName is the default user-agent name used by the gRPC API client library and grpc-gateway ArgoCDUserAgentName = "argocd-client" // AuthCookieName is the HTTP cookie name where we store our auth token AuthCookieName = "argocd.token" // RevisionHistoryLimit is the max number of successful sync to keep in history RevisionHistoryLimit = 10 // ChangePasswordSSOTokenMaxAge is the max token age for password change operation ChangePasswordSSOTokenMaxAge = time.Minute * 5 ) // Dex related constants const ( // DexAPIEndpoint is the endpoint where we serve the Dex API server DexAPIEndpoint = "/api/dex" // LoginEndpoint is Argo CD's shorthand login endpoint which redirects to dex's OAuth 2.0 provider's consent page LoginEndpoint = "/auth/login" // CallbackEndpoint is Argo CD's final callback endpoint we reach after OAuth 2.0 login flow has been completed CallbackEndpoint = "/auth/callback" // DexCallbackEndpoint is Argo CD's final callback endpoint when Dex is configured DexCallbackEndpoint = "/api/dex/callback" // ArgoCDClientAppName is name of the Oauth client app used when registering our web app to dex ArgoCDClientAppName = "Argo CD" // ArgoCDClientAppID is the Oauth client ID we will use when registering our app to dex ArgoCDClientAppID = "argo-cd" // ArgoCDCLIClientAppName is name of the Oauth client app used when registering our CLI to dex ArgoCDCLIClientAppName = "Argo CD CLI" // ArgoCDCLIClientAppID is the Oauth client ID we will use when registering our CLI to dex ArgoCDCLIClientAppID = "argo-cd-cli" ) // Resource metadata labels and annotations (keys and values) used by Argo CD components const ( // LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application // The Argo CD application name is used as the instance name LabelKeyAppInstance = "app.kubernetes.io/instance" // LegacyLabelApplicationName is the legacy label (v0.10 and below) and is superceded by 
'app.kubernetes.io/instance' LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name" // LabelKeySecretType contains the type of argocd secret (currently: 'cluster') LabelKeySecretType = "argocd.argoproj.io/secret-type" // LabelValueSecretTypeCluster indicates a secret type of cluster LabelValueSecretTypeCluster = "cluster" // AnnotationCompareOptions is a comma-separated list of options for comparison AnnotationCompareOptions = "argocd.argoproj.io/compare-options" // AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed. // Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh. AnnotationKeyRefresh = "argocd.argoproj.io/refresh" // AnnotationKeyManagedBy is annotation name which indicates that k8s resource is managed by an application. AnnotationKeyManagedBy = "managed-by" // AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD AnnotationValueManagedByArgoCD = "argocd.argoproj.io" // ResourcesFinalizerName the finalizer value which we inject to finalize deletion of an application ResourcesFinalizerName = "resources-finalizer.argocd.argoproj.io" // AnnotationKeyLinkPrefix tells the UI to add an external link icon to the application node // that links to the value given in the annotation. // The annotation key must be followed by a unique identifier. Ex: link.argocd.argoproj.io/dashboard // It's valid to have multiple annotions that match the prefix. 
// Values can simply be a url or they can have // an optional link title separated by a "|" // Ex: "http://grafana.example.com/d/yu5UH4MMz/deployments" // Ex: "Go to Dashboard|http://grafana.example.com/d/yu5UH4MMz/deployments" AnnotationKeyLinkPrefix = "link.argocd.argoproj.io/" ) // Environment variables for tuning and debugging Argo CD const ( // EnvVarSSODebug is an environment variable to enable additional OAuth debugging in the API server EnvVarSSODebug = "ARGOCD_SSO_DEBUG" // EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG" // EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using // the current kubectl context (for development purposes) EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER" // Overrides the location where SSH known hosts for repo access data is stored EnvVarSSHDataPath = "ARGOCD_SSH_DATA_PATH" // Overrides the location where TLS certificate for repo access data is stored EnvVarTLSDataPath = "ARGOCD_TLS_DATA_PATH" // Specifies number of git remote operations attempts count EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT" // Overrides git submodule support, true by default EnvGitSubmoduleEnabled = "ARGOCD_GIT_MODULES_ENABLED" // EnvK8sClientQPS is the QPS value used for the kubernetes client (default: 50) EnvK8sClientQPS = "ARGOCD_K8S_CLIENT_QPS" // EnvK8sClientBurst is the burst value used for the kubernetes client (default: twice the client QPS) EnvK8sClientBurst = "ARGOCD_K8S_CLIENT_BURST" // EnvClusterCacheResyncDuration is the env variable that holds cluster cache re-sync duration EnvClusterCacheResyncDuration = "ARGOCD_CLUSTER_CACHE_RESYNC_DURATION" // EnvK8sClientMaxIdleConnections is the number of max idle connections in K8s REST client HTTP transport (default: 500) EnvK8sClientMaxIdleConnections = "ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS" // EnvGnuPGHome is the path to ArgoCD's GnuPG keyring for signature 
verification EnvGnuPGHome = "ARGOCD_GNUPGHOME" // EnvWatchAPIBufferSize is the buffer size used to transfer K8S watch events to watch API consumer EnvWatchAPIBufferSize = "ARGOCD_WATCH_API_BUFFER_SIZE" // EnvPauseGenerationAfterFailedAttempts will pause manifest generation after the specified number of failed generation attempts EnvPauseGenerationAfterFailedAttempts = "ARGOCD_PAUSE_GEN_AFTER_FAILED_ATTEMPTS" // EnvPauseGenerationMinutes pauses manifest generation for the specified number of minutes, after sufficient manifest generation failures EnvPauseGenerationMinutes = "ARGOCD_PAUSE_GEN_MINUTES" // EnvPauseGenerationRequests pauses manifest generation for the specified number of requests, after sufficient manifest generation failures EnvPauseGenerationRequests = "ARGOCD_PAUSE_GEN_REQUESTS" // EnvControllerReplicas is the number of controller replicas EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS" // EnvControllerShard is the shard number that should be handled by controller EnvControllerShard = "ARGOCD_CONTROLLER_SHARD" ) const ( // MinClientVersion is the minimum client version that can interface with this API server. // When introducing breaking changes to the API or datastructures, this number should be bumped. // The value here may be lower than the current value in VERSION MinClientVersion = "1.4.0" // CacheVersion is a objects version cached using util/cache/cache.go. // Number should be bumped in case of backward incompatible change to make sure cache is invalidated after upgrade. 
CacheVersion = "1.8.1"
)

// GetGnuPGHomePath retrieves the path to use for the GnuPG home directory,
// which is either taken from the environment variable named by EnvGnuPGHome
// or a default value.
func GetGnuPGHomePath() string {
	if gnuPgHome := os.Getenv(EnvGnuPGHome); gnuPgHome != "" {
		return gnuPgHome
	}
	return DefaultGnuPgHomePath
}

var (
	// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
	K8sClientConfigQPS float32 = 50
	// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
	K8sClientConfigBurst int = 100
	// K8sMaxIdleConnections controls the number of max idle connections in K8s REST client HTTP transport
	K8sMaxIdleConnections = 500
	// K8SClusterResyncDuration controls the duration of cluster cache refresh
	K8SClusterResyncDuration = 12 * time.Hour
)

// init overrides the K8s client tuning defaults above from their respective
// environment variables. Values that fail to parse are ignored and the
// defaults are kept.
func init() {
	if envQPS := os.Getenv(EnvK8sClientQPS); envQPS != "" {
		// BUGFIX: apply the override only when parsing SUCCEEDS (err == nil);
		// the original inverted the check and applied it on parse failure,
		// so valid env values were silently ignored.
		if qps, err := strconv.ParseFloat(envQPS, 32); err == nil {
			K8sClientConfigQPS = float32(qps)
		}
	}
	if envBurst := os.Getenv(EnvK8sClientBurst); envBurst != "" {
		// BUGFIX: same inverted error check as above.
		if burst, err := strconv.Atoi(envBurst); err == nil {
			K8sClientConfigBurst = burst
		}
	} else {
		// Default burst: twice the (possibly overridden) client QPS.
		K8sClientConfigBurst = 2 * int(K8sClientConfigQPS)
	}
	if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" {
		// BUGFIX: same inverted error check as above.
		if maxConn, err := strconv.Atoi(envMaxConn); err == nil {
			K8sMaxIdleConnections = maxConn
		}
	}
	if clusterResyncDurationStr := os.Getenv(EnvClusterCacheResyncDuration); clusterResyncDurationStr != "" {
		if duration, err := time.ParseDuration(clusterResyncDurationStr); err == nil {
			K8SClusterResyncDuration = duration
		}
	}
}
package common import ( log "github.com/Sirupsen/logrus" "gopkg.in/gcfg.v1" ) type Config struct { // configuration for the logging Logging struct { // whether or not log outputs in JSON UseJSON bool Level *string } // server configuration Server struct { Port int // if true, listens on 0.0.0.0 Global bool } // MongoDB configuration Mongo struct { Port int Host string } // Debugging configuration Debug struct { Enable bool ProfileLength int } Benchmark struct { BrokerURL *string BrokerPort *int StepSpacing *int // How long between increasing client/producer counts (seconds) ConfigurationName *string // Named bundle of query/metadata } } func LoadConfig(filename string) (config *Config) { config = new(Config) err := gcfg.ReadFileInto(config, filename) if err != nil { log.WithFields(log.Fields{ "location": filename, "error": err, }).Error("Couldn't load configuration file at given location. Trying local ./config.ini") } else { return } err = gcfg.ReadFileInto(config, "./config.ini") if err != nil { log.WithField("error", err).Fatal("Couldn't load configuration file at ./config.ini") } return } func SetupLogging(config *Config) { if config.Logging.UseJSON { log.SetFormatter(&log.JSONFormatter{}) } loglevel, err := log.ParseLevel(*config.Logging.Level) if err != nil { log.Error(err) loglevel = log.InfoLevel // default to Info } log.SetLevel(loglevel) } Restructure the configuration struct This allows us to more easily set parameters programmatically. See server_test.go in subsequent commit. 
package common import ( log "github.com/Sirupsen/logrus" "gopkg.in/gcfg.v1" ) // configuration for the logging type LoggingConfig struct { // whether or not log outputs in JSON UseJSON bool Level string } // server configuration type ServerConfig struct { Port int // if true, listens on 0.0.0.0 Global bool } // MongoDB configuration type MongoConfig struct { Port int Host string } // Debugging configuration type DebugConfig struct { Enable bool ProfileLength int } type BenchmarkConfig struct { BrokerURL *string BrokerPort *int StepSpacing *int // How long between increasing client/producer counts (seconds) ConfigurationName *string // Named bundle of query/metadata } type Config struct { Logging LoggingConfig Server ServerConfig Mongo MongoConfig Debug DebugConfig Benchmark BenchmarkConfig } func LoadConfig(filename string) (config *Config) { config = new(Config) err := gcfg.ReadFileInto(config, filename) if err != nil { log.WithFields(log.Fields{ "location": filename, "error": err, }).Error("Couldn't load configuration file at given location. Trying local ./config.ini") } else { return } err = gcfg.ReadFileInto(config, "./config.ini") if err != nil { log.WithField("error", err).Fatal("Couldn't load configuration file at ./config.ini") } return } func SetupLogging(config *Config) { if config.Logging.UseJSON { log.SetFormatter(&log.JSONFormatter{}) } loglevel, err := log.ParseLevel(config.Logging.Level) if err != nil { log.Error(err) loglevel = log.InfoLevel // default to Info } log.SetLevel(loglevel) }
package main import ( "flag" "fmt" "github.com/jroimartin/gocui" "github.com/jzelinskie/geddit" "github.com/toqueteos/webbrowser" "html" "log" "os" "strings" ) type pagetype int const ( List pagetype = iota Comments Empty Help ) var submissions []*geddit.Submission var views []*gocui.View var votes []*gocui.View var allViews []*gocui.View var session *geddit.LoginSession var opts geddit.ListingOptions var after string var count int var subreddit string var currentSubmission *geddit.Submission var currentPageType pagetype func getCredentials() (string, string) { username := flag.String("u", "", "Username") password := flag.String("p", "", "Password") flag.StringVar(&subreddit, "s", "", "Subreddit") flag.Parse() if *username == "" { *username = os.Getenv("BORED_USERNAME") } if *password == "" { *password = os.Getenv("BORED_PASSWORD") } if subreddit == "" { subreddit = os.Getenv("BORED_SUBREDDIT") } if *username == "" || *password == "" { log.Panicln("bored requires a username and password") os.Exit(2) } return *username, *password } func login() *geddit.LoginSession { username, password := getCredentials() session, _ := geddit.NewLoginSession(username, password, "geddit") return session } func load(g *gocui.Gui, limit int) []*geddit.Submission { opts = geddit.ListingOptions{ Limit: limit, After: after, } if subreddit == "" { submissions, _ = session.Frontpage(geddit.DefaultPopularity, opts) } else { submissions, _ = session.SubredditSubmissions(subreddit, geddit.DefaultPopularity, opts) } return submissions } func layoutList(g *gocui.Gui) { maxX, maxY := g.Size() i := 0 y := 0 for y < maxY-1 && i < len(submissions) { name := fmt.Sprintf("submission-%d", i) if v, err := g.SetView(fmt.Sprintf("vote-%d", i), -1, y, 1, y+2); err != nil && v != nil { votes = append(votes, v) v.Frame = false fmt.Fprint(v, "•") allViews = append(allViews, v) } if s, err := g.SetView(name, 1, y, maxX, y+2); err != nil && s != nil { if i == 0 { g.SetCurrentView(name) } views = append(views, 
s) s.Frame = false subm := submissions[i] after = subm.Title title := html.UnescapeString(subm.Title) tag := "LINK" if subm.IsSelf { tag = "SELF" } if subm.IsNSFW { tag = "NSFW+" + tag } fmt.Fprintf(s, "[%s] %s", tag, title) allViews = append(allViews, s) } y += 1 i += 1 } setColor(g.CurrentView()) currentSubmission = getCurrentSubmission(g.CurrentView()) } func layoutComments(g *gocui.Gui) { maxX, maxY := g.Size() if vote, err := g.SetView("post-vote", -1, 0, 1, 2); err != nil && vote != nil { vote.Frame = false fmt.Fprint(vote, "•") allViews = append(allViews, vote) } if title, err := g.SetView("post-title", 1, 0, maxX, 2); err != nil && title != nil { title.Frame = false fmt.Fprintf(title, "%s", html.UnescapeString(currentSubmission.Title)) allViews = append(allViews, title) } if text, err := g.SetView("post-text", -1, 1, maxX, maxY); err != nil && text != nil && currentSubmission.IsSelf { text.Frame = false text.Wrap = true fmt.Fprintf(text, "%s", strings.Replace(html.UnescapeString(currentSubmission.Selftext), "\n\n", "\n", -1)) allViews = append(allViews, text) g.SetCurrentView("post-text") } } func layoutHelp(g *gocui.Gui) { maxX, maxY := g.Size() if help, err := g.SetView("help", -1, 0, maxX, maxY); err != nil && help != nil { help.Frame = false help.Wrap = true fmt.Fprint(help, "Keybinds:\n\th - this screen\n\tq - quit\n\tj - navigate/scroll up\n\tk - navigate/scroll down\n\tr - refresh\n\tf - front page\n\tc - comments (self text view)\n\tl - open link url\n\tenter - open reddit permalink\n\n") allViews = append(allViews, help) g.SetCurrentView("help") } } func layout(g *gocui.Gui) error { maxX, count := g.Size() count = count - 1 if title, err := g.SetView("title", -1, -1, maxX, 1); err != nil { title.Frame = false title.BgColor = gocui.ColorBlue title.FgColor = gocui.ColorWhite fmt.Fprintln(title, "Bored v0.0.1") allViews = append(allViews, title) } if currentPageType == List { layoutList(g) } else if currentPageType == Comments { layoutComments(g) } 
else if currentPageType == Help { layoutHelp(g) } return nil } func quit(g *gocui.Gui, _ *gocui.View) error { return gocui.Quit } func help(g *gocui.Gui, v *gocui.View) error { currentPageType = Help clearViews(g, v) return nil } func cursorDown(g *gocui.Gui, v *gocui.View) error { if currentPageType == List { current := v.Name() next := 0 for i, v := range views { if v.Name() == current { next = i + 1 } } if next >= len(views) { next = len(views) - 1 } return g.SetCurrentView(views[next].Name()) } else if currentPageType == Comments || currentPageType == Help { x, y := v.Origin() v.SetOrigin(x, y+1) } return nil } func cursorUp(g *gocui.Gui, v *gocui.View) error { if currentPageType == List { current := v.Name() prev := 0 for i, v := range views { if v.Name() == current { prev = i - 1 } } if prev < 0 { prev = 0 } return g.SetCurrentView(views[prev].Name()) } else if currentPageType == Comments || currentPageType == Help { x, y := v.Origin() v.SetOrigin(x, y-1) } return nil } func upvote(g *gocui.Gui, v *gocui.View) error { if currentPageType == List { for i, w := range views { if w == v { if votes[i].FgColor == gocui.ColorGreen { votes[i].FgColor = gocui.ColorDefault session.Vote(currentSubmission, geddit.RemoveVote) } else { votes[i].FgColor = gocui.ColorGreen session.Vote(currentSubmission, geddit.UpVote) } } } } else if currentPageType == Comments { vote, _ := g.View("post-vote") if vote.FgColor == gocui.ColorGreen { vote.FgColor = gocui.ColorDefault session.Vote(currentSubmission, geddit.RemoveVote) } else { vote.FgColor = gocui.ColorGreen session.Vote(currentSubmission, geddit.UpVote) } } return nil } func downvote(g *gocui.Gui, v *gocui.View) error { if currentPageType == List { for i, w := range views { if w == v { if votes[i].FgColor == gocui.ColorRed { votes[i].FgColor = gocui.ColorDefault session.Vote(submissions[i], geddit.RemoveVote) } else { votes[i].FgColor = gocui.ColorRed session.Vote(submissions[i], geddit.DownVote) } } } } else if currentPageType 
== Comments { vote, _ := g.View("post-vote") if vote.FgColor == gocui.ColorRed { vote.FgColor = gocui.ColorDefault session.Vote(currentSubmission, geddit.RemoveVote) } else { vote.FgColor = gocui.ColorRed session.Vote(currentSubmission, geddit.DownVote) } } return nil } func enter(g *gocui.Gui, v *gocui.View) error { webbrowser.Open("https://www.reddit.com/" + currentSubmission.Permalink) return nil } func link(g *gocui.Gui, v *gocui.View) error { webbrowser.Open(currentSubmission.URL) return nil } func front(g *gocui.Gui, v *gocui.View) error { currentPageType = List clearViews(g, v) return nil } func comments(g *gocui.Gui, v *gocui.View) error { currentPageType = Comments clearViews(g, v) return nil } func refresh(g *gocui.Gui, v *gocui.View) error { if currentPageType == List { currentPageType = Empty clearViews(g, v) load(g, count) return front(g, v) } else if currentPageType == Comments || currentPageType == Help { clearViews(g, v) } return nil } func clearViews(g *gocui.Gui, v *gocui.View) { for _, w := range allViews { g.DeleteView(w.Name()) } views = nil votes = nil allViews = nil g.Flush() } func setKeybinds(g *gocui.Gui) { if err := g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, quit); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", gocui.KeyCtrlD, gocui.ModNone, quit); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'q', gocui.ModNone, quit); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'h', gocui.ModNone, help); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'j', gocui.ModNone, cursorDown); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'k', gocui.ModNone, cursorUp); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'a', gocui.ModNone, upvote); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'z', gocui.ModNone, downvote); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", gocui.KeyEnter, gocui.ModNone, enter); err != nil { log.Panicln(err) 
} if err := g.SetKeybinding("", 'l', gocui.ModNone, link); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'f', gocui.ModNone, front); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'c', gocui.ModNone, comments); err != nil { log.Panicln(err) } if err := g.SetKeybinding("", 'r', gocui.ModNone, refresh); err != nil { log.Panicln(err) } } func setColor(v *gocui.View) { for _, w := range views { w.FgColor = gocui.ColorDefault } v.FgColor = gocui.ColorBlue } func getCurrentSubmission(v *gocui.View) *geddit.Submission { for i, w := range views { if w == v { if i < len(submissions) { return submissions[i] } } } return submissions[0] } func main() { var err error session = login() after = "" g := gocui.NewGui() if err := g.Init(); err != nil { log.Panicln(err) } _, count = g.Size() count = count - 1 submissions = load(g, count) currentPageType = List g.BgColor = gocui.ColorDefault defer g.Close() g.SetLayout(layout) setKeybinds(g) err = g.MainLoop() if err != nil && err != gocui.Quit { log.Panicln(err) } } Fix incorrect help text package main import ( "flag" "fmt" "github.com/jroimartin/gocui" "github.com/jzelinskie/geddit" "github.com/toqueteos/webbrowser" "html" "log" "os" "strings" ) type pagetype int const ( List pagetype = iota Comments Empty Help ) var submissions []*geddit.Submission var views []*gocui.View var votes []*gocui.View var allViews []*gocui.View var session *geddit.LoginSession var opts geddit.ListingOptions var after string var count int var subreddit string var currentSubmission *geddit.Submission var currentPageType pagetype func getCredentials() (string, string) { username := flag.String("u", "", "Username") password := flag.String("p", "", "Password") flag.StringVar(&subreddit, "s", "", "Subreddit") flag.Parse() if *username == "" { *username = os.Getenv("BORED_USERNAME") } if *password == "" { *password = os.Getenv("BORED_PASSWORD") } if subreddit == "" { subreddit = os.Getenv("BORED_SUBREDDIT") } if *username == "" || 
*password == "" { log.Panicln("bored requires a username and password") os.Exit(2) } return *username, *password } func login() *geddit.LoginSession { username, password := getCredentials() session, _ := geddit.NewLoginSession(username, password, "geddit") return session } func load(g *gocui.Gui, limit int) []*geddit.Submission { opts = geddit.ListingOptions{ Limit: limit, After: after, } if subreddit == "" { submissions, _ = session.Frontpage(geddit.DefaultPopularity, opts) } else { submissions, _ = session.SubredditSubmissions(subreddit, geddit.DefaultPopularity, opts) } return submissions } func layoutList(g *gocui.Gui) { maxX, maxY := g.Size() i := 0 y := 0 for y < maxY-1 && i < len(submissions) { name := fmt.Sprintf("submission-%d", i) if v, err := g.SetView(fmt.Sprintf("vote-%d", i), -1, y, 1, y+2); err != nil && v != nil { votes = append(votes, v) v.Frame = false fmt.Fprint(v, "•") allViews = append(allViews, v) } if s, err := g.SetView(name, 1, y, maxX, y+2); err != nil && s != nil { if i == 0 { g.SetCurrentView(name) } views = append(views, s) s.Frame = false subm := submissions[i] after = subm.Title title := html.UnescapeString(subm.Title) tag := "LINK" if subm.IsSelf { tag = "SELF" } if subm.IsNSFW { tag = "NSFW+" + tag } fmt.Fprintf(s, "[%s] %s", tag, title) allViews = append(allViews, s) } y += 1 i += 1 } setColor(g.CurrentView()) currentSubmission = getCurrentSubmission(g.CurrentView()) } func layoutComments(g *gocui.Gui) { maxX, maxY := g.Size() if vote, err := g.SetView("post-vote", -1, 0, 1, 2); err != nil && vote != nil { vote.Frame = false fmt.Fprint(vote, "•") allViews = append(allViews, vote) } if title, err := g.SetView("post-title", 1, 0, maxX, 2); err != nil && title != nil { title.Frame = false fmt.Fprintf(title, "%s", html.UnescapeString(currentSubmission.Title)) allViews = append(allViews, title) } if text, err := g.SetView("post-text", -1, 1, maxX, maxY); err != nil && text != nil && currentSubmission.IsSelf { text.Frame = false 
// Tail of layoutComments (the function opens before this chunk): render the
// submission self text, unescaping HTML entities and collapsing the blank
// lines reddit uses as paragraph breaks into single newlines.
text.Wrap = true
fmt.Fprintf(text, "%s", strings.Replace(html.UnescapeString(currentSubmission.Selftext), "\n\n", "\n", -1))
allViews = append(allViews, text)
g.SetCurrentView("post-text")
}
}

// layoutHelp draws the full-screen keybinding help view.
// NOTE(review): the help text omits the 'a' (upvote) and 'z' (downvote)
// bindings that setKeybinds installs — confirm and update the text.
func layoutHelp(g *gocui.Gui) {
	maxX, maxY := g.Size()
	// SetView returns a non-nil error the first time a view is created
	// (presumably gocui's ErrUnknownView); the body therefore runs only on
	// first creation — TODO confirm against the gocui version in use.
	if help, err := g.SetView("help", -1, 0, maxX, maxY); err != nil && help != nil {
		help.Frame = false
		help.Wrap = true
		fmt.Fprint(help, "Keybinds:\n\th - this screen\n\tq - quit\n\tj - navigate/scroll down\n\tk - navigate/scroll up\n\tr - refresh\n\tf - front page\n\tc - comments (self text view)\n\tl - open link url\n\tenter - open reddit permalink\n\n")
		allViews = append(allViews, help)
		g.SetCurrentView("help")
	}
}

// layout is the gocui layout callback: draws the title bar, then dispatches
// to the page-specific layout for the current page type.
func layout(g *gocui.Gui) error {
	// NOTE(review): `:=` declares a local `count` that shadows the
	// package-level count; the decrement below never reaches the global.
	maxX, count := g.Size()
	count = count - 1
	if title, err := g.SetView("title", -1, -1, maxX, 1); err != nil {
		title.Frame = false
		title.BgColor = gocui.ColorBlue
		title.FgColor = gocui.ColorWhite
		fmt.Fprintln(title, "Bored v0.0.1")
		allViews = append(allViews, title)
	}
	if currentPageType == List {
		layoutList(g)
	} else if currentPageType == Comments {
		layoutComments(g)
	} else if currentPageType == Help {
		layoutHelp(g)
	}
	return nil
}

// quit terminates the main loop by returning gocui's quit sentinel.
func quit(g *gocui.Gui, _ *gocui.View) error {
	return gocui.Quit
}

// help switches to the help page and forces a redraw.
func help(g *gocui.Gui, v *gocui.View) error {
	currentPageType = Help
	clearViews(g, v)
	return nil
}

// cursorDown moves selection to the next list entry, or scrolls the
// comments/help view down one line.
func cursorDown(g *gocui.Gui, v *gocui.View) error {
	if currentPageType == List {
		current := v.Name()
		next := 0
		// Linear scan for the currently focused view; clamp at the end.
		for i, v := range views {
			if v.Name() == current {
				next = i + 1
			}
		}
		if next >= len(views) {
			next = len(views) - 1
		}
		return g.SetCurrentView(views[next].Name())
	} else if currentPageType == Comments || currentPageType == Help {
		x, y := v.Origin()
		// NOTE(review): SetOrigin's error is ignored here.
		v.SetOrigin(x, y+1)
	}
	return nil
}

// cursorUp moves selection to the previous list entry, or scrolls the
// comments/help view up one line.
func cursorUp(g *gocui.Gui, v *gocui.View) error {
	if currentPageType == List {
		current := v.Name()
		prev := 0
		for i, v := range views {
			if v.Name() == current {
				prev = i - 1
			}
		}
		if prev < 0 {
			prev = 0
		}
		return g.SetCurrentView(views[prev].Name())
	} else if currentPageType == Comments || currentPageType == Help {
		x, y := v.Origin()
		v.SetOrigin(x, y-1)
	}
	return nil
}

// upvote toggles an upvote on the focused submission (green marker) or, on
// the comments page, on the open submission.
// NOTE(review): the List branch votes on currentSubmission while downvote's
// List branch votes on submissions[i] — one of the two looks wrong; confirm
// which submission should be voted on from the list page.
func upvote(g *gocui.Gui, v *gocui.View) error {
	if currentPageType == List {
		for i, w := range views {
			if w == v {
				if votes[i].FgColor == gocui.ColorGreen {
					// Already upvoted: remove the vote.
					votes[i].FgColor = gocui.ColorDefault
					session.Vote(currentSubmission, geddit.RemoveVote)
				} else {
					votes[i].FgColor = gocui.ColorGreen
					session.Vote(currentSubmission, geddit.UpVote)
				}
			}
		}
	} else if currentPageType == Comments {
		// NOTE(review): g.View's error is discarded; vote would be nil if
		// the "post-vote" view does not exist.
		vote, _ := g.View("post-vote")
		if vote.FgColor == gocui.ColorGreen {
			vote.FgColor = gocui.ColorDefault
			session.Vote(currentSubmission, geddit.RemoveVote)
		} else {
			vote.FgColor = gocui.ColorGreen
			session.Vote(currentSubmission, geddit.UpVote)
		}
	}
	return nil
}

// downvote toggles a downvote (red marker); mirrors upvote.
func downvote(g *gocui.Gui, v *gocui.View) error {
	if currentPageType == List {
		for i, w := range views {
			if w == v {
				if votes[i].FgColor == gocui.ColorRed {
					votes[i].FgColor = gocui.ColorDefault
					session.Vote(submissions[i], geddit.RemoveVote)
				} else {
					votes[i].FgColor = gocui.ColorRed
					session.Vote(submissions[i], geddit.DownVote)
				}
			}
		}
	} else if currentPageType == Comments {
		vote, _ := g.View("post-vote")
		if vote.FgColor == gocui.ColorRed {
			vote.FgColor = gocui.ColorDefault
			session.Vote(currentSubmission, geddit.RemoveVote)
		} else {
			vote.FgColor = gocui.ColorRed
			session.Vote(currentSubmission, geddit.DownVote)
		}
	}
	return nil
}

// enter opens the submission's reddit permalink in the system browser.
func enter(g *gocui.Gui, v *gocui.View) error {
	webbrowser.Open("https://www.reddit.com/" + currentSubmission.Permalink)
	return nil
}

// link opens the submission's target URL in the system browser.
func link(g *gocui.Gui, v *gocui.View) error {
	webbrowser.Open(currentSubmission.URL)
	return nil
}

// front switches back to the front-page list.
func front(g *gocui.Gui, v *gocui.View) error {
	currentPageType = List
	clearViews(g, v)
	return nil
}

// comments switches to the comments (self text) page.
func comments(g *gocui.Gui, v *gocui.View) error {
	currentPageType = Comments
	clearViews(g, v)
	return nil
}

// refresh reloads the front page (List) or simply redraws (Comments/Help).
func refresh(g *gocui.Gui, v *gocui.View) error {
	if currentPageType == List {
		currentPageType = Empty
		clearViews(g, v)
		// NOTE(review): load's return value is discarded here, but main
		// assigns it to the package-level submissions — confirm load also
		// updates package state itself, otherwise the list goes stale.
		load(g, count)
		return front(g, v)
	} else if currentPageType == Comments || currentPageType == Help {
		clearViews(g, v)
	}
	return nil
}

// clearViews deletes every tracked view, resets the bookkeeping slices and
// flushes so the next layout pass rebuilds the screen from scratch.
func clearViews(g *gocui.Gui, v *gocui.View) {
	for _, w := range allViews {
		g.DeleteView(w.Name())
	}
	views = nil
	votes = nil
	allViews = nil
	g.Flush()
}

// setKeybinds installs all global key bindings; any failure is fatal.
func setKeybinds(g *gocui.Gui) {
	if err := g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, quit); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", gocui.KeyCtrlD, gocui.ModNone, quit); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'q', gocui.ModNone, quit); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'h', gocui.ModNone, help); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'j', gocui.ModNone, cursorDown); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'k', gocui.ModNone, cursorUp); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'a', gocui.ModNone, upvote); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'z', gocui.ModNone, downvote); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", gocui.KeyEnter, gocui.ModNone, enter); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'l', gocui.ModNone, link); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'f', gocui.ModNone, front); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'c', gocui.ModNone, comments); err != nil {
		log.Panicln(err)
	}
	if err := g.SetKeybinding("", 'r', gocui.ModNone, refresh); err != nil {
		log.Panicln(err)
	}
}

// setColor highlights v in blue and resets every other list view.
func setColor(v *gocui.View) {
	for _, w := range views {
		w.FgColor = gocui.ColorDefault
	}
	v.FgColor = gocui.ColorBlue
}

// getCurrentSubmission maps a focused view back to its submission.
// NOTE(review): the submissions[0] fallback panics when submissions is
// empty — confirm load can never return an empty slice.
func getCurrentSubmission(v *gocui.View) *geddit.Submission {
	for i, w := range views {
		if w == v {
			if i < len(submissions) {
				return submissions[i]
			}
		}
	}
	return submissions[0]
}

// main logs in, sizes the screen, loads the front page and runs the gocui
// event loop until quit is requested.
func main() {
	var err error
	session = login()
	after = ""
	g := gocui.NewGui()
	// NOTE(review): this `err :=` shadows the outer err; the outer one is
	// only used for MainLoop below.
	if err := g.Init(); err != nil {
		log.Panicln(err)
	}
	_, count = g.Size()
	count = count - 1
	submissions = load(g, count)
	currentPageType = List
	g.BgColor = gocui.ColorDefault
	defer g.Close()
	g.SetLayout(layout)
	setKeybinds(g)
	err = g.MainLoop()
	// gocui.Quit is the normal termination sentinel, not a failure.
	if err != nil && err != gocui.Quit {
		log.Panicln(err)
	}
}
package lex

import (
	"encoding/json"
	"testing"

	u "github.com/araddon/gou"
	"github.com/bmizerany/assert"
)

// verifyJsonTokenTypes lexes expString and asserts that the emitted token
// types match `tokens` in order (token values are not checked).
func verifyJsonTokenTypes(t *testing.T, expString string, tokens []TokenType) {
	l := NewJsonLexer(expString)
	for _, goodToken := range tokens {
		tok := l.NextToken()
		//u.Debugf("%#v %#v", tok, goodToken)
		assert.Equalf(t, tok.T, goodToken, "want='%v' has %v ", goodToken, tok)
	}
}

// verifyJsonTokens lexes expString and asserts both the type and the value
// of each emitted token against `tokens`, in order.
func verifyJsonTokens(t *testing.T, expString string, tokens []Token) {
	l := NewJsonLexer(expString)
	for i, goodToken := range tokens {
		tok := l.NextToken()
		//u.Debugf("%#v %#v", tok, goodToken)
		assert.Equalf(t, tok.T, goodToken.T, "%d want token type ='%v' has %v ", i, goodToken.T, tok.T)
		assert.Equalf(t, tok.V, goodToken.V, "%d want token value='%v' has %v ", i, goodToken.V, tok.V)
	}
}

// TestLexJsonTokens checks exact token type/value pairs for a small array
// containing a string, an integer, a bool and a nested object.
func TestLexJsonTokens(t *testing.T) {
	verifyJsonTokens(t, `["a",2,"b",true,{"name":"world"}]`,
		[]Token{
			tv(TokenLeftBracket, "["),
			tv(TokenValue, "a"),
			tv(TokenComma, ","),
			tv(TokenInteger, "2"),
			tv(TokenComma, ","),
			tv(TokenValue, "b"),
			tv(TokenComma, ","),
			tv(TokenBool, "true"),
			tv(TokenComma, ","),
			tv(TokenLeftBrace, "{"),
			tv(TokenIdentity, "name"),
			tv(TokenColon, ":"),
			tv(TokenValue, "world"),
			tv(TokenRightBrace, "}"),
			tv(TokenRightBracket, "]"),
		})
}

// TestLexJsonDialect checks token types only, over a document exercising
// nested objects, arrays and all scalar kinds.
func TestLexJsonDialect(t *testing.T) {
	// The lexer should be able to parse json
	verifyJsonTokenTypes(t, `
		{
			"key1":"value2"
			,"key2":45,
			"key3":["a",2,"b",true],
			"key4":{"hello":"value","age":55}
		}
		`,
		[]TokenType{TokenLeftBrace,
			TokenIdentity, TokenColon, TokenValue, TokenComma,
			TokenIdentity, TokenColon, TokenInteger, TokenComma,
			TokenIdentity, TokenColon, TokenLeftBracket, TokenValue, TokenComma, TokenInteger, TokenComma, TokenValue, TokenComma, TokenBool, TokenRightBracket, TokenComma,
			TokenIdentity, TokenColon, TokenLeftBrace, TokenIdentity, TokenColon, TokenValue, TokenComma, TokenIdentity, TokenColon, TokenInteger, TokenRightBrace,
			TokenRightBrace,
		})
}

/*
Benchmark testing

BenchmarkJsonLexer1	   10000	    115692 ns/op
BenchmarkJsonMarshal	   10000	    103930
ns/op go test -bench="Json" go test -bench="JsonLexer" --cpuprofile cpu.out go tool pprof lex.test cpu.out web */ func BenchmarkJsonLexer1(b *testing.B) { jsonData := `{ "took":62436, "errors":true, "items":[{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}}] }` b.StartTimer() for i := 0; i < b.N; i++ { l := NewJsonLexer(jsonData) for { tok := l.NextToken() if tok.T == TokenEOF { break } } } } func BenchmarkJsonMarshal(b *testing.B) { jsonData := `{ "took":62436, "errors":true, "items":[{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}}] }` b.StartTimer() for i := 0; i < b.N; i++ { m := make(u.JsonHelper) err := json.Unmarshal([]byte(jsonData), &m) if err != nil { b.Fail() } } } json lex benchmark package lex import ( "encoding/json" "testing" u "github.com/araddon/gou" "github.com/bmizerany/assert" ) func verifyJsonTokenTypes(t *testing.T, expString string, tokens []TokenType) { l := NewJsonLexer(expString) for _, goodToken := range tokens { tok := l.NextToken() //u.Debugf("%#v %#v", tok, goodToken) assert.Equalf(t, tok.T, goodToken, "want='%v' has %v ", goodToken, tok) } } func verifyJsonTokens(t *testing.T, expString string, tokens []Token) { l := NewJsonLexer(expString) for i, goodToken := range tokens { tok := l.NextToken() //u.Debugf("%#v %#v", tok, goodToken) assert.Equalf(t, tok.T, goodToken.T, "%d want token type ='%v' has %v ", i, goodToken.T, tok.T) assert.Equalf(t, tok.V, goodToken.V, "%d want token value='%v' has %v ", i, goodToken.V, tok.V) } } func TestLexJsonTokens(t *testing.T) { verifyJsonTokens(t, `["a",2,"b",true,{"name":"world"}]`, []Token{ tv(TokenLeftBracket, "["), tv(TokenValue, "a"), tv(TokenComma, ","), tv(TokenInteger, "2"), tv(TokenComma, ","), tv(TokenValue, "b"), tv(TokenComma, ","), tv(TokenBool, "true"), tv(TokenComma, ","), tv(TokenLeftBrace, "{"), tv(TokenIdentity, "name"), tv(TokenColon, ":"), tv(TokenValue, "world"), tv(TokenRightBrace, "}"), 
tv(TokenRightBracket, "]"), }) } func TestLexJsonDialect(t *testing.T) { // The lexer should be able to parse json verifyJsonTokenTypes(t, ` { "key1":"value2" ,"key2":45, "key3":["a",2,"b",true], "key4":{"hello":"value","age":55} } `, []TokenType{TokenLeftBrace, TokenIdentity, TokenColon, TokenValue, TokenComma, TokenIdentity, TokenColon, TokenInteger, TokenComma, TokenIdentity, TokenColon, TokenLeftBracket, TokenValue, TokenComma, TokenInteger, TokenComma, TokenValue, TokenComma, TokenBool, TokenRightBracket, TokenComma, TokenIdentity, TokenColon, TokenLeftBrace, TokenIdentity, TokenColon, TokenValue, TokenComma, TokenIdentity, TokenColon, TokenInteger, TokenRightBrace, TokenRightBrace, }) } /* Benchmark testing BenchmarkJsonLexer1 10000 121277 ns/op BenchmarkJsonLexer2 500000 2982 ns/op BenchmarkJsonMarshal 10000 106905 ns/op go test -bench="Json" go test -bench="JsonLexer" --cpuprofile cpu.out go tool pprof lex.test cpu.out web */ func BenchmarkJsonLexer1(b *testing.B) { jsonData := `{ "took":62436, "errors":true, "items":[{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}}] }` b.StartTimer() for i := 0; i < b.N; i++ { l := NewJsonLexer(jsonData) for { tok := l.NextToken() if tok.T == TokenEOF { break } } } } func BenchmarkJsonLexer2(b *testing.B) { jsonData := `{ "took":62436, "errors":true, "items":[{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}}] }` b.StartTimer() for i := 0; i < b.N; i++ { l := NewJsonLexer(jsonData) tokenLoop: for { tok := l.NextToken() switch { case tok.T == TokenEOF: break case tok.T == TokenIdentity && tok.V == "errors": tok = l.NextToken() tok = l.NextToken() if tok.T == TokenBool && tok.V == "true" { break tokenLoop // early exit } } } } } func BenchmarkJsonMarshal(b *testing.B) { jsonData := `{ "took":62436, "errors":true, "items":[{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. 
Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}},{"delete":{"_index":"testdel","_type":"type1","_id":"2","status":503,"error":"UnavailableShardsException[[testdel][3] Primary shard is not active or isn't assigned to a known node. Timeout: [1m], request: org.elasticsearch.action.bulk.BulkShardRequest@633961d0]"}}] }` b.StartTimer() for i := 0; i < b.N; i++ { m := make(u.JsonHelper) err := json.Unmarshal([]byte(jsonData), &m) if err != nil { b.Fail() } } }