text
stringlengths
11
4.05M
package controllers

import (
	core "IRCService/app/core"
	"net"
	"strings"

	coap "github.com/dustin/go-coap"
)

// DetectGameEventHandler returns a handler that, for confirmable requests,
// runs "getevent -p" on the device and answers with the /dev/input/event
// number associated with the "Zinwell Gamepad F310" device.
func DetectGameEventHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		if !m.IsConfirmable() {
			return nil
		}
		result, err := ci.OnDebugCmds("getevent -p")
		if err != nil {
			// Original code ignored this error and parsed an empty string;
			// without the getevent output we cannot determine the event
			// number, so drop the request rather than answer with garbage.
			return nil
		}
		// Take the text before the gamepad's name line; the last
		// "/dev/input/event" fragment before it carries the device number.
		list := strings.Split(strings.Split(result, "Zinwell Gamepad F310")[0], "/dev/input/event")
		number := strings.Split(list[len(list)-1], ";")[0]
		res := &coap.Message{
			Type:      coap.Acknowledgement,
			Code:      coap.Content,
			MessageID: m.MessageID,
			Token:     m.Token,
			Payload:   []byte(number),
		}
		res.SetOption(coap.ContentFormat, coap.TextPlain)
		return res
	}
}

// GameEventHandler presses and releases a gamepad button. The payload is a
// ";"-separated list: event number, key code, ... (at least 3 fields).
func GameEventHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 2 {
			ci.OnCmds(parsedGameKeySerial(number[0], number[1]))
		}
		return nil
	}
}

// GameBeganHandler emits only the key-down half of a button press.
func GameBeganHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 2 {
			ci.OnCmds(parsedGameBeganKeySerial(number[0], number[1]))
		}
		return nil
	}
}

// GameEndHandler emits only the key-up half of a button press.
func GameEndHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 2 {
			ci.OnCmds(parsedGameEndKeySerial(number[0], number[1]))
		}
		return nil
	}
}

// GameDPADHandler presses and releases a D-pad direction. The payload is
// event number, axis code, direction value (at least 4 fields).
func GameDPADHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 3 {
			ci.OnCmds(parsedGameDPadKeySerial(number[0], number[1], number[2]))
		}
		return nil
	}
}

// GameDPADBeganHandler emits only the press half of a D-pad event.
func GameDPADBeganHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 3 {
			ci.OnCmds(parsedGameDPadBeganKeySerial(number[0], number[1], number[2]))
		}
		return nil
	}
}

// GameDPADEndHandler emits only the release half of a D-pad event.
func GameDPADEndHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 2 {
			ci.OnCmds(parsedGameDPadEndKeySerial(number[0], number[1]))
		}
		return nil
	}
}

// GameAxisEventHandler forwards an analog axis value.
func GameAxisEventHandler(ci core.CoapInterface) core.CoapHandler {
	return func(l *net.UDPConn, a *net.UDPAddr, m *coap.Message) *coap.Message {
		number := strings.Split(string(m.Payload), ";")
		if len(number) > 3 {
			ci.OnCmds(parsedGameAxisEventKeySerial(number[0], number[1], number[2]))
		}
		return nil
	}
}

// sendEventCmd formats one "sendevent" shell command for the given
// input-event device number and (type, code, value) triple.
func sendEventCmd(eventNumber, evType, code, value string) string {
	return "sendevent /dev/input/event" + eventNumber + " " + evType + " " + code + " " + value
}

// synReport is the "0 0 0" report that commits the preceding events.
func synReport(eventNumber string) string {
	return sendEventCmd(eventNumber, "0", "0", "0")
}

// parsedGameKeySerial builds a full press+release key sequence.
func parsedGameKeySerial(eventNumber string, number string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "1", number, "1"),
		synReport(eventNumber),
		sendEventCmd(eventNumber, "1", number, "0"),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}

// parsedGameAxisEventKeySerial builds a single axis update.
func parsedGameAxisEventKeySerial(eventNumber string, number string, value string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "3", number, value),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}

// parsedGameBeganKeySerial builds the key-down half of a press.
func parsedGameBeganKeySerial(eventNumber string, number string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "1", number, "1"),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}

// parsedGameEndKeySerial builds the key-up half of a press.
func parsedGameEndKeySerial(eventNumber string, number string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "1", number, "0"),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}

// parsedGameDPadKeySerial builds a full press+release D-pad sequence.
func parsedGameDPadKeySerial(eventNumber string, number string, direction string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "3", number, direction),
		synReport(eventNumber),
		sendEventCmd(eventNumber, "3", number, "0"),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}

// parsedGameDPadBeganKeySerial builds the press half of a D-pad event.
func parsedGameDPadBeganKeySerial(eventNumber string, number string, direction string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "3", number, direction),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}

// parsedGameDPadEndKeySerial builds the release half of a D-pad event.
func parsedGameDPadEndKeySerial(eventNumber string, number string) string {
	cmds := []string{
		sendEventCmd(eventNumber, "3", number, "0"),
		synReport(eventNumber),
	}
	return strings.Join(cmds, ";")
}
package environment

import (
	"errors"
	"flag"
	"os"
)

const (
	defaultDBDirectory  = "./db"
	defaultCategoryName = "Member Channels"
	defaultListenName   = "[ + New ]"
)

// Environment holds the runtime configuration, assembled from the
// DISCORD_TOKEN environment variable (required) and command-line flags
// (optional).
type Environment struct {
	// Required
	DiscordAPIToken string

	// Optional
	Verbose             bool
	DBFile              string
	DefaultCategoryName string
	DefaultListenName   string
}

// New parses flags and environment variables and returns the populated
// configuration, or an error when a required value is missing.
func New() (*Environment, error) {
	env := &Environment{}
	env.optional()
	if err := env.required(); err != nil {
		return nil, err
	}
	return env, nil
}

// required reads mandatory settings; it fails when DISCORD_TOKEN is unset.
func (env *Environment) required() error {
	discordToken := os.Getenv("DISCORD_TOKEN")
	if len(discordToken) == 0 {
		return errors.New("Missing Discord API token. Set env var DISCORD_TOKEN")
	}
	env.DiscordAPIToken = discordToken
	return nil
}

// optional registers and parses the command-line flags.
// NOTE: the original descriptions for the two channel-name flags were a
// garbled copy-paste ("The default name for created the created
// cateogories", repeated verbatim for the listen flag); both are fixed
// here to describe their own flag.
func (env *Environment) optional() {
	flag.BoolVar(&env.Verbose, "v", false, "Enable debug logging")
	flag.StringVar(&env.DBFile, "store-dir", defaultDBDirectory, "Directory to save database")
	flag.StringVar(&env.DefaultCategoryName, "category-channel-name", defaultCategoryName, "The default name for created categories")
	flag.StringVar(&env.DefaultListenName, "listen-channel-name", defaultListenName, "The default name for created listen channels")
	flag.Parse()
}
// Copyright (c) 2018 Palantir Technologies. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"os"

	"github.com/palantir/distgo/distgo"
	"github.com/palantir/distgo/publisher"
	"github.com/palantir/distgo/publisher/artifactory"
	"github.com/palantir/distgo/publisher/maven"
	"github.com/palantir/godel-conjure-plugin/v6/conjureplugin"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// Destination variables for the flags registered in init below.
// NOTE(review): except for dryRunFlagVal, these variables appear to be
// write-only — RunE reads flag values via cmd.Flags()/GetFlagValue rather
// than through these vars. They seem to exist only so cobra has an address
// to bind each flag to; confirm before removing.
var (
	groupIDFlagVal    string
	urlFlagVal        string
	usernameFlagVal   string
	passwordFlagVal   string
	repositoryFlagVal string
	mavenNoPOMFlagVal bool
	dryRunFlagVal     bool
)

// publishCmd publishes the Conjure IR artifacts of the current project.
// It loads the project configuration, switches to the project directory,
// collects only the publisher flags that the user explicitly set on the
// command line, and hands everything to conjureplugin.Publish.
var publishCmd = &cobra.Command{
	Use:   "publish",
	Short: "Publish Conjure IR",
	RunE: func(cmd *cobra.Command, args []string) error {
		projectParams, err := toProjectParams(configFileFlag)
		if err != nil {
			return err
		}
		if err := os.Chdir(projectDirFlag); err != nil {
			return errors.Wrapf(err, "failed to set working directory")
		}
		publisherFlags, err := conjureplugin.PublisherFlags()
		if err != nil {
			return err
		}
		flagVals := make(map[distgo.PublisherFlagName]interface{})
		for _, currFlag := range publisherFlags {
			// if flag was not explicitly provided, don't add it to the flagVals map
			if !cmd.Flags().Changed(string(currFlag.Name)) {
				continue
			}
			val, err := currFlag.GetFlagValue(cmd.Flags())
			if err != nil {
				return err
			}
			flagVals[currFlag.Name] = val
		}
		return conjureplugin.Publish(projectParams, projectDirFlag, flagVals, dryRunFlagVal, cmd.OutOrStdout())
	},
}

// init registers publish's flags (dry-run plus the standard publisher
// connection/repository flags) and attaches the command to rootCmd.
func init() {
	publishCmd.Flags().BoolVar(&dryRunFlagVal, "dry-run", false, "print the operations that would be performed")
	publishCmd.Flags().StringVar(&groupIDFlagVal, string(publisher.GroupIDFlag.Name), "", publisher.GroupIDFlag.Description)
	publishCmd.Flags().StringVar(&repositoryFlagVal, string(artifactory.PublisherRepositoryFlag.Name), "", artifactory.PublisherRepositoryFlag.Description)
	publishCmd.Flags().StringVar(&urlFlagVal, string(publisher.ConnectionInfoURLFlag.Name), "", publisher.ConnectionInfoURLFlag.Description)
	publishCmd.Flags().StringVar(&usernameFlagVal, string(publisher.ConnectionInfoUsernameFlag.Name), "", publisher.ConnectionInfoUsernameFlag.Description)
	publishCmd.Flags().StringVar(&passwordFlagVal, string(publisher.ConnectionInfoPasswordFlag.Name), "", publisher.ConnectionInfoPasswordFlag.Description)
	publishCmd.Flags().BoolVar(&mavenNoPOMFlagVal, string(maven.NoPOMFlag.Name), false, maven.NoPOMFlag.Description)
	rootCmd.AddCommand(publishCmd)
}
package server

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
	"os/signal"
	"strconv"
	"syscall"
	"time"

	"github.com/NYTimes/gziphandler"
	"github.com/golang/glog"
	"github.com/prebid/prebid-server/config"
	"github.com/prebid/prebid-server/metrics"
	metricsconfig "github.com/prebid/prebid-server/metrics/config"
)

// Listen blocks forever, serving PBS requests on the given port. This will block forever, until the process is shut down.
func Listen(cfg *config.Configuration, handler http.Handler, adminHandler http.Handler, metrics *metricsconfig.DetailedMetricsEngine) (err error) {
	stopSignals := make(chan os.Signal, 1)
	signal.Notify(stopSignals, syscall.SIGTERM, syscall.SIGINT)

	// Run the servers. Fan any process-stopper signals out to each server for graceful shutdowns.
	stopAdmin := make(chan os.Signal)
	stopMain := make(chan os.Signal)
	stopPrometheus := make(chan os.Signal)
	done := make(chan struct{})

	adminServer := newAdminServer(cfg, adminHandler)
	go shutdownAfterSignals(adminServer, stopAdmin, done)

	if cfg.UnixSocketEnable && len(cfg.UnixSocketName) > 0 { // start the unix_socket server if config enable-it.
		var (
			socketListener net.Listener
			mainServer     = newSocketServer(cfg, handler)
		)
		go shutdownAfterSignals(mainServer, stopMain, done)

		if socketListener, err = newUnixListener(mainServer.Addr, metrics); err != nil {
			glog.Errorf("Error listening for Unix-Socket connections on path %s: %v for socket server", mainServer.Addr, err)
			return
		}
		go runServer(mainServer, "UnixSocket", socketListener)
	} else { // start the TCP server
		var (
			mainListener net.Listener
			mainServer   = newMainServer(cfg, handler)
		)
		go shutdownAfterSignals(mainServer, stopMain, done)

		if mainListener, err = newTCPListener(mainServer.Addr, metrics); err != nil {
			glog.Errorf("Error listening for TCP connections on %s: %v for main server", mainServer.Addr, err)
			return
		}
		go runServer(mainServer, "Main", mainListener)
	}

	var adminListener net.Listener
	if adminListener, err = newTCPListener(adminServer.Addr, nil); err != nil {
		glog.Errorf("Error listening for TCP connections on %s: %v for admin server", adminServer.Addr, err)
		return
	}
	go runServer(adminServer, "Admin", adminListener)

	if cfg.Metrics.Prometheus.Port != 0 {
		var (
			prometheusListener net.Listener
			prometheusServer   = newPrometheusServer(cfg, metrics)
		)
		go shutdownAfterSignals(prometheusServer, stopPrometheus, done)
		if prometheusListener, err = newTCPListener(prometheusServer.Addr, nil); err != nil {
			// BUG fix: the original logged adminServer.Addr here (copy-paste
			// from the admin block), pointing operators at the wrong address.
			glog.Errorf("Error listening for TCP connections on %s: %v for prometheus server", prometheusServer.Addr, err)
			return
		}
		go runServer(prometheusServer, "Prometheus", prometheusListener)
		wait(stopSignals, done, stopMain, stopAdmin, stopPrometheus)
	} else {
		wait(stopSignals, done, stopMain, stopAdmin)
	}
	return
}

// newAdminServer builds the admin HTTP server on cfg.Host:cfg.AdminPort.
func newAdminServer(cfg *config.Configuration, handler http.Handler) *http.Server {
	return &http.Server{
		Addr:    cfg.Host + ":" + strconv.Itoa(cfg.AdminPort),
		Handler: handler,
	}
}

// newMainServer builds the main TCP HTTP server, wrapping the handler with
// gzip compression when enabled in the config.
func newMainServer(cfg *config.Configuration, handler http.Handler) *http.Server {
	serverHandler := getCompressionEnabledHandler(handler, cfg.Compression.Response)
	return &http.Server{
		Addr:         cfg.Host + ":" + strconv.Itoa(cfg.Port),
		Handler:      serverHandler,
		ReadTimeout:  15 * time.Second,
		WriteTimeout: 15 * time.Second,
	}
}

// newSocketServer builds the main server for the Unix-socket transport;
// Addr holds the socket path rather than host:port.
func newSocketServer(cfg *config.Configuration, handler http.Handler) *http.Server {
	serverHandler := getCompressionEnabledHandler(handler, cfg.Compression.Response)
	return &http.Server{
		Addr:         cfg.UnixSocketName,
		Handler:      serverHandler,
		ReadTimeout:  15 * time.Second,
		WriteTimeout: 15 * time.Second,
	}
}

// getCompressionEnabledHandler wraps h with gzip when configured.
func getCompressionEnabledHandler(h http.Handler, compressionInfo config.CompressionInfo) http.Handler {
	if compressionInfo.GZIP {
		h = gziphandler.GzipHandler(h)
	}
	return h
}

// runServer serves on the given listener and logs the reason the server
// quits. It guards against nil server/listener arguments.
func runServer(server *http.Server, name string, listener net.Listener) (err error) {
	if server == nil {
		err = fmt.Errorf(">> Server is a nil_ptr.")
		glog.Errorf("%s server quit with error: %v", name, err)
		return
	} else if listener == nil {
		err = fmt.Errorf(">> Listener is a nil.")
		glog.Errorf("%s server quit with error: %v", name, err)
		return
	}
	glog.Infof("%s server starting on: %s", name, server.Addr)
	if err = server.Serve(listener); err != nil {
		glog.Errorf("%s server quit with error: %v", name, err)
	}
	return
}

// newTCPListener listens on address, enabling TCP keep-alives and, when a
// metrics engine is supplied, connection accounting.
func newTCPListener(address string, metrics metrics.MetricsEngine) (net.Listener, error) {
	ln, err := net.Listen("tcp", address)
	if err != nil {
		return nil, fmt.Errorf("Error listening for TCP connections on %s: %v", address, err)
	}

	// This cast is in Go's core libs as Server.ListenAndServe(), so it _should_ be safe, but just in case it changes in a future version...
	if casted, ok := ln.(*net.TCPListener); ok {
		ln = &tcpKeepAliveListener{casted}
	} else {
		glog.Warning("net.Listen(\"tcp\", \"addr\") didn't return a TCPListener as it did in Go 1.9. Things will probably work fine... but this should be investigated.")
	}

	if metrics != nil {
		ln = &monitorableListener{ln, metrics}
	}
	return ln, nil
}

// newUnixListener listens on the Unix-socket path, optionally wrapping the
// listener with connection accounting.
func newUnixListener(address string, metrics metrics.MetricsEngine) (net.Listener, error) {
	ln, err := net.Listen("unix", address)
	if err != nil {
		return nil, fmt.Errorf("Error listening for Unix-Socket connections on path %s: %v", address, err)
	}

	if casted, ok := ln.(*net.UnixListener); ok {
		ln = &unixListener{casted}
	} else {
		glog.Warning("net.Listen(\"unix\", \"addr\") didn't return an UnixListener.")
	}

	if metrics != nil {
		ln = &monitorableListener{ln, metrics}
	}
	return ln, nil
}

// wait blocks for one inbound stop signal, forwards it to every outbound
// channel, then waits for each server to report completion on done.
func wait(inbound <-chan os.Signal, done <-chan struct{}, outbound ...chan<- os.Signal) {
	sig := <-inbound

	for i := 0; i < len(outbound); i++ {
		go sendSignal(outbound[i], sig)
	}

	for i := 0; i < len(outbound); i++ {
		<-done
	}
}

// shutdownAfterSignals gracefully shuts the server down (10s deadline)
// when a signal arrives, then notifies done.
func shutdownAfterSignals(server *http.Server, stopper <-chan os.Signal, done chan<- struct{}) {
	sig := <-stopper

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	var s struct{}
	glog.Infof("Stopping %s because of signal: %s", server.Addr, sig.String())
	if err := server.Shutdown(ctx); err != nil {
		glog.Errorf("Failed to shutdown %s: %v", server.Addr, err)
	}
	done <- s
}

// sendSignal forwards sig to the given channel (run in its own goroutine
// because the stop channels are unbuffered).
func sendSignal(to chan<- os.Signal, sig os.Signal) {
	to <- sig
}
package main import ( "fmt" "time" ) //func (st *Stack) Pop() int { // v := 0 // for ix := len(st) - 1; ix >= 0; ix-- { // if v = st[ix]; v != 0 { // st[ix] = 0 // return v // } // } //} //var num int = 10 //var numX2, numX3 int // //func main() { // numX2, numX3 = getX2AndX3(num) // PrintValues() // numX2, numX3 = getX2AndX3_2(num) // PrintValues() // // fmt.Printf("Multiply 2 * 5 * 6 = %d\n", MultiPly3Nums(2, 5, 6)) // // var i1 int = MultiPly3Nums(2, 5, 6) // // fmt.Printf("MultiPly 2 * 5 * 6 = %d\n", i1) //} // //func PrintValues() { // fmt.Printf("num = %d, 2x num = %d, 3x num = %d\n", num, numX2, numX3) //} // //func getX2AndX3(input int) (int, int) { // return 2 * input, 3 * input //} // //func getX2AndX3_2(input int) (x2 int, x3 int) { // x2 = 2 * input // x3 = 3 * input // // return x2, x3 // return //} // //func MultiPly3Nums(a int, b int, c int) int { // // var product int = a * b * c // // return product // return a * b * c //} //返回值与函数的参数 //func main() { // fmt.Printf("Multiply 2 * 5 * 6 = %d\n", MultiPly3Nums(2, 5, 6)) // // var i1 int = MultiPly3Nums(2, 5, 6) // // fmt.Printf("MultiPly 2 * 5 * 6 = %d\n", i1) //} // //func MultiPly3Nums(a, b, c int) int { // return a * b * c // // var product int = a * b * c // // return product //} //空白符 blank identifier //空白符可以用来匹配一些不需要的值,然后直接丢弃不使用 //func main() { // var i1 int // var f1 float32 // i1, _, f1 = ThreeValues() // fmt.Printf("The int: %d, the float: %f \n", i1, f1) //} // //func ThreeValues() (int, int, float32) { // return 5, 6, 7.5 //} //func MinMax(a, b int) (min int, max int){ // if a < b { // min = a // max = b // } else { // a > b or a = b // min = b // max = a // } // return //} // //func Multiply(a, b int, reply *int) { // *reply = a * b //} // //func main() { // var min, max int // min, max = MinMax(78, 65) // fmt.Printf("Minmium is: %d, Maxnmium is:%d\n", min, max) // // //传指针,允许在外部函数中对主函数的值进行修改 // n := 0 // reply := &n // Multiply(5, 6, reply) // fmt.Printf("Multiply 5 * 6 = %d\n", n) //} 
//传递变长参数 //func main() { // x := min(1, 3, 2, 0) // fmt.Printf("The minimum is: %d\n", x) // slice := []int{7,9,3,5,1} // x = min(slice...) // fmt.Printf("The minimum in the slice is: %d", x) //} // //func min(s ...int) int { // if len(s)==0 { // return 0 // } // min := s[0] // for _, v := range s { // if v < min { // min = v // } // } // return min //} // defer 将需要执行的操作推迟到函数返回之前才执行,一般用于释放某些已分配资源的释放,降低内存泄漏的风险 //func main() { // function1() // functionA() // functionB() // doDBOperations() //} // //func function1() { // fmt.Printf("In function1 at the top\n") // defer function2() // fmt.Printf("In funtion1 at the bottom!\n") //} // //func function2() { // fmt.Printf("Function2: Deferred until the end of the calling function!\n") //} // //func functionA() { // i := 0 // defer fmt.Printf("This is a first: %d\n",i) // i++ // defer fmt.Printf("This is a second: %d\n", i) // return //} // //func functionB() { // for i := 0; i < 5; i++ { // defer fmt.Printf("%d ", i) // } //} // //func connectToDB() { // fmt.Println("ok, connected to db") //} // //func disconnectFromDB() { // fmt.Println("ok, disconnected from db") //} // //func doDBOperations() { // connectToDB() // fmt.Println("Defering the database disconnect.") // defer disconnectFromDB() //function called here with defer // fmt.Println("Doing some DB operations ...") // fmt.Println("Oops! 
some crash or network error ...") // fmt.Println("Returning from function here!") // return //terminate the program // // deferred function executed here just before actually returning, even if // // there is a return or abnormal termination before //} //递归函数 //fibonacci.go //func main() { // result := 0 // for i := 0; i <= 20; i++ { // result = fibonacci(i) // fmt.Printf("This fibonacci number %d is :%d\n", i, result) // } //} // //func fibonacci(n int) (res int) { // if n <= 1 { // return 1 // } else { // res = fibonacci(n - 1) + fibonacci(n - 2) // } // return //} //mut_recurs.go //func main() { // fmt.Printf("%d is even: is %t\n", 16, even(16)) // fmt.Printf("%d is odd: is %t\n", 17, odd(17)) // //17 is odd: is true // fmt.Printf("%d is odd : is %t\n", 18, odd(18)) // //18 is odd: is false //} // //func even(nr int) bool { // if nr == 0 { // return true // } // return odd(RevSign(nr) - 1) //} // //func odd(nr int) bool { // if nr == 0 { // return false // } // return even(RevSign(nr) - 1) //} // //func RevSign(nr int) int { // if nr < 0 { // return -nr // } // return nr //} // 将函数作为参数,称之为回调函数 //func main() { // callback(5, Add) //} //func Add(a, b int) { // fmt.Printf("The sum of %d and %d is: %d\n", a, b, a+b) //} // //func callback(y int, f func(int, int)) { // f(y, 2) //} //应用闭包:将函数作为返回值 //func Add2() func(b int) int { // return func(b int) int { // return b + 2 // } //} // //func Adder(a int) func(b int) int { // return func(b int) int { // return a + b // } //} // //func main() { // //make an Add2 function,give it a name p2, and call it: // p2 := Add2() // fmt.Printf("Call Add2 for 3 gives: %v\n", p2(3)) // //make a specialAdder function,a gets value 2: // TwoAdder := Adder(2) // fmt.Printf("The result is: %v\n", TwoAdder(3)) // // start := time.Now() // fibonacci(20) // end := time.Now() // delta := end.Sub(start) // fmt.Printf("longCalculation took this amount of time: %s\n", delta) //} //计算fibonacci数列的执行时间 //通过内存缓存提升fibonacci数列的执行效率 const LIM = 50 var 
fibs [LIM]uint64 func main() { var result uint64 = 0 start := time.Now() for i := 0; i < LIM; i++ { result = fibonacci(i) fmt.Printf("fibonacci(%d) is: %d\n", i, result) } end := time.Now() delta := end.Sub(start) fmt.Printf("longCalculation took this amount of time: %s\n", delta) } func fibonacci(n int) (res uint64) { // memoization: check if fibonacci(n) is already known in array: if fibs[n] != 0 { res = fibs[n] return } if n <= 1 { res = 1 } else { res = fibonacci(n-1) + fibonacci(n-2) } fibs[n] = res return }
package solutions func lengthOfLastWord(s string) int { space, result := 0, 0 for i, char := range s { if char == ' ' { space = i + 1 } else { result = i + 1 - space } } return result }
package serviceaccess_test import ( cfclient "github.com/cloudfoundry-community/go-cfclient" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/vmwarepivotallabs/cf-mgmt/serviceaccess" "github.com/vmwarepivotallabs/cf-mgmt/serviceaccess/fakes" ) var _ = Describe("ServiceInfo", func() { Context("StandardBrokers", func() { fakeClient := new(fakes.FakeCFClient) It("returns standard brokers", func() { standardBrokers := []cfclient.ServiceBroker{ cfclient.ServiceBroker{ Guid: "some-guid", Name: "some-name", }, cfclient.ServiceBroker{ Guid: "some-guid-2", Name: "some-name-2", }, cfclient.ServiceBroker{ Guid: "some-guid-3", Name: "some-name-3", }, } var error error fakeClient.ListServiceBrokersReturns(standardBrokers, error) serviceInfo, _ := serviceaccess.GetServiceInfo(fakeClient) Expect(serviceInfo.StandardBrokers()).Should(HaveLen(3)) }) It("does not return space scoped brokers", func() { brokers := []cfclient.ServiceBroker{ cfclient.ServiceBroker{ Guid: "some-guid", Name: "some-name", }, cfclient.ServiceBroker{ Guid: "some-guid-2", Name: "some-name-2", }, cfclient.ServiceBroker{ Guid: "some-guid-3", Name: "some-space-broker-name", SpaceGUID: "non-empty-guid", }, } var error error fakeClient.ListServiceBrokersReturns(brokers, error) serviceInfo, _ := serviceaccess.GetServiceInfo(fakeClient) Expect(serviceInfo.StandardBrokers()).Should(HaveLen(2)) }) }) })
package userController

// updateParamsStruct is the JSON payload accepted by the user-update
// endpoint. The "valid" tags carry govalidator rules whose "~" suffix is
// the (Chinese) error message returned when the rule fails.
type updateParamsStruct struct {
	// Display name; required ("缺少用户名" = "missing user name").
	Name string `json:"name" valid:"required~缺少用户名"`
	// Password; required ("缺少用户密码" = "missing user password").
	Password string `json:"password" valid:"required~缺少用户密码"`
	// Avatar URL or identifier; optional.
	Avatar string `json:"avatar"`
	// Email address; optional and not validated here.
	Email string `json:"email"`
	// Role name; required ("缺少角色" = "missing role").
	Role string `json:"role" valid:"required~缺少角色"`
}
// Copyright 2018 The gVisor Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

package linux

// Constants for open(2). Values are octal, matching the amd64 Linux ABI;
// these four flags are the ones whose values differ between architectures.
const (
	O_DIRECT    = 000040000
	O_LARGEFILE = 000100000
	O_DIRECTORY = 000200000
	O_NOFOLLOW  = 000400000
)

// Stat represents struct stat.
//
// Field order and the blank padding fields mirror the kernel's amd64
// layout so the type can be marshaled byte-for-byte.
//
// +marshal
type Stat struct {
	Dev   uint64
	Ino   uint64
	Nlink uint64
	Mode  uint32
	UID   uint32
	GID   uint32
	// Blank field: 4 bytes of padding in the kernel struct.
	_       int32
	Rdev    uint64
	Size    int64
	Blksize int64
	Blocks  int64
	ATime   Timespec
	MTime   Timespec
	CTime   Timespec
	// Blank field: reserved/unused tail of the kernel struct.
	_ [3]int64
}
package main

import "fmt"

/*
Write a function that takes a string as input and reverse only the vowels of a string.

Example 1:
Input: "hello"
Output: "holle"

Example 2:
Input: "leetcode"
Output: "leotcede"

Note:
The vowels does not include the letter "y".
*/

func main() {
	fmt.Println(reverseVowels("hello"))
}

// reverseVowels returns ss with its vowels (a/e/i/o/u, either case)
// swapped end-to-end while every other byte stays in place.
func reverseVowels(ss string) string {
	buf := []byte(ss)
	i, j := 0, len(buf)-1
	for i < j {
		switch {
		case !isVowel(buf[i]):
			i++
		case !isVowel(buf[j]):
			j--
		default:
			buf[i], buf[j] = buf[j], buf[i]
			i++
			j--
		}
	}
	return string(buf)
}

// isVowel reports whether b is an ASCII vowel ('y' excluded).
func isVowel(b byte) bool {
	switch b {
	case 'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U':
		return true
	}
	return false
}
package schemes import ( regv1 "github.com/tmax-cloud/registry-operator/api/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ImageReplicateSyncJob is a scheme of image replicate sync job func ImageReplicateSyncJob(repl *regv1.ImageReplicate) *regv1.RegistryJob { labels := make(map[string]string) resName := SubresourceName(repl, SubTypeImageReplicateSyncJob) labels["app"] = "image-replicate-sync-job" labels["apps"] = resName return &regv1.RegistryJob{ ObjectMeta: v1.ObjectMeta{ Name: resName, Namespace: repl.Namespace, Labels: labels, }, Spec: regv1.RegistryJobSpec{ Priority: 100, TTL: 60, Claim: &regv1.RegistryJobClaim{ JobType: regv1.JobTypeSynchronizeExtReg, HandleObject: corev1.LocalObjectReference{ Name: repl.Spec.ToImage.RegistryName, }, }, }, } }
package main

import (
	"fmt"
	"strconv"
)

// Package-level variable declarations: explicit type, explicit type with
// initializer, and inferred type.
var (
	x int
	y int = 0
	z = 0
)

// Multiple variables in one declaration.
var x1, x2 int
var y1, y2 int = 1, 2
var z1, z2 = 1, "a" // inferred, mixed types

// Multi-value assignment from a (value, error) function; the blank
// identifier discards an unwanted result.
var i1, err1 = strconv.Atoi("10")
var _, err2 = strconv.Atoi("10")

var n = 1

// Literal forms: decimal/octal/hex integers, floats, imaginary numbers,
// runes, byte conversions, and string literals.
var (
	n1 = 6789
	n2 = 04567  // octal
	n3 = 0xCDEF // hexadecimal
	f1 = 1.2
	f2 = 1.2e+3 // exponent form
	i2 = 1.2i   // imaginary literal
	i3 = 3 + 1.2i
	r1 = 'a'
	r2 = 'あ'
	b1 byte = 1.0 // float constant representable as an integer
	b2 byte = 0i  // imaginary constant with zero value
	s1 = "abc¥nあいうえお¥nかきくけこ"
	s2 = `abc
あいうえお
かきくけこ`
	s3 = "¥u3042"
	s4 = "¥xE3¥x81¥x82"
)

// Constant declarations: typed, derived from another constant, untyped.
const (
	c1 int = 1
	c2 = c1
	c3 = 1
)

var (
	v1 int = c3
	v2 int = c3
)

// In a const block, omitted expressions repeat the previous one.
const (
	u1 int = 1
	u2
	u3
)

// iota counts declaration lines within the block, even on lines that
// don't reference it.
const (
	a0 = iota
	a1 = iota
	a2 = 9
	a3 = iota
	a4, a5 = iota, iota
	a6 = iota * iota
)

// Classic bit-flag pattern: the expression repeats with increasing iota.
const (
	cc0 = 1 << iota
	cc1
	cc2
)

// iota restarts at 0 in each separate const declaration.
const d0 = iota
const d1 = iota

// main exercises the declarations above, plus short declarations and
// block scoping inside a function.
func main() {
	x3 := 1
	y3, z3 := 2, 3
	fmt.Println(x, y, z)
	fmt.Println(x3, y3, z3)
	fmt.Println(n)
	f()
	fmt.Println(n)
	p := 1
	if p == 1 {
		q := 2 // q is scoped to this if block
		fmt.Println(p, q)
	}
	fmt.Println(n1, n2, n3, f1, f2, i2, i3)
	fmt.Println(r1, r2, b1, b2)
	fmt.Println(s1, s2, s3, s4)
	fmt.Println(c1, c2, c3, v1, v2, u1, u2, u3)
	fmt.Println(a0, a1, a2, a3, a4, a5, a6)
	fmt.Println(cc0, cc1, cc2)
	fmt.Println(d0, d1)
}

// f mutates the package-level n, demonstrating shared state.
func f() {
	n = 2
}
package transform type LookupTable map[string]string func (jp *LookupTable) LookupValue(k string) (string, bool) { s, ok := (*jp)[k] return s, ok } func (jp *LookupTable) LookupRecord(k string) (map[string]any, bool) { if x, ok := (*jp)[k]; ok { return map[string]any{"value": x}, true } return nil, false }
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. package cmd import ( "bytes" "regexp" "github.com/spf13/cobra" ) func runReplace(start string, pattern *regexp.Regexp, replacement []byte) error { return walkReplace(func(data []byte) []byte { var line bytes.Buffer var out bytes.Buffer for i := 0; i < len(data)+1; i++ { if i < len(data) && data[i] != '\n' { line.WriteByte(data[i]) continue } out.Write(pattern.ReplaceAll(line.Bytes(), replacement)) if i < len(data) { out.WriteByte('\n') } line.Reset() } return out.Bytes() }, start) } var replaceCmd = &cobra.Command{ Use: "replace <pattern> <replacement> <file or directory>", Short: "replace occurrences of pattern with replacement.", Aliases: []string{"re", "sub", "s"}, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 3 { return cmd.Usage() } pattern, err := regexp.Compile(args[0]) if err != nil { return err } return runReplace(args[2], pattern, []byte(args[1])) }, }
package session

import (
	"errors"
	"sync"
	"time"

	"github.com/gomodule/redigo/redis"
	"github.com/google/uuid"
)

// RedisSessionMgr manages sessions backed by a Redis server.
type RedisSessionMgr struct {
	// Redis server address ("host:port").
	addr string
	// Redis password (empty when auth is disabled).
	passwd string
	// Shared connection pool.
	pool *redis.Pool
	// Guards sessionMap.
	rwlock sync.RWMutex
	// All live sessions, keyed by session id.
	sessionMap map[string]Session
}

// NewRedisSessionMgr returns an empty manager; Init must be called
// before any sessions are created.
func NewRedisSessionMgr() SessionMgr {
	return &RedisSessionMgr{
		sessionMap: make(map[string]Session, 32),
	}
}

// Init connects the manager to the Redis server at addr. The optional
// first element of options is used as the password.
func (r *RedisSessionMgr) Init(addr string, options ...string) (err error) {
	if len(options) > 0 {
		r.passwd = options[0]
	}
	r.pool = myPool(addr, r.passwd)
	// BUG fix: the original assigned r.addr = r.addr (a self-assignment),
	// so the configured address was never stored.
	r.addr = addr
	return
}

// myPool builds a Redis connection pool that authenticates on dial.
func myPool(addr, passwd string) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     64,
		MaxActive:   1000,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			conn, err := redis.Dial("tcp", addr)
			if err != nil {
				return nil, err
			}
			// Authenticate when a password is configured.
			if _, err := conn.Do("auth", passwd); err != nil {
				conn.Close()
				return nil, err
			}
			return conn, err
		},
		// Health-check borrowed connections.
		// NOTE(review): original comment said to remove this before going
		// to production.
		TestOnBorrow: func(conn redis.Conn, t time.Time) error {
			_, err := conn.Do("PING")
			return err
		},
	}
}

// CreateSession creates a new Redis-backed session with a random UUID
// and registers it in the session map.
func (r *RedisSessionMgr) CreateSession() (session Session, err error) {
	r.rwlock.Lock()
	defer r.rwlock.Unlock()

	id, err := uuid.NewRandom()
	if err != nil {
		return
	}
	sessionId := id.String()
	session = NewRedisSession(sessionId, r.pool)
	r.sessionMap[sessionId] = session
	return
}

// Get returns the session registered under sessionId, or an error when
// it does not exist. Uses the read lock: lookup does not mutate state.
func (r *RedisSessionMgr) Get(sessionId string) (session Session, err error) {
	r.rwlock.RLock()
	defer r.rwlock.RUnlock()

	session, ok := r.sessionMap[sessionId]
	if !ok {
		err = errors.New("session not exists")
		return
	}
	return
}
package main

import (
	"fmt"
	"sync"
)

// mapMu guards user: Go maps are not safe for concurrent writes, and the
// original code let 10 goroutines write the map unsynchronized — a data
// race that panics with "concurrent map writes" (and fails -race).
var mapMu sync.Mutex

// main launches 10 goroutines that each write into the shared map, then
// waits for all of them (the original used time.Sleep, which is racy and
// wastes a full second).
func main() {
	user := make(map[string]interface{})
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			doMap(user, n)
		}(i)
	}
	wg.Wait()
	fmt.Println(user)
}

// doMap stores i under the "name" key, holding mapMu for the write so
// concurrent callers do not race.
func doMap(u map[string]interface{}, i int) {
	mapMu.Lock()
	defer mapMu.Unlock()
	u["name"] = i
}
package repository

import "github.com/flaviowilker/rentcar/app/domain"

// UserRepository abstracts persistence for domain.User entities.
type UserRepository interface {
	// FindByLogin returns the user with the given login.
	FindByLogin(string) (*domain.User, error)
	// FindAll returns every stored user.
	FindAll() ([]*domain.User, error)
	// Create persists a new user and returns the stored entity.
	Create(*domain.User) (*domain.User, error)
	// Update persists changes to an existing user and returns it.
	Update(*domain.User) (*domain.User, error)
	// Delete removes the user with the given id and returns the
	// deleted entity.
	Delete(uint) (*domain.User, error)
}
/*
Package inmem implements the store DAO interface.

This implementation is meant to help get an instance of Argus up and running
quickly without the need to set up a dedicated DB. Since the current
implementation is not scalable, it is recommended for test environments only.
*/
package inmem
package engine import ( "context" "errors" "fmt" "log" "os" "path" "path/filepath" "regexp" "runtime" "sort" "strings" "sync" "testing" "time" "github.com/davecgh/go-spew/spew" "github.com/docker/distribution/reference" dockertypes "github.com/docker/docker/api/types" "github.com/google/uuid" "github.com/jonboulle/clockwork" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/tilt-dev/clusterid" tiltanalytics "github.com/tilt-dev/tilt/internal/analytics" "github.com/tilt-dev/tilt/internal/build" "github.com/tilt-dev/tilt/internal/cloud" "github.com/tilt-dev/tilt/internal/container" "github.com/tilt-dev/tilt/internal/containerupdate" "github.com/tilt-dev/tilt/internal/controllers" apitiltfile "github.com/tilt-dev/tilt/internal/controllers/apis/tiltfile" "github.com/tilt-dev/tilt/internal/controllers/core/cluster" "github.com/tilt-dev/tilt/internal/controllers/core/cmd" "github.com/tilt-dev/tilt/internal/controllers/core/cmdimage" "github.com/tilt-dev/tilt/internal/controllers/core/configmap" "github.com/tilt-dev/tilt/internal/controllers/core/dockercomposelogstream" "github.com/tilt-dev/tilt/internal/controllers/core/dockercomposeservice" "github.com/tilt-dev/tilt/internal/controllers/core/dockerimage" "github.com/tilt-dev/tilt/internal/controllers/core/extension" "github.com/tilt-dev/tilt/internal/controllers/core/extensionrepo" "github.com/tilt-dev/tilt/internal/controllers/core/filewatch" "github.com/tilt-dev/tilt/internal/controllers/core/filewatch/fsevent" "github.com/tilt-dev/tilt/internal/controllers/core/imagemap" "github.com/tilt-dev/tilt/internal/controllers/core/kubernetesapply" "github.com/tilt-dev/tilt/internal/controllers/core/kubernetesdiscovery" "github.com/tilt-dev/tilt/internal/controllers/core/liveupdate" 
"github.com/tilt-dev/tilt/internal/controllers/core/podlogstream" apiportforward "github.com/tilt-dev/tilt/internal/controllers/core/portforward" ctrlsession "github.com/tilt-dev/tilt/internal/controllers/core/session" ctrltiltfile "github.com/tilt-dev/tilt/internal/controllers/core/tiltfile" "github.com/tilt-dev/tilt/internal/controllers/core/togglebutton" ctrluibutton "github.com/tilt-dev/tilt/internal/controllers/core/uibutton" ctrluiresource "github.com/tilt-dev/tilt/internal/controllers/core/uiresource" ctrluisession "github.com/tilt-dev/tilt/internal/controllers/core/uisession" "github.com/tilt-dev/tilt/internal/docker" "github.com/tilt-dev/tilt/internal/dockercompose" engineanalytics "github.com/tilt-dev/tilt/internal/engine/analytics" "github.com/tilt-dev/tilt/internal/engine/buildcontrol" "github.com/tilt-dev/tilt/internal/engine/configs" "github.com/tilt-dev/tilt/internal/engine/dockerprune" "github.com/tilt-dev/tilt/internal/engine/k8srollout" "github.com/tilt-dev/tilt/internal/engine/k8swatch" "github.com/tilt-dev/tilt/internal/engine/local" "github.com/tilt-dev/tilt/internal/engine/session" "github.com/tilt-dev/tilt/internal/engine/telemetry" "github.com/tilt-dev/tilt/internal/engine/uiresource" "github.com/tilt-dev/tilt/internal/engine/uisession" "github.com/tilt-dev/tilt/internal/feature" "github.com/tilt-dev/tilt/internal/hud" "github.com/tilt-dev/tilt/internal/hud/prompt" "github.com/tilt-dev/tilt/internal/hud/server" "github.com/tilt-dev/tilt/internal/hud/view" "github.com/tilt-dev/tilt/internal/k8s" "github.com/tilt-dev/tilt/internal/k8s/testyaml" "github.com/tilt-dev/tilt/internal/localexec" "github.com/tilt-dev/tilt/internal/openurl" "github.com/tilt-dev/tilt/internal/store" "github.com/tilt-dev/tilt/internal/store/buildcontrols" "github.com/tilt-dev/tilt/internal/store/k8sconv" "github.com/tilt-dev/tilt/internal/store/tiltfiles" "github.com/tilt-dev/tilt/internal/testutils" "github.com/tilt-dev/tilt/internal/testutils/bufsync" tiltconfigmap 
"github.com/tilt-dev/tilt/internal/testutils/configmap" "github.com/tilt-dev/tilt/internal/testutils/httptest" "github.com/tilt-dev/tilt/internal/testutils/manifestbuilder" "github.com/tilt-dev/tilt/internal/testutils/podbuilder" "github.com/tilt-dev/tilt/internal/testutils/servicebuilder" "github.com/tilt-dev/tilt/internal/testutils/tempdir" "github.com/tilt-dev/tilt/internal/tiltfile" "github.com/tilt-dev/tilt/internal/tiltfile/cisettings" "github.com/tilt-dev/tilt/internal/tiltfile/config" "github.com/tilt-dev/tilt/internal/tiltfile/k8scontext" "github.com/tilt-dev/tilt/internal/tiltfile/tiltextension" "github.com/tilt-dev/tilt/internal/tiltfile/version" "github.com/tilt-dev/tilt/internal/token" "github.com/tilt-dev/tilt/internal/tracer" "github.com/tilt-dev/tilt/internal/watch" "github.com/tilt-dev/tilt/internal/xdg" "github.com/tilt-dev/tilt/pkg/apis" "github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1" "github.com/tilt-dev/tilt/pkg/assets" "github.com/tilt-dev/tilt/pkg/logger" "github.com/tilt-dev/tilt/pkg/model" "github.com/tilt-dev/wmclient/pkg/analytics" ) var originalWD string const stdTimeout = 2 * time.Second type buildCompletionChannel chan bool func init() { wd, err := os.Getwd() if err != nil { panic(err) } originalWD = wd } const ( simpleTiltfile = ` docker_build('gcr.io/windmill-public-containers/servantes/snack', '.') k8s_yaml('snack.yaml') ` simpleYAML = testyaml.SnackYaml ) // represents a single call to `BuildAndDeploy` type buildAndDeployCall struct { count int specs []model.TargetSpec state store.BuildStateSet } func (c buildAndDeployCall) firstImgTarg() model.ImageTarget { iTargs := c.imageTargets() if len(iTargs) > 0 { return iTargs[0] } return model.ImageTarget{} } func (c buildAndDeployCall) imageTargets() []model.ImageTarget { targs := make([]model.ImageTarget, 0, len(c.specs)) for _, spec := range c.specs { t, ok := spec.(model.ImageTarget) if ok { targs = append(targs, t) } } return targs } func (c buildAndDeployCall) k8s() 
model.K8sTarget { for _, spec := range c.specs { t, ok := spec.(model.K8sTarget) if ok { return t } } return model.K8sTarget{} } func (c buildAndDeployCall) dc() model.DockerComposeTarget { for _, spec := range c.specs { t, ok := spec.(model.DockerComposeTarget) if ok { return t } } return model.DockerComposeTarget{} } func (c buildAndDeployCall) local() model.LocalTarget { for _, spec := range c.specs { t, ok := spec.(model.LocalTarget) if ok { return t } } return model.LocalTarget{} } func (c buildAndDeployCall) dcState() store.BuildState { return c.state[c.dc().ID()] } func (c buildAndDeployCall) k8sState() store.BuildState { return c.state[c.k8s().ID()] } func (c buildAndDeployCall) oneImageState() store.BuildState { imageStates := make([]store.BuildState, 0) for k, v := range c.state { if k.Type == model.TargetTypeImage { imageStates = append(imageStates, v) } } if len(imageStates) != 1 { panic(fmt.Sprintf("More than one state: %v", c.state)) } return imageStates[0] } type fakeBuildAndDeployer struct { t *testing.T mu sync.Mutex calls chan buildAndDeployCall completeBuildsManually bool buildCompletionChans sync.Map // map[string]buildCompletionChannel; close channel at buildCompletionChans[k(targs)] to // complete the build started for targs (where k(targs) generates a unique string key for the set of targets) buildCount int // Inject the container ID of the container started by Docker Compose. // If not set, we will auto-generate an ID. nextDockerComposeContainerID container.ID nextDockerComposeContainerState *dockertypes.ContainerState targetObjectTree map[model.TargetID]podbuilder.PodObjectTree nextDeployedUID types.UID nextPodTemplateSpecHash k8s.PodTemplateSpecHash // Set this to simulate a build with no results and an error. // Do not set this directly, use fixture.SetNextBuildError nextBuildError error buildLogOutput map[model.TargetID]string resultsByID store.BuildResultSet // kClient registers deployed entities for subsequent retrieval. 
kClient *k8s.FakeK8sClient dcClient *dockercompose.FakeDCClient ctrlClient ctrlclient.Client kaReconciler *kubernetesapply.Reconciler dcReconciler *dockercomposeservice.Reconciler } var _ buildcontrol.BuildAndDeployer = &fakeBuildAndDeployer{} func (b *fakeBuildAndDeployer) nextImageBuildResult(ctx context.Context, iTarget model.ImageTarget) (store.ImageBuildResult, error) { var clusterNN types.NamespacedName if iTarget.IsDockerBuild() { clusterNN = types.NamespacedName{Name: iTarget.DockerBuildInfo().Cluster} } else if iTarget.IsCustomBuild() { clusterNN = types.NamespacedName{Name: iTarget.CustomBuildInfo().Cluster} } else if iTarget.IsDockerComposeBuild() { clusterNN = types.NamespacedName{Name: v1alpha1.ClusterNameDocker} } else { return store.ImageBuildResult{}, fmt.Errorf("Unknown build type. ImageTarget: %s", iTarget.ID().String()) } if clusterNN.Name == "" { clusterNN.Name = v1alpha1.ClusterNameDefault } var cluster v1alpha1.Cluster err := b.ctrlClient.Get(ctx, clusterNN, &cluster) if err != nil { return store.ImageBuildResult{}, err } refs, err := iTarget.Refs(&cluster) if err != nil { return store.ImageBuildResult{}, fmt.Errorf("determining refs: %v", err) } tag := fmt.Sprintf("tilt-%d", b.buildCount) localRefTagged := container.MustWithTag(refs.LocalRef(), tag) clusterRefTagged := container.MustWithTag(refs.ClusterRef(), tag) return store.NewImageBuildResult(iTarget.ID(), localRefTagged, clusterRefTagged), nil } func (b *fakeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.RStore, specs []model.TargetSpec, state store.BuildStateSet) (brs store.BuildResultSet, err error) { b.t.Helper() b.mu.Lock() b.buildCount++ buildKey := stringifyTargetIDs(specs) b.registerBuild(buildKey) if !b.completeBuildsManually { // i.e. we should complete builds automatically: mark the build for completion now, // so we return immediately at the end of BuildAndDeploy. 
b.completeBuild(buildKey) } call := buildAndDeployCall{count: b.buildCount, specs: specs, state: state} if call.dc().Empty() && call.k8s().Empty() && call.local().Empty() { b.t.Fatalf("Invalid call: %+v", call) } ids := []model.TargetID{} for _, spec := range specs { id := spec.ID() ids = append(ids, id) output, ok := b.buildLogOutput[id] if ok { logger.Get(ctx).Infof("%s", output) } } defer func() { b.mu.Unlock() // block until we know we're supposed to resolve this build err2 := b.waitUntilBuildCompleted(ctx, buildKey) if err == nil { err = err2 } // don't update b.calls until the end, to ensure appropriate actions have been dispatched first select { case b.calls <- call: default: b.t.Error("writing to fakeBuildAndDeployer would block. either there's a bug or the buffer size needs to be increased") } logger.Get(ctx).Infof("fake built %s. error: %v", ids, err) }() err = b.nextBuildError b.nextBuildError = nil if err != nil { return nil, err } iTargets := model.ExtractImageTargets(specs) fakeImageExistsCheck := func(ctx context.Context, iTarget model.ImageTarget, namedTagged reference.NamedTagged) (bool, error) { return true, nil } queue, err := buildcontrol.NewImageTargetQueue(ctx, iTargets, state, fakeImageExistsCheck) if err != nil { return nil, err } err = queue.RunBuilds(func(target model.TargetSpec, depResults []store.ImageBuildResult) (store.ImageBuildResult, error) { b.t.Helper() iTarget := target.(model.ImageTarget) ibr, err := b.nextImageBuildResult(ctx, iTarget) if err != nil { return store.ImageBuildResult{}, err } var im v1alpha1.ImageMap if err := b.ctrlClient.Get(ctx, types.NamespacedName{Name: iTarget.ImageMapName()}, &im); err != nil { return store.ImageBuildResult{}, err } im.Status = *ibr.ImageMapStatus.DeepCopy() buildStartTime := apis.NowMicro() im.Status.BuildStartTime = &buildStartTime if err := b.ctrlClient.Status().Update(ctx, &im); err != nil { return store.ImageBuildResult{}, err } return ibr, nil }) result := 
queue.NewResults().ToBuildResultSet() if err != nil { return result, err } if !call.dc().Empty() { dcContainerID := container.ID(fmt.Sprintf("dc-%s", path.Base(call.dc().ID().Name.String()))) if b.nextDockerComposeContainerID != "" { dcContainerID = b.nextDockerComposeContainerID } b.dcClient.ContainerIDDefault = dcContainerID err = b.updateDockerComposeServiceStatus(ctx, call.dc(), iTargets) if err != nil { return result, err } dcContainerState := b.nextDockerComposeContainerState result[call.dc().ID()] = store.NewDockerComposeDeployResult( call.dc().ID(), dockercompose.ToServiceStatus(dcContainerID, string(dcContainerID), dcContainerState, nil)) } if kTarg := call.k8s(); !kTarg.Empty() { nextK8sResult := b.nextK8sDeployResult(kTarg) err = b.updateKubernetesApplyStatus(ctx, kTarg, iTargets) if err != nil { return result, err } result[call.k8s().ID()] = nextK8sResult } b.nextDockerComposeContainerID = "" for key, val := range result { b.resultsByID[key] = val } return result, nil } func (b *fakeBuildAndDeployer) updateKubernetesApplyStatus(ctx context.Context, kTarg model.K8sTarget, iTargets []model.ImageTarget) error { imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(kTarg.ImageMaps)) for _, iTarget := range iTargets { if iTarget.IsLiveUpdateOnly { continue } var im v1alpha1.ImageMap nn := types.NamespacedName{Name: iTarget.ImageMapName()} err := b.ctrlClient.Get(ctx, nn, &im) if err != nil { return err } imageMapSet[nn] = &im } clusterName := kTarg.KubernetesApplySpec.Cluster if clusterName == "" { clusterName = v1alpha1.ClusterNameDefault } var cluster v1alpha1.Cluster err := b.ctrlClient.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster) if err != nil { return err } nn := types.NamespacedName{Name: kTarg.ID().Name.String()} status := b.kaReconciler.ForceApply(ctx, nn, kTarg.KubernetesApplySpec, &cluster, imageMapSet) // We want our fake stub to only propagate apiserver problems. 
_ = status return nil } func (b *fakeBuildAndDeployer) updateDockerComposeServiceStatus(ctx context.Context, dcTarg model.DockerComposeTarget, iTargets []model.ImageTarget) error { imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(dcTarg.Spec.ImageMaps)) for _, iTarget := range iTargets { if iTarget.IsLiveUpdateOnly { continue } var im v1alpha1.ImageMap nn := types.NamespacedName{Name: iTarget.ImageMapName()} err := b.ctrlClient.Get(ctx, nn, &im) if err != nil { return err } imageMapSet[nn] = &im } nn := types.NamespacedName{Name: dcTarg.ID().Name.String()} status := b.dcReconciler.ForceApply(ctx, nn, dcTarg.Spec, imageMapSet, false) // We want our fake stub to only propagate apiserver problems. _ = status return nil } func (b *fakeBuildAndDeployer) nextK8sDeployResult(kTarg model.K8sTarget) store.K8sBuildResult { var err error var deployed []k8s.K8sEntity explicitDeploymentEntities := b.targetObjectTree[kTarg.ID()] if len(explicitDeploymentEntities) != 0 { if b.nextDeployedUID != "" { b.t.Fatalf("Cannot set both explicit deployed entities + next deployed UID") } if b.nextPodTemplateSpecHash != "" { b.t.Fatalf("Cannot set both explicit deployed entities + next pod template spec hashes") } // register Deployment + ReplicaSet so that other parts of the system can properly retrieve them b.kClient.Inject( explicitDeploymentEntities.Deployment(), explicitDeploymentEntities.ReplicaSet()) // only return the Deployment entity as deployed since the ReplicaSet + Pod are created implicitly, // i.e. 
they are not returned in a normal apply call for a Deployment deployed = []k8s.K8sEntity{explicitDeploymentEntities.Deployment()} } else { deployed, err = k8s.ParseYAMLFromString(kTarg.YAML) require.NoError(b.t, err) for i := 0; i < len(deployed); i++ { uid := types.UID(uuid.New().String()) if b.nextDeployedUID != "" { uid = b.nextDeployedUID b.nextDeployedUID = "" } deployed[i].SetUID(string(uid)) } for i, e := range deployed { if b.nextPodTemplateSpecHash != "" { e = e.DeepCopy() templateSpecs, err := k8s.ExtractPodTemplateSpec(&e) require.NoError(b.t, err) for _, ts := range templateSpecs { ts.Labels = map[string]string{k8s.TiltPodTemplateHashLabel: string(b.nextPodTemplateSpecHash)} } deployed[i] = e } else { deployed[i], err = k8s.InjectPodTemplateSpecHashes(e) require.NoError(b.t, err) } } } resultYAML, err := k8s.SerializeSpecYAML(deployed) require.NoError(b.t, err) b.kClient.UpsertResult = deployed filter, err := k8sconv.NewKubernetesApplyFilter(resultYAML) require.NoError(b.t, err) return store.NewK8sDeployResult(kTarg.ID(), filter) } func (b *fakeBuildAndDeployer) getOrCreateBuildCompletionChannel(key string) buildCompletionChannel { ch := make(buildCompletionChannel) val, _ := b.buildCompletionChans.LoadOrStore(key, ch) var ok bool ch, ok = val.(buildCompletionChannel) if !ok { panic(fmt.Sprintf("expected map value of type: buildCompletionChannel, got %T", val)) } return ch } func (b *fakeBuildAndDeployer) registerBuild(key string) { b.getOrCreateBuildCompletionChannel(key) } func (b *fakeBuildAndDeployer) waitUntilBuildCompleted(ctx context.Context, key string) error { ch := b.getOrCreateBuildCompletionChannel(key) defer b.buildCompletionChans.Delete(key) // wait until channel for this build is closed, or context is canceled/finished. 
select { case <-ch: return nil case <-ctx.Done(): return ctx.Err() } } func newFakeBuildAndDeployer(t *testing.T, kClient *k8s.FakeK8sClient, dcClient *dockercompose.FakeDCClient, ctrlClient ctrlclient.Client, kaReconciler *kubernetesapply.Reconciler, dcReconciler *dockercomposeservice.Reconciler) *fakeBuildAndDeployer { return &fakeBuildAndDeployer{ t: t, calls: make(chan buildAndDeployCall, 20), buildLogOutput: make(map[model.TargetID]string), resultsByID: store.BuildResultSet{}, kClient: kClient, dcClient: dcClient, ctrlClient: ctrlClient, kaReconciler: kaReconciler, dcReconciler: dcReconciler, targetObjectTree: make(map[model.TargetID]podbuilder.PodObjectTree), } } func (b *fakeBuildAndDeployer) completeBuild(key string) { ch := b.getOrCreateBuildCompletionChannel(key) close(ch) } func TestUpper_Up(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("foobar") f.setManifests([]model.Manifest{manifest}) storeErr := make(chan error, 1) go func() { storeErr <- f.upper.Init(f.ctx, InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), StartTime: f.Now(), }) }() call := f.nextCallComplete() assert.Equal(t, manifest.K8sTarget().ID(), call.k8s().ID()) close(f.b.calls) // cancel the context to simulate a Ctrl-C f.cancel() err := <-storeErr if assert.NotNil(t, err, "Store returned nil error (expected context canceled)") { assert.Contains(t, err.Error(), context.Canceled.Error(), "Store error was not as expected") } state := f.upper.store.RLockState() defer f.upper.store.RUnlockState() buildRecord := state.ManifestTargets[manifest.Name].Status().LastBuild() lines := strings.Split(state.LogStore.SpanLog(buildRecord.SpanID), "\n") assertLineMatches(t, lines, regexp.MustCompile("fake built .*foobar")) } func TestUpper_UpK8sEntityOrdering(t *testing.T) { f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI}) f.useRealTiltfileLoader() postgresEntities, err := k8s.ParseYAMLFromString(testyaml.PostgresYAML) require.NoError(t, err) yaml, err := 
k8s.SerializeSpecYAML(postgresEntities[:3]) // only take entities that don't belong to a workload require.NoError(t, err) f.WriteFile("Tiltfile", `k8s_yaml('postgres.yaml')`) f.WriteFile("postgres.yaml", yaml) storeErr := make(chan error, 1) go func() { storeErr <- f.upper.Init(f.ctx, InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), StartTime: f.Now(), }) }() call := f.nextCallComplete() entities, err := k8s.ParseYAMLFromString(call.k8s().YAML) require.NoError(t, err) expectedKindOrder := []string{"PersistentVolume", "PersistentVolumeClaim", "ConfigMap"} actualKindOrder := make([]string, len(entities)) for i, e := range entities { actualKindOrder[i] = e.GVK().Kind } assert.Equal(t, expectedKindOrder, actualKindOrder, "YAML on the manifest should be in sorted order") f.assertAllBuildsConsumed() require.NoError(t, <-storeErr) } func TestUpper_CI(t *testing.T) { f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI}) manifest := f.newManifest("foobar") pb := f.registerForDeployer(manifest) f.setManifests([]model.Manifest{manifest}) storeErr := make(chan error, 1) go func() { storeErr <- f.upper.Init(f.ctx, InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), UserArgs: nil, // equivalent to `tilt up --watch=false` (i.e. 
not specifying any manifest names) StartTime: f.Now(), }) }() call := f.nextCallComplete() close(f.b.calls) assert.Equal(t, "foobar", call.k8s().ID().Name.String()) f.startPod(pb.WithPhase(string(v1.PodRunning)).Build(), manifest.Name) require.NoError(t, <-storeErr) } func TestFirstBuildFails_Up(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("foobar") f.SetNextBuildError(errors.New("Build failed")) f.Start([]model.Manifest{manifest}) call := f.nextCall() assert.True(t, call.oneImageState().IsEmpty()) f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("a.go")) call = f.nextCall() assert.True(t, call.oneImageState().IsEmpty()) assert.Equal(t, []string{f.JoinPath("a.go")}, call.oneImageState().FilesChanged()) err := f.Stop() assert.NoError(t, err) f.assertAllBuildsConsumed() } func TestFirstBuildCancels_Up(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("foobar") f.SetNextBuildError(context.Canceled) f.Start([]model.Manifest{manifest}) call := f.nextCall() assert.True(t, call.oneImageState().IsEmpty()) err := f.Stop() assert.NoError(t, err) f.assertAllBuildsConsumed() } func TestFirstBuildFails_CI(t *testing.T) { f := newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI}) manifest := f.newManifest("foobar") buildFailedToken := errors.New("doesn't compile") f.SetNextBuildError(buildFailedToken) f.setManifests([]model.Manifest{manifest}) f.Init(InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), TerminalMode: store.TerminalModeHUD, StartTime: f.Now(), }) f.WaitUntilManifestState("build has failed", manifest.ManifestName(), func(st store.ManifestState) bool { return st.LastBuild().Error != nil }) select { case err := <-f.upperInitResult: require.NotNil(t, err) assert.Contains(t, err.Error(), "doesn't compile") case <-time.After(stdTimeout): t.Fatal("Timed out waiting for exit action") } f.withState(func(es store.EngineState) { assert.True(t, es.ExitSignal) }) } func TestCIIgnoresDisabledResources(t *testing.T) { f := 
newTestFixture(t, fixtureOptions{engineMode: &store.EngineModeCI}) m1 := f.newManifest("m1") pb := f.registerForDeployer(m1) m2 := f.newManifest("m2") f.setManifests([]model.Manifest{m1, m2}) f.tfl.Result.EnabledManifests = []model.ManifestName{m1.Name} storeErr := make(chan error, 1) go func() { storeErr <- f.upper.Init(f.ctx, InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), StartTime: f.Now(), }) }() call := f.nextCallComplete() close(f.b.calls) assert.Equal(t, "m1", call.k8s().ID().Name.String()) f.startPod(pb.WithPhase(string(v1.PodRunning)).Build(), m1.Name) require.NoError(t, <-storeErr) } func TestConfigFileChangeClearsBuildStateToForceImageBuild(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` docker_build('gcr.io/windmill-public-containers/servantes/snack', '.', live_update=[sync('.', '/app')]) k8s_yaml('snack.yaml') `) f.WriteFile("Dockerfile", `FROM iron/go:prod`) f.WriteFile("snack.yaml", simpleYAML) f.loadAndStart() // First call: with the old manifest call := f.nextCall("old manifest") assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents) f.WriteConfigFiles("Dockerfile", `FROM iron/go:dev`) // Second call: new manifest! call = f.nextCall("new manifest") assert.Equal(t, "FROM iron/go:dev", call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, testyaml.SnackYAMLPostConfig, call.k8s().YAML) // Since the manifest changed, we cleared the previous build state to force an image build // (i.e. 
check that we called BuildAndDeploy with no pre-existing state) assert.False(t, call.oneImageState().HasLastResult()) err := f.Stop() assert.NoError(t, err) f.assertAllBuildsConsumed() } func TestMultipleChangesOnlyDeployOneManifest(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` # ensure builds happen in deterministic order update_settings(max_parallel_updates=1) docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1") docker_build("gcr.io/windmill-public-containers/servantes/doggos", "./doggos", dockerfile="Dockerfile2") k8s_yaml(['snack.yaml', 'doggos.yaml']) k8s_resource('snack', new_name='baz') k8s_resource('doggos', new_name='quux') `) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("Dockerfile1", `FROM iron/go:prod`) f.WriteFile("Dockerfile2", `FROM iron/go:prod`) f.WriteFile("doggos.yaml", testyaml.DoggosDeploymentYaml) f.loadAndStart() // First call: with the old manifests call := f.nextCall("old manifest (baz)") assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, "baz", string(call.k8s().Name)) call = f.nextCall("old manifest (quux)") assert.Equal(t, `FROM iron/go:prod`, call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, "quux", string(call.k8s().Name)) // rewrite the dockerfiles f.WriteConfigFiles( "Dockerfile1", `FROM iron/go:dev1`, "Dockerfile2", "FROM iron/go:dev2") // Builds triggered by config file changes call = f.nextCall("manifest from config files (baz)") assert.Equal(t, `FROM iron/go:dev1`, call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, "baz", string(call.k8s().Name)) call = f.nextCall("manifest from config files (quux)") assert.Equal(t, `FROM iron/go:dev2`, call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, "quux", string(call.k8s().Name)) // Now change (only one) dockerfile f.WriteConfigFiles("Dockerfile1", `FROM node:10`) 
// Second call: one new manifest! call = f.nextCall("changed config file --> new manifest") assert.Equal(t, "baz", string(call.k8s().Name)) assert.ElementsMatch(t, []string{}, call.oneImageState().FilesChanged()) // Since the manifest changed, we cleared the previous build state to force an image build assert.False(t, call.oneImageState().HasLastResult()) // Importantly the other manifest, quux, is _not_ called -- the DF change didn't affect its manifest err := f.Stop() assert.Nil(t, err) f.assertAllBuildsConsumed() } func TestSecondResourceIsBuilt(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1") k8s_yaml('snack.yaml') k8s_resource('snack', new_name='baz') # rename "snack" --> "baz" `) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("Dockerfile1", `FROM iron/go:dev1`) f.WriteFile("Dockerfile2", `FROM iron/go:dev2`) f.WriteFile("doggos.yaml", testyaml.DoggosDeploymentYaml) f.loadAndStart() // First call: with one resource call := f.nextCall("old manifest (baz)") assert.Equal(t, "FROM iron/go:dev1", call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, "baz", string(call.k8s().Name)) f.assertNoCall() // Now add a second resource f.WriteConfigFiles("Tiltfile", ` docker_build("gcr.io/windmill-public-containers/servantes/snack", "./snack", dockerfile="Dockerfile1") docker_build("gcr.io/windmill-public-containers/servantes/doggos", "./doggos", dockerfile="Dockerfile2") k8s_yaml(['snack.yaml', 'doggos.yaml']) k8s_resource('snack', new_name='baz') # rename "snack" --> "baz" k8s_resource('doggos', new_name='quux') # rename "doggos" --> "quux" `) // Expect a build of quux, the new resource call = f.nextCall("changed config file --> new manifest") assert.Equal(t, "quux", string(call.k8s().Name)) assert.ElementsMatch(t, []string{}, call.oneImageState().FilesChanged()) err := f.Stop() assert.Nil(t, err) 
f.assertAllBuildsConsumed() } func TestConfigChange_NoOpChange(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile') k8s_yaml('snack.yaml')`) f.WriteFile("Dockerfile", `FROM iron/go:dev1`) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("src/main.go", "hello") f.loadAndStart() // First call: with the old manifests call := f.nextCall("initial call") assert.Equal(t, "FROM iron/go:dev1", call.firstImgTarg().DockerBuildInfo().DockerfileContents) assert.Equal(t, "snack", string(call.k8s().Name)) // Write same contents to Dockerfile -- an "edit" event for a config file, // but it doesn't change the manifest at all. f.WriteConfigFiles("Dockerfile", `FROM iron/go:dev1`) f.assertNoCall("Dockerfile hasn't changed, so there shouldn't be any builds") // Second call: Editing the Dockerfile means we have to reevaluate the Tiltfile. // Editing the random file means we have to do a rebuild. BUT! The Dockerfile // hasn't changed, so the manifest hasn't changed, so we can do an incremental build. 
changed := f.WriteFile("src/main.go", "goodbye") f.fsWatcher.Events <- watch.NewFileEvent(changed) call = f.nextCall("build from file change") assert.Equal(t, "snack", string(call.k8s().Name)) assert.ElementsMatch(t, []string{ f.JoinPath("src/main.go"), }, call.oneImageState().FilesChanged()) assert.True(t, call.oneImageState().HasLastResult(), "Unchanged manifest --> we do NOT clear the build state") err := f.Stop() assert.Nil(t, err) f.assertAllBuildsConsumed() } func TestConfigChange_TiltfileErrorAndFixWithNoChanges(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() origTiltfile := ` docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile') k8s_yaml('snack.yaml')` f.WriteFile("Tiltfile", origTiltfile) f.WriteFile("Dockerfile", `FROM iron/go:dev`) f.WriteFile("snack.yaml", simpleYAML) f.loadAndStart() // First call: all is well _ = f.nextCall("first call") // Second call: change Tiltfile, break manifest f.WriteConfigFiles("Tiltfile", "broken") f.WaitUntil("tiltfile error set", func(st store.EngineState) bool { return st.LastMainTiltfileError() != nil }) f.assertNoCall("Tiltfile error should prevent BuildAndDeploy from being called") // Third call: put Tiltfile back. No change to manifest or to synced files, so expect no build. 
f.WriteConfigFiles("Tiltfile", origTiltfile) f.WaitUntil("tiltfile error cleared", func(st store.EngineState) bool { return st.LastMainTiltfileError() == nil }) f.withState(func(state store.EngineState) { assert.Equal(t, "", buildcontrol.NextManifestNameToBuild(state).String()) }) } func TestConfigChange_TiltfileErrorAndFixWithFileChange(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() tiltfileWithCmd := func(cmd string) string { return fmt.Sprintf(` docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile', live_update=[ sync('./src', '/src'), run('%s') ] ) k8s_yaml('snack.yaml') `, cmd) } f.WriteFile("Tiltfile", tiltfileWithCmd("original")) f.WriteFile("Dockerfile", `FROM iron/go:dev`) f.WriteFile("snack.yaml", simpleYAML) f.loadAndStart() // First call: all is well _ = f.nextCall("first call") // Second call: change Tiltfile, break manifest f.WriteConfigFiles("Tiltfile", "broken") f.WaitUntil("tiltfile error set", func(st store.EngineState) bool { return st.LastMainTiltfileError() != nil }) f.assertNoCall("Tiltfile error should prevent BuildAndDeploy from being called") // Third call: put Tiltfile back. 
// NOTE(review): this chunk arrived whitespace-collapsed. Tokens are preserved
// byte-for-byte; only line breaks, indentation, and comments were restored.
// Multi-line raw-string literals (Tiltfile contents) were re-wrapped at
// statement boundaries — verify the exact newlines against version control.

// (tail of a config-change test whose start is above this chunk)
// manifest changed, so expect a build
	f.WriteConfigFiles("Tiltfile", tiltfileWithCmd("changed"))

	call := f.nextCall("fixed broken config and rebuilt manifest")
	assert.False(t, call.oneImageState().HasLastResult(),
		"expected this call to have NO image (since we should have cleared it to force an image build)")

	f.WaitUntil("tiltfile error cleared", func(state store.EngineState) bool {
		return state.LastMainTiltfileError() == nil
	})

	f.withManifestTarget("snack", func(mt store.ManifestTarget) {
		assert.Equal(t, model.ToUnixCmd("changed").Argv, mt.Manifest.ImageTargetAt(0).LiveUpdateSpec.Execs[0].Args,
			"Tiltfile change should have propagated to manifest")
	})

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies that appending trigger_mode(TRIGGER_MODE_MANUAL) to the Tiltfile
// updates the manifest's TriggerMode without kicking off a rebuild
// (a TriggerMode change does not invalidate the current build).
func TestConfigChange_TriggerModeChangePropagatesButDoesntInvalidateBuild(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	origTiltfile := `
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`
	f.WriteFile("Tiltfile", origTiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	_ = f.nextCall("initial build")
	f.WaitUntilManifest("manifest has triggerMode = auto (default)", "snack", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeAuto
	})

	// Update Tiltfile to change the trigger mode of the manifest
	tiltfileWithTriggerMode := fmt.Sprintf(`%s
trigger_mode(TRIGGER_MODE_MANUAL)`, origTiltfile)
	f.WriteConfigFiles("Tiltfile", tiltfileWithTriggerMode)

	f.assertNoCall("A change to TriggerMode shouldn't trigger an update (doesn't invalidate current build)")
	f.WaitUntilManifest("triggerMode has changed on manifest", "snack", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
	})

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies that a manifest with pending file changes, which sat unbuilt while
// manual, builds immediately once its trigger mode is switched to auto, and
// that the pre-existing build state is NOT cleared by the mode change.
func TestConfigChange_ManifestWithPendingChangesBuildsIfTriggerModeChangedToAuto(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	baseTiltfile := `trigger_mode(%s)
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile')
k8s_yaml('snack.yaml')`
	triggerManualTiltfile := fmt.Sprintf(baseTiltfile, "TRIGGER_MODE_MANUAL")
	f.WriteFile("Tiltfile", triggerManualTiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev1`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	// First call: with the old manifests
	_ = f.nextCall("initial build")
	var imageTargetID model.TargetID
	f.WaitUntilManifest("manifest has triggerMode = manual_after_initial", "snack", func(mt store.ManifestTarget) bool {
		imageTargetID = mt.Manifest.ImageTargetAt(0).ID() // grab for later
		return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit
	})

	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("src/main.go"))
	f.WaitUntil("pending change appears", func(st store.EngineState) bool {
		return len(st.BuildStatus(imageTargetID).PendingFileChanges) > 0
	})
	f.assertNoCall("even tho there are pending changes, manual manifest shouldn't build w/o explicit trigger")

	// Update Tiltfile to change the trigger mode of the manifest
	triggerAutoTiltfile := fmt.Sprintf(baseTiltfile, "TRIGGER_MODE_AUTO")
	f.WriteConfigFiles("Tiltfile", triggerAutoTiltfile)

	call := f.nextCall("manifest updated b/c it's now TriggerModeAuto")
	assert.True(t, call.oneImageState().HasLastResult(),
		"we did NOT clear the build state (b/c a change to Manifest.TriggerMode does NOT invalidate the build")
	f.WaitUntilManifest("triggerMode has changed on manifest", "snack", func(mt store.ManifestTarget) bool {
		return mt.Manifest.TriggerMode == model.TriggerModeAuto
	})
	f.WaitUntil("manifest is no longer in trigger queue", func(st store.EngineState) bool {
		return len(st.TriggerQueue) == 0
	})

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies that a fully-manual manifest (skipped at startup) gets built once
// its trigger mode changes to ManualWithAutoInit, without an explicit trigger.
func TestConfigChange_ManifestIncludingInitialBuildsIfTriggerModeChangedToManualAfterInitial(t *testing.T) {
	f := newTestFixture(t)

	foo := f.newManifest("foo").WithTriggerMode(model.TriggerModeManual)
	bar := f.newManifest("bar")

	f.Start([]model.Manifest{foo, bar})

	// foo should be skipped, and just bar built
	call := f.nextCallComplete("initial build")
	require.Equal(t, bar.ImageTargetAt(0), call.firstImgTarg())

	// since foo is "Manual", it should not be built on startup
	// make sure there's nothing waiting to build
	f.withState(func(state store.EngineState) {
		n := buildcontrol.NextManifestNameToBuild(state)
		require.Equal(t, model.ManifestName(""), n)
	})

	// change the trigger mode
	foo = foo.WithTriggerMode(model.TriggerModeManualWithAutoInit)
	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:       model.MainTiltfileManifestName,
		FinishTime: f.Now(),
		Manifests:  []model.Manifest{foo, bar},
	})

	// now that it is a trigger mode that should build on startup, a build should kick off
	// even though we didn't trigger anything
	call = f.nextCallComplete("second build")
	require.Equal(t, foo.ImageTargetAt(0), call.firstImgTarg())

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies that when a Tiltfile edit forces a manifest rebuild, the changed
// file's name ("1 File Changed: [...]") appears in that manifest's log.
func TestConfigChange_FilenamesLoggedInManifestBuild(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `
k8s_yaml('snack.yaml')
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src')`)
	f.WriteFile("src/Dockerfile", `FROM iron/go:dev`)
	f.WriteFile("snack.yaml", simpleYAML)

	f.loadAndStart()

	f.WaitUntilManifestState("snack loaded", "snack", func(ms store.ManifestState) bool {
		return len(ms.BuildHistory) == 1
	})

	// make a config file change to kick off a new build
	f.WriteFile("Tiltfile", `
k8s_yaml('snack.yaml')
docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', ignore='Dockerfile')`)
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile"))

	f.WaitUntilManifestState("snack reloaded", "snack", func(ms store.ManifestState) bool {
		return len(ms.BuildHistory) == 2
	})

	f.withState(func(es store.EngineState) {
		expected := fmt.Sprintf("1 File Changed: [%s]", f.JoinPath("Tiltfile"))
		require.Contains(t, es.LogStore.ManifestLog("snack"), expected)
	})

	err := f.Stop()
	assert.Nil(t, err)
}

// Verifies that redefining a local_resource's command in the Tiltfile
// triggers a rebuild carrying the new UpdateCmdSpec.
func TestConfigChange_LocalResourceChange(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	f.WriteFile("Tiltfile", `print('tiltfile 1')
local_resource('local', 'echo one fish two fish', deps='foo.bar')`)

	f.loadAndStart()

	// First call: with the old manifests
	call := f.nextCall("initial call")
	assert.Equal(t, "local", string(call.local().Name))
	assert.Equal(t, "echo one fish two fish", model.ArgListToString(call.local().UpdateCmdSpec.Args))

	// Change the definition of the resource -- this changes the manifest which should trigger an updated
	f.WriteConfigFiles("Tiltfile", `print('tiltfile 2')
local_resource('local', 'echo red fish blue fish', deps='foo.bar')`)
	call = f.nextCall("rebuild from config change")
	assert.Equal(t, "echo red fish blue fish", model.ArgListToString(call.local().UpdateCmdSpec.Args))

	err := f.Stop()
	assert.Nil(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies that a file event under a docker image target's context triggers
// a rebuild whose build state records exactly that changed file.
func TestDockerRebuildWithChangedFiles(t *testing.T) {
	f := newTestFixture(t)
	df := `FROM golang
ADD ./ ./
go build ./...
`
	manifest := f.newManifest("foobar")
	iTarget := manifest.ImageTargetAt(0).
		WithLiveUpdateSpec("foobar", v1alpha1.LiveUpdateSpec{}).
		WithDockerImage(v1alpha1.DockerImageSpec{
			DockerfileContents: df,
			Context:            f.Path(),
		})
	manifest = manifest.WithImageTarget(iTarget)

	f.Start([]model.Manifest{manifest})

	call := f.nextCallComplete("first build")
	assert.True(t, call.oneImageState().IsEmpty())

	// Simulate a change to main.go
	mainPath := filepath.Join(f.Path(), "main.go")
	f.fsWatcher.Events <- watch.NewFileEvent(mainPath)

	// Check that this triggered a rebuild.
	call = f.nextCallComplete("rebuild triggered")
	assert.Equal(t, []string{mainPath}, call.oneImageState().FilesChanged())

	err := f.Stop()
	assert.NoError(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies the HUD view receives both resources (the Tiltfile resource first,
// then the manifest) after a build.
func TestHudUpdated(t *testing.T) {
	f := newTestFixture(t)

	manifest := f.newManifest("foobar")
	f.Start([]model.Manifest{manifest})
	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	f.WaitUntilHUD("hud update", func(v view.View) bool {
		return len(v.Resources) == 2
	})

	err := f.Stop()
	assert.Equal(t, nil, err)

	assert.Equal(t, 2, len(f.fakeHud().LastView.Resources))
	assert.Equal(t, store.MainTiltfileManifestName, f.fakeHud().LastView.Resources[0].Name)
	rv := f.fakeHud().LastView.Resources[1]
	assert.Equal(t, manifest.Name, rv.Name)
	f.assertAllBuildsConsumed()
}

// Verifies that in terminal-stream mode (HUD disabled) log output is still
// processed: the processed-log checkpoint advances as new log actions arrive.
func TestDisabledHudUpdated(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("TODO(nick): Investigate")
	}
	f := newTestFixture(t)

	manifest := f.newManifest("foobar")
	opt := func(ia InitAction) InitAction {
		ia.TerminalMode = store.TerminalModeStream
		return ia
	}
	f.Start([]model.Manifest{manifest}, opt)
	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	// Make sure we're done logging stuff, then grab # processed bytes
	f.WaitUntil("foobar logs appear", func(es store.EngineState) bool {
		return strings.Contains(f.log.String(), "Initial Build")
	})
	assert.True(t, f.ts.ProcessedLogs > 0)
	oldCheckpoint := f.ts.ProcessedLogs

	// Log something new, make sure it's reflected
	msg := []byte("hello world!\n")
	f.store.Dispatch(store.NewGlobalLogAction(logger.InfoLvl, msg))

	f.WaitUntil("hello world logs appear", func(es store.EngineState) bool {
		return strings.Contains(f.log.String(), "hello world!")
	})
	assert.True(t, f.ts.ProcessedLogs > oldCheckpoint)

	err := f.Stop()
	assert.Equal(t, nil, err)
	f.assertAllBuildsConsumed()
}

// Verifies that a pod event (CrashLoopBackOff) flows through to the HUD's
// per-resource pod name and status.
func TestPodEvent(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	pod := pb.WithPhase("CrashLoopBackOff").Build()
	f.podEvent(pod)

	f.WaitUntilHUDResource("hud update", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodName == pod.Name
	})

	rv := f.hudResource("foobar")
	assert.Equal(t, pod.Name, rv.K8sInfo().PodName)
	assert.Equal(t, "CrashLoopBackOff", rv.K8sInfo().PodStatus)

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()
}

// Verifies that container details (name, ports, and an intentionally-empty
// container ID) from a pod's status are recorded on the manifest's pod state.
func TestPodEventContainerStatus(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	var ref reference.NamedTagged
	f.WaitUntilManifestState("image appears", "foobar", func(ms store.ManifestState) bool {
		result := ms.BuildStatus(manifest.ImageTargetAt(0).ID()).LastResult
		ref, _ = container.ParseNamedTagged(store.ClusterImageRefFromBuildResult(result))
		return ref != nil
	})

	pod := pb.Build()
	pod.Status = k8s.FakePodStatus(ref, "Running")
	pod.Status.ContainerStatuses[0].ContainerID = ""
	pod.Spec = k8s.FakePodSpec(ref)
	f.podEvent(pod)

	podState := v1alpha1.Pod{}
	f.WaitUntilManifestState("container status", "foobar", func(ms store.ManifestState) bool {
		podState = ms.MostRecentPod()
		return podState.Name == pod.Name && len(podState.Containers) > 0
	})

	// NB: shadows the imported `container` package for the rest of this test.
	container := podState.Containers[0]
	assert.Equal(t, "", container.ID)
	assert.Equal(t, "main", container.Name)
	assert.Equal(t, []int32{8080}, container.Ports)

	err := f.Stop()
	assert.Nil(t, err)
}

// Verifies that with no image target to match container statuses against,
// the first container status is used, matched to its spec by container name.
func TestPodEventContainerStatusWithoutImage(t *testing.T) {
	f := newTestFixture(t)
	manifest := model.Manifest{
		Name: model.ManifestName("foobar"),
	}.WithDeployTarget(k8s.MustTarget("foobar", SanchoYAML))
	pb := f.registerForDeployer(manifest)
	ref := container.MustParseNamedTagged("dockerhub/we-didnt-build-this:foo")

	f.Start([]model.Manifest{manifest})

	f.WaitUntilManifestState("first build complete", "foobar", func(ms store.ManifestState) bool {
		return len(ms.BuildHistory) > 0
	})

	pod := pb.Build()
	pod.Status = k8s.FakePodStatus(ref, "Running")

	// If we have no image target to match container status by image ref,
	// we should just take the first one, i.e. this one
	pod.Status.ContainerStatuses[0].Name = "first-container"
	pod.Status.ContainerStatuses[0].ContainerID = "docker://great-container-id"

	pod.Spec = v1.PodSpec{
		Containers: []v1.Container{
			{
				Name:  "second-container",
				Image: "gcr.io/windmill-public-containers/tilt-synclet:latest",
				Ports: []v1.ContainerPort{{ContainerPort: 9999}},
			},
			// we match container spec by NAME, so we'll get this one even tho it comes second.
			{
				Name:  "first-container",
				Image: ref.Name(),
				Ports: []v1.ContainerPort{{ContainerPort: 8080}},
			},
		},
	}

	f.podEvent(pod)

	podState := v1alpha1.Pod{}
	f.WaitUntilManifestState("container status", "foobar", func(ms store.ManifestState) bool {
		podState = ms.MostRecentPod()
		return podState.Name == pod.Name && len(podState.Containers) > 0
	})

	// If we have no image target to match container by image ref, we just take the first one
	container := podState.Containers[0]
	assert.Equal(t, "great-container-id", container.ID)
	assert.Equal(t, "first-container", container.Name)
	assert.Equal(t, []int32{8080}, store.AllPodContainerPorts(podState))

	err := f.Stop()
	assert.Nil(t, err)
}

// Verifies that a pod with a later creation timestamp replaces an earlier
// pod in the HUD, regardless of arrival order.
func TestPodEventUpdateByTimestamp(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	firstCreationTime := f.Now()
	pod := pb.
		WithCreationTime(firstCreationTime).
		WithPhase("CrashLoopBackOff").
		Build()
	f.podEvent(pod)
	f.WaitUntilHUDResource("hud update crash", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
	})

	pb = podbuilder.New(t, manifest).
		WithPodName("my-new-pod").
		WithCreationTime(firstCreationTime.Add(time.Minute * 2))
	newPod := pb.Build()
	f.podEvent(newPod)
	f.WaitUntilHUDResource("hud update running", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "Running"
	})

	rv := f.hudResource("foobar")
	assert.Equal(t, newPod.Name, rv.K8sInfo().PodName)
	assert.Equal(t, "Running", rv.K8sInfo().PodStatus)

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()
}

// Verifies that disabling a resource drops its pod from the runtime state.
func TestPodForgottenOnDisable(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	pod := pb.WithPhase("CrashLoopBackOff").Build()
	f.podEvent(pod)

	f.WaitUntilManifestState("pod seen", "foobar", func(ms store.ManifestState) bool {
		return ms.K8sRuntimeState().MostRecentPod().Status == "CrashLoopBackOff"
	})

	f.setDisableState("foobar", true)

	f.WaitUntilManifestState("pod unseen", "foobar", func(ms store.ManifestState) bool {
		return ms.K8sRuntimeState().PodLen() == 0
	})

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()
}

// Verifies that a later event for the SAME pod name updates that pod's
// status in place (CrashLoopBackOff -> Running).
func TestPodEventUpdateByPodName(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCallComplete()
	assert.True(t, call.oneImageState().IsEmpty())

	creationTime := f.Now()
	pb = pb.
		WithCreationTime(creationTime).
		WithPhase("CrashLoopBackOff")
	f.podEvent(pb.Build())

	f.WaitUntilHUDResource("pod crashes", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
	})

	f.podEvent(pb.WithPhase("Running").Build())

	f.WaitUntilHUDResource("pod comes back", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "Running"
	})

	rv := f.hudResource("foobar")
	assert.Equal(t, pb.Build().Name, rv.K8sInfo().PodName)
	assert.Equal(t, "Running", rv.K8sInfo().PodStatus)

	err := f.Stop()
	if err != nil {
		t.Fatal(err)
	}

	f.assertAllBuildsConsumed()
}

// Verifies that an event for a pod with an OLDER creation timestamp does not
// displace the currently-tracked (newer) pod in the HUD.
func TestPodEventIgnoreOlderPod(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.True(t, call.oneImageState().IsEmpty())

	creationTime := f.Now()
	pb = pb.
		WithPodName("my-new-pod").
		WithPhase("CrashLoopBackOff").
		WithCreationTime(creationTime)
	pod := pb.Build()
	f.podEvent(pod)
	f.WaitUntilHUDResource("hud update", "foobar", func(res view.Resource) bool {
		return res.K8sInfo().PodStatus == "CrashLoopBackOff"
	})

	pb = pb.WithCreationTime(creationTime.Add(time.Minute * -1))
	oldPod := pb.Build()
	f.podEvent(oldPod)
	// Brief pause so the (ignored) old-pod event has a chance to be processed.
	time.Sleep(10 * time.Millisecond)

	assert.NoError(t, f.Stop())
	f.assertAllBuildsConsumed()

	rv := f.hudResource("foobar")
	assert.Equal(t, pod.Name, rv.K8sInfo().PodName)
	assert.Equal(t, "CrashLoopBackOff", rv.K8sInfo().PodStatus)
}

// Verifies that once a pod's spec/status carry the built image ref, its
// container ports (8080) are reflected in the manifest's pod state.
func TestPodContainerStatus(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("fe")
	pb := f.registerForDeployer(manifest)
	f.Start([]model.Manifest{manifest})

	_ = f.nextCall()

	var ref reference.NamedTagged
	f.WaitUntilManifestState("image appears", "fe", func(ms store.ManifestState) bool {
		result := ms.BuildStatus(manifest.ImageTargetAt(0).ID()).LastResult
		ref, _ = container.ParseNamedTagged(store.ClusterImageRefFromBuildResult(result))
		return ref != nil
	})

	startedAt := f.Now()
	pb = pb.WithCreationTime(startedAt)
	pod := pb.Build()
	f.podEvent(pod)
	f.WaitUntilManifestState("pod appears", "fe", func(ms store.ManifestState) bool {
		return ms.MostRecentPod().Name == pod.Name
	})

	pod = pb.Build()
	pod.Spec = k8s.FakePodSpec(ref)
	pod.Status = k8s.FakePodStatus(ref, "Running")
	f.podEvent(pod)

	f.WaitUntilManifestState("container is ready", "fe", func(ms store.ManifestState) bool {
		ports := store.AllPodContainerPorts(ms.MostRecentPod())
		return len(ports) == 1 && ports[0] == 8080
	})

	err := f.Stop()
	assert.NoError(t, err)

	f.assertAllBuildsConsumed()
}

// Verifies that a file matching an image target's ignore patterns does not
// trigger a rebuild.
func TestUpper_WatchDockerIgnoredFiles(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	manifest = manifest.WithImageTarget(manifest.ImageTargetAt(0).
		WithIgnores([]v1alpha1.IgnoreDef{
			{
				BasePath: f.Path(),
				Patterns: []string{"dignore.txt"},
			},
		}))

	f.Start([]model.Manifest{manifest})

	call := f.nextCall()
	assert.Equal(t, manifest.ImageTargetAt(0), call.firstImgTarg())

	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("dignore.txt"))
	f.assertNoCall("event for ignored file should not trigger build")

	err := f.Stop()
	assert.NoError(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies pod logs accumulate in the pod's log span across a rebuild of the
// same pod.
func TestUpper_ShowErrorPodLog(t *testing.T) {
	f := newTestFixture(t)
	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	pod := pb.Build()
	f.startPod(pod, name)
	f.podLog(pod, name, "first string")

	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("go/a"))

	f.waitForCompletedBuildCount(2)
	f.podLog(pod, name, "second string")

	f.withState(func(state store.EngineState) {
		ms, _ := state.ManifestState(name)
		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
		assert.Equal(t, "first string\nsecond string\n", state.LogStore.SpanLog(spanID))
	})

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies crash-loop logging when the third pod instance is still running:
// the manifest log keeps earlier instances' output plus a restart warning.
func TestUpperPodLogInCrashLoopThirdInstanceStillUp(t *testing.T) {
	f := newTestFixture(t)
	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	f.startPod(pb.Build(), name)
	f.podLog(pb.Build(), name, "first string")
	pb = f.restartPod(pb)
	f.podLog(pb.Build(), name, "second string")
	pb = f.restartPod(pb)
	f.podLog(pb.Build(), name, "third string")

	// the third instance is still up, so we want to show the log from the last crashed pod plus the log from the current pod
	f.withState(func(es store.EngineState) {
		ms, _ := es.ManifestState(name)
		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
		assert.Contains(t, es.LogStore.SpanLog(spanID), "third string\n")
		assert.Contains(t, es.LogStore.ManifestLog(name), "second string\n")
		assert.Contains(t, es.LogStore.ManifestLog(name), "third string\n")
		assert.Contains(t, es.LogStore.ManifestLog(name), "WARNING: Detected container restart. Pod: foobar-fakePodID. Container: sancho.\n")
		assert.Contains(t, es.LogStore.SpanLog(spanID), "third string\n")
	})

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies crash-loop logging when the pod is currently down: the span log
// contains both instances' output separated by the restart warning.
func TestUpperPodLogInCrashLoopPodCurrentlyDown(t *testing.T) {
	f := newTestFixture(t)
	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	f.startPod(pb.Build(), name)
	f.podLog(pb.Build(), name, "first string")
	pb = f.restartPod(pb)
	f.podLog(pb.Build(), name, "second string")

	pod := pb.Build()
	pod.Status.ContainerStatuses[0].Ready = false
	f.notifyAndWaitForPodStatus(pod, name, func(pod v1alpha1.Pod) bool {
		return !store.AllPodContainersReady(pod)
	})

	f.withState(func(state store.EngineState) {
		ms, _ := state.ManifestState(name)
		spanID := k8sconv.SpanIDForPod(name, k8s.PodID(ms.MostRecentPod().Name))
		assert.Equal(t, "first string\nWARNING: Detected container restart. Pod: foobar-fakePodID. Container: sancho.\nsecond string\n", state.LogStore.SpanLog(spanID))
	})

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies that both containers of a two-container pod (main + sidecar) are
// recorded with the expected names, IDs, and readiness.
func TestUpperRecordPodWithMultipleContainers(t *testing.T) {
	f := newTestFixture(t)
	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	pod := pb.Build()
	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
		Name:        "sidecar",
		Image:       "sidecar-image",
		Ready:       false,
		ContainerID: "docker://sidecar",
	})
	f.startPod(pod, manifest.Name)

	f.notifyAndWaitForPodStatus(pod, manifest.Name, func(pod v1alpha1.Pod) bool {
		if len(pod.Containers) != 2 {
			return false
		}

		c1 := pod.Containers[0]
		require.Equal(t, container.Name("sancho").String(), c1.Name)
		require.Equal(t, podbuilder.FakeContainerID().String(), c1.ID)
		require.True(t, c1.Ready)

		c2 := pod.Containers[1]
		require.Equal(t, container.Name("sidecar").String(), c2.Name)
		require.Equal(t, container.ID("sidecar").String(), c2.ID)
		require.False(t, c2.Ready)

		return true
	})

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies that a malformed container ID on one container doesn't prevent
// populating status for the pod's other containers.
func TestUpperProcessOtherContainersIfOneErrors(t *testing.T) {
	f := newTestFixture(t)
	name := model.ManifestName("foobar")
	manifest := f.newManifest(name.String())
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	pod := pb.Build()
	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
		v1.ContainerStatus{
			Name:  "extra1",
			Image: "extra1-image",
			Ready: false,
			// when populating container info for this pod, we'll error when we try to parse
			// this cID -- we should still populate info for the other containers, though.
			ContainerID: "malformed",
		}, v1.ContainerStatus{
			Name:        "extra2",
			Image:       "extra2-image",
			Ready:       false,
			ContainerID: "docker://extra2",
		})
	f.startPod(pod, manifest.Name)

	f.notifyAndWaitForPodStatus(pod, manifest.Name, func(pod v1alpha1.Pod) bool {
		if len(pod.Containers) != 2 {
			return false
		}

		require.Equal(t, container.Name("sancho").String(), pod.Containers[0].Name)
		require.Equal(t, container.Name("extra2").String(), pod.Containers[1].Name)

		return true
	})

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies that a k8s Service change for a deployed UID records a load
// balancer URL on the manifest's runtime state.
func TestUpper_ServiceEvent(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	f.Start([]model.Manifest{manifest})

	f.waitForCompletedBuildCount(1)

	result := f.b.resultsByID[manifest.K8sTarget().ID()]
	uid := result.(store.K8sBuildResult).DeployedRefs[0].UID
	svc := servicebuilder.New(t, manifest).WithUID(uid).WithPort(8080).WithIP("1.2.3.4").Build()
	err := k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
	require.NoError(t, err)

	f.WaitUntilManifestState("lb updated", "foobar", func(ms store.ManifestState) bool {
		return len(ms.K8sRuntimeState().LBs) > 0
	})

	err = f.Stop()
	assert.NoError(t, err)

	ms, _ := f.upper.store.RLockState().ManifestState(manifest.Name)
	defer f.upper.store.RUnlockState()
	lbs := ms.K8sRuntimeState().LBs
	assert.Equal(t, 1, len(lbs))
	url, ok := lbs[k8s.ServiceName(svc.Name)]
	if !ok {
		t.Fatalf("%v did not contain key 'myservice'", lbs)
	}
	assert.Equal(t, "http://1.2.3.4:8080/", url.String())
}

// Verifies that a Service update with an empty IP removes the previously
// recorded load balancer URL.
func TestUpper_ServiceEventRemovesURL(t *testing.T) {
	f := newTestFixture(t)
	manifest := f.newManifest("foobar")
	f.Start([]model.Manifest{manifest})

	f.waitForCompletedBuildCount(1)

	result := f.b.resultsByID[manifest.K8sTarget().ID()]
	uid := result.(store.K8sBuildResult).DeployedRefs[0].UID
	sb := servicebuilder.New(t, manifest).WithUID(uid).WithPort(8080).WithIP("1.2.3.4")
	svc := sb.Build()
	err := k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
	require.NoError(t, err)

	f.WaitUntilManifestState("lb url added", "foobar", func(ms store.ManifestState) bool {
		url := ms.K8sRuntimeState().LBs[k8s.ServiceName(svc.Name)]
		if url == nil {
			return false
		}
		return "http://1.2.3.4:8080/" == url.String()
	})

	svc = sb.WithIP("").Build()
	err = k8swatch.DispatchServiceChange(f.store, svc, manifest.Name, "")
	require.NoError(t, err)

	f.WaitUntilManifestState("lb url removed", "foobar", func(ms store.ManifestState) bool {
		url := ms.K8sRuntimeState().LBs[k8s.ServiceName(svc.Name)]
		return url == nil
	})

	err = f.Stop()
	assert.NoError(t, err)
}

// Smoke test: a pod log can be recorded without error.
func TestUpper_PodLogs(t *testing.T) {
	f := newTestFixture(t)
	name := model.ManifestName("fe")
	manifest := f.newManifest(string(name))
	pb := f.registerForDeployer(manifest)

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	pod := pb.Build()
	f.startPod(pod, name)

	f.podLog(pod, name, "Hello world!\n")

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies that a k8s warning event for a deployed object's UID appears in
// both the manifest log and the global log.
func TestK8sEventGlobalLogAndManifestLog(t *testing.T) {
	f := newTestFixture(t)

	name := model.ManifestName("fe")
	manifest := f.newManifest(string(name))

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	objRef := v1.ObjectReference{UID: f.lastDeployedUID(name)}

	warnEvt := &v1.Event{
		InvolvedObject: objRef,
		Message:        "something has happened zomg",
		Type:           v1.EventTypeWarning,
		ObjectMeta: metav1.ObjectMeta{
			CreationTimestamp: apis.NewTime(f.Now()),
			Namespace:         k8s.DefaultNamespace.String(),
		},
	}
	f.kClient.UpsertEvent(warnEvt)

	f.WaitUntil("event message appears in manifest log", func(st store.EngineState) bool {
		return strings.Contains(st.LogStore.ManifestLog(name), "something has happened zomg")
	})

	f.withState(func(st store.EngineState) {
		assert.Contains(t, st.LogStore.String(), "something has happened zomg", "event message not in global log")
	})

	err := f.Stop()
	assert.NoError(t, err)
}

// Verifies that a k8s event whose UID maps to no known manifest is not logged.
func TestK8sEventNotLoggedIfNoManifestForUID(t *testing.T) {
	f := newTestFixture(t)

	name := model.ManifestName("fe")
	manifest := f.newManifest(string(name))

	f.Start([]model.Manifest{manifest})
	f.waitForCompletedBuildCount(1)

	warnEvt := &v1.Event{
		InvolvedObject: v1.ObjectReference{UID: types.UID("someRandomUID")},
		Message:        "something has happened zomg",
		Type:           v1.EventTypeWarning,
		ObjectMeta: metav1.ObjectMeta{
			CreationTimestamp: apis.NewTime(f.Now()),
			Namespace:         k8s.DefaultNamespace.String(),
		},
	}
	f.kClient.UpsertEvent(warnEvt)

	// Brief pause to give the event a chance to (incorrectly) be logged.
	time.Sleep(10 * time.Millisecond)

	assert.NotContains(t, f.log.String(), "something has happened zomg",
		"should not log event message b/c it doesn't have a UID -> Manifest mapping")
}

// Verifies that a HUD exit action with a nil error shuts the upper down cleanly.
func TestHudExitNoError(t *testing.T) {
	f := newTestFixture(t)
	f.Start([]model.Manifest{})
	f.store.Dispatch(hud.NewExitAction(nil))
	err := f.WaitForExit()
	assert.NoError(t, err)
}

// Verifies that a HUD exit action carrying an error does NOT produce a clean exit.
func TestHudExitWithError(t *testing.T) {
	f := newTestFixture(t)
	f.Start([]model.Manifest{})
	e := errors.New("helllllo")
	f.store.Dispatch(hud.NewExitAction(e))
	_ = f.WaitForNoExit()
}

// Verifies that files referenced by a (failing) Tiltfile, e.g. via read_file,
// are still registered as config paths to watch.
func TestNewConfigsAreWatchedAfterFailure(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	f.loadAndStart()

	f.WriteConfigFiles("Tiltfile", "read_file('foo.txt')")
	f.WaitUntil("foo.txt is a config file", func(state store.EngineState) bool {
		for _, s := range state.MainConfigPaths() {
			if s == f.JoinPath("foo.txt") {
				return true
			}
		}
		return false
	})
}

// Verifies that two docker-compose manifests are built in order with their
// respective DC target IDs.
func TestDockerComposeUp(t *testing.T) {
	f := newTestFixture(t)
	redis, server := f.setupDCFixture()

	f.Start([]model.Manifest{redis, server})
	call := f.nextCall()
	assert.True(t, call.dcState().IsEmpty())
	assert.False(t, call.dc().ID().Empty())
	assert.Equal(t, redis.DockerComposeTarget().ID(), call.dc().ID())
	call = f.nextCall()
	assert.True(t, call.dcState().IsEmpty())
	assert.False(t, call.dc().ID().Empty())
	assert.Equal(t, server.DockerComposeTarget().ID(), call.dc().ID())
}

// Verifies that a file change under a docker-compose fixture triggers a
// redeploy that records the changed file.
func TestDockerComposeRedeployFromFileChange(t *testing.T) {
	f := newTestFixture(t)
	r, m := f.setupDCFixture()

	f.Start([]model.Manifest{r, m})

	_ = f.nextCall()
	_ = f.nextCall()

	// Change a file -- should trigger build
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("package.json"))
	call := f.nextCall()
	assert.Equal(t, []string{f.JoinPath("package.json")}, call.oneImageState().FilesChanged())
}

// Verifies docker-compose build output lands in both the global log and the
// build's log span.
func TestDockerComposeRecordsBuildLogs(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	m, _ := f.setupDCFixture()
	expected := "yarn install"
	f.setBuildLogOutput(m.DockerComposeTarget().ID(), expected)

	f.loadAndStart()
	f.waitForCompletedBuildCount(2)

	// recorded in global log
	f.withState(func(st store.EngineState) {
		assert.Contains(t, st.LogStore.String(), expected)

		ms, _ := st.ManifestState(m.ManifestName())
		spanID := ms.LastBuild().SpanID
		assert.Contains(t, st.LogStore.SpanLog(spanID), expected)
	})
}

// Verifies that a successful docker-compose build with a running container
// yields RuntimeStatusOK and records the container ID.
func TestDockerComposeBuildCompletedSetsStatusToUpIfSuccessful(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	m1, _ := f.setupDCFixture()

	expected := container.ID("aaaaaa")
	f.b.nextDockerComposeContainerID = expected
	containerState := docker.NewRunningContainerState()
	f.b.nextDockerComposeContainerState = &containerState

	f.loadAndStart()
	f.waitForCompletedBuildCount(2)

	f.withManifestState(m1.ManifestName(), func(st store.ManifestState) {
		state, ok := st.RuntimeState.(dockercompose.State)
		if !ok {
			t.Fatal("expected RuntimeState to be docker compose, but it wasn't")
		}
		assert.Equal(t, expected, state.ContainerID)
		assert.Equal(t, v1alpha1.RuntimeStatusOK, state.RuntimeStatus())
	})
}

// Verifies that disabling a docker-compose resource issues exactly one
// `rm` call for that service.
func TestDockerComposeStopOnDisable(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	m, _ := f.setupDCFixture()

	expected := container.ID("aaaaaa")
	f.b.nextDockerComposeContainerID = expected
	containerState := docker.NewRunningContainerState()
	f.b.nextDockerComposeContainerState = &containerState

	f.loadAndStart()
	f.waitForCompletedBuildCount(2)

	f.setDisableState(m.Name, true)

	require.Eventually(t, func() bool {
		return len(f.dcc.RmCalls()) > 0
	}, stdTimeout, time.Millisecond)
	require.Len(t, f.dcc.RmCalls(), 1)
	require.Len(t, f.dcc.RmCalls()[0].Specs, 1)
	require.Equal(t, m.Name.String(), f.dcc.RmCalls()[0].Specs[0].Service)
}

// Verifies that re-enabling a previously disabled docker-compose resource
// kicks off another build.
func TestDockerComposeStartOnReenable(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	m, _ := f.setupDCFixture()

	expected := container.ID("aaaaaa")
	f.b.nextDockerComposeContainerID = expected
	containerState := docker.NewRunningContainerState()
	f.b.nextDockerComposeContainerState = &containerState

	f.loadAndStart()
	f.waitForCompletedBuildCount(2)

	f.setDisableState(m.Name, true)

	require.Eventually(t, func() bool {
		return len(f.dcc.RmCalls()) > 0
	}, stdTimeout, time.Millisecond, "DC rm")

	f.setDisableState(m.Name, false)

	f.waitForCompletedBuildCount(3)
}

// Verifies that an empty Tiltfile fails the Tiltfile build with a
// "No resources found" error, logged exactly once in each log view.
func TestEmptyTiltfile(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	f.WriteFile("Tiltfile", "")
	closeCh := make(chan error)
	go func() {
		err := f.upper.Start(f.ctx, []string{}, model.TiltBuild{}, f.JoinPath("Tiltfile"),
			store.TerminalModeHUD, analytics.OptIn, token.Token("unit test token"), "nonexistent.example.com")
		closeCh <- err
	}()
	f.WaitUntil("build is set", func(st store.EngineState) bool {
		return !st.TiltfileStates[model.MainTiltfileManifestName].LastBuild().Empty()
	})
	f.withState(func(st store.EngineState) {
		assert.Contains(t, st.TiltfileStates[model.MainTiltfileManifestName].LastBuild().Error.Error(),
			"No resources found. Check out ")
		assertContainsOnce(t, st.LogStore.String(), "No resources found. Check out ")
		assertContainsOnce(t, st.LogStore.ManifestLog(store.MainTiltfileManifestName), "No resources found. Check out ")
		buildRecord := st.TiltfileStates[model.MainTiltfileManifestName].LastBuild()
		assertContainsOnce(t, st.LogStore.SpanLog(buildRecord.SpanID), "No resources found. Check out ")
	})

	f.cancel()

	err := <-closeCh
	testutils.FailOnNonCanceledErr(t, err, "upper.Start failed")
}

// Verifies that Upper.Start stores its arguments (user args, Tiltfile path,
// token, analytics opt, cloud address) on the engine state.
func TestUpperStart(t *testing.T) {
	f := newTestFixture(t)
	f.useRealTiltfileLoader()

	tok := token.Token("unit test token")
	cloudAddress := "nonexistent.example.com"

	closeCh := make(chan error)
	f.WriteFile("Tiltfile", "")
	go func() {
		err := f.upper.Start(f.ctx, []string{"foo", "bar"}, model.TiltBuild{}, f.JoinPath("Tiltfile"),
			store.TerminalModeHUD, analytics.OptIn, tok, cloudAddress)
		closeCh <- err
	}()
	f.WaitUntil("init action processed", func(state store.EngineState) bool {
		return !state.TiltStartTime.IsZero()
	})

	f.withState(func(state store.EngineState) {
		require.Equal(t, []string{"foo", "bar"}, state.UserConfigState.Args)
		require.Equal(t, f.JoinPath("Tiltfile"), state.DesiredTiltfilePath)
		require.Equal(t, tok, state.Token)
		require.Equal(t, analytics.OptIn, state.AnalyticsEffectiveOpt())
		require.Equal(t, cloudAddress, state.CloudAddress)
	})

	f.cancel()

	err := <-closeCh
	testutils.FailOnNonCanceledErr(t, err, "upper.Start failed")
}

// Verifies build ordering and state bookkeeping when two manifests share a
// common base image: a shared-file change rebuilds both, reusing the shared
// image result for the second manifest and propagating DepsChanged.
func TestWatchManifestsWithCommonAncestor(t *testing.T) {
	f := newTestFixture(t)
	m1, m2 := NewManifestsWithCommonAncestor(f)
	f.Start([]model.Manifest{m1, m2})

	f.waitForCompletedBuildCount(2)

	call := f.nextCall("m1 build1")
	assert.Equal(t, m1.K8sTarget(), call.k8s())

	call = f.nextCall("m2 build1")
	assert.Equal(t, m2.K8sTarget(), call.k8s())

	f.WriteFile(filepath.Join("common", "a.txt"), "hello world")

	aPath := f.JoinPath("common", "a.txt")
	f.fsWatcher.Events <- watch.NewFileEvent(aPath)

	f.waitForCompletedBuildCount(4)

	// Make sure that both builds are triggered, and that they
	// are triggered in a particular order.
	call = f.nextCall("m1 build2")
	assert.Equal(t, m1.K8sTarget(), call.k8s())

	state := call.state[m1.ImageTargets[0].ID()]
	assert.Equal(t, map[string]bool{aPath: true}, state.FilesChangedSet)

	// Make sure that when the second build is triggered, we did the bookkeeping
	// correctly around reusing the image and propagating DepsChanged when
	// we deploy the second k8s target.
	call = f.nextCall("m2 build2")
	assert.Equal(t, m2.K8sTarget(), call.k8s())

	id := m2.ImageTargets[0].ID()
	result := f.b.resultsByID[id]
	assert.Equal(t, result, call.state[id].LastResult)
	assert.Equal(t, 0, len(call.state[id].FilesChangedSet))

	id = m2.ImageTargets[1].ID()
	result = f.b.resultsByID[id]

	// Assert the 2nd image was not re-used from the previous result.
	assert.NotEqual(t, result, call.state[id].LastResult)
	assert.Equal(t, map[model.TargetID]bool{m2.ImageTargets[0].ID(): true},
		call.state[id].DepsChangedSet)

	err := f.Stop()
	assert.NoError(t, err)
	f.assertAllBuildsConsumed()
}

// Verifies that config-file changes that alter a manifest are recorded as
// that build's Edits. Currently skipped as flaky.
func TestConfigChangeThatChangesManifestIsIncludedInManifestsChangedFile(t *testing.T) {
	// https://app.clubhouse.io/windmill/story/5701/test-testconfigchangethatchangesmanifestisincludedinmanifestschangedfile-is-flaky
	t.Skip("TODO(nick): fix this")
	f := newTestFixture(t)
	f.useRealTiltfileLoader()
	tiltfile := `
docker_build('gcr.io/windmill-public-containers/servantes/snack', '.')
k8s_yaml('snack.yaml')`
	f.WriteFile("Tiltfile", tiltfile)
	f.WriteFile("Dockerfile", `FROM iron/go:dev`)
	f.WriteFile("snack.yaml", testyaml.Deployment("snack", "gcr.io/windmill-public-containers/servantes/snack:old"))

	f.loadAndStart()
	f.waitForCompletedBuildCount(1)

	f.WriteFile("snack.yaml", testyaml.Deployment("snack", "gcr.io/windmill-public-containers/servantes/snack:new"))
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("snack.yaml"))
	f.waitForCompletedBuildCount(2)

	f.withManifestState("snack", func(ms store.ManifestState) {
		require.Equal(t, []string{f.JoinPath("snack.yaml")}, ms.LastBuild().Edits)
	})

	f.WriteFile("Dockerfile", `FROM iron/go:foobar`)
	f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Dockerfile"))
	f.waitForCompletedBuildCount(3)

	f.withManifestState("snack", func(ms store.ManifestState) {
		require.Equal(t, []string{f.JoinPath("Dockerfile")}, ms.LastBuild().Edits)
	})
}

// Verifies that analytics opt changes flow through the store and that the
// opter subscriber sees each transition exactly once.
func TestSetAnalyticsOpt(t *testing.T) {
	f := newTestFixture(t)

	opt := func(ia InitAction) InitAction {
		ia.AnalyticsUserOpt = analytics.OptIn
		return ia
	}

	f.Start([]model.Manifest{}, opt)
	f.store.Dispatch(store.AnalyticsUserOptAction{Opt: analytics.OptOut})
	f.WaitUntil("opted out", func(state store.EngineState) bool {
		return state.AnalyticsEffectiveOpt() == analytics.OptOut
	})

	// if we don't wait for 1 here, it's possible the state flips to out and back to in before the subscriber sees it,
	// and we end up with no events
	f.opter.WaitUntilCount(t, 1)

	f.store.Dispatch(store.AnalyticsUserOptAction{Opt: analytics.OptIn})
	f.WaitUntil("opted in", func(state store.EngineState) bool {
		return state.AnalyticsEffectiveOpt() == analytics.OptIn
	})
	f.opter.WaitUntilCount(t, 2)

	err := f.Stop()
	if !assert.NoError(t, err) {
		return
	}
	assert.Equal(t, []analytics.Opt{analytics.OptOut, analytics.OptIn}, f.opter.Calls())
}

// Verifies that feature flags from a Tiltfile reload are stored on the
// engine state and can be toggled by a subsequent reload.
func TestFeatureFlagsStoredOnState(t *testing.T) {
	f := newTestFixture(t)

	f.Start([]model.Manifest{})
	f.ensureCluster()

	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:       model.MainTiltfileManifestName,
		FinishTime: f.Now(),
		Features:   map[string]bool{"foo": true},
	})

	f.WaitUntil("feature is enabled", func(state store.EngineState) bool {
		return state.Features["foo"] == true
	})

	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:       model.MainTiltfileManifestName,
		FinishTime: f.Now(),
		Features:   map[string]bool{"foo": false},
	})

	f.WaitUntil("feature is disabled", func(state store.EngineState) bool {
		return state.Features["foo"] == false
	})
}

// (head of TestTeamIDStoredOnState — continues past this chunk)
func TestTeamIDStoredOnState(t *testing.T) {
	f := newTestFixture(t)
	f.Start([]model.Manifest{})
	f.ensureCluster()
	f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{
		Name:
model.MainTiltfileManifestName, FinishTime: f.Now(), TeamID: "sharks", }) f.WaitUntil("teamID is set to sharks", func(state store.EngineState) bool { return state.TeamID == "sharks" }) f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{ Name: model.MainTiltfileManifestName, FinishTime: f.Now(), TeamID: "jets", }) f.WaitUntil("teamID is set to jets", func(state store.EngineState) bool { return state.TeamID == "jets" }) } func TestBuildLogAction(t *testing.T) { f := newTestFixture(t) f.bc.DisableForTesting() manifest := f.newManifest("alert-injester") f.Start([]model.Manifest{manifest}) f.store.Dispatch(buildcontrols.BuildStartedAction{ ManifestName: manifest.Name, StartTime: f.Now(), SpanID: SpanIDForBuildLog(1), Source: "buildcontrol", }) f.store.Dispatch(store.NewLogAction(manifest.Name, SpanIDForBuildLog(1), logger.InfoLvl, nil, []byte(`a bc def ghij`))) f.WaitUntil("log appears", func(es store.EngineState) bool { ms, _ := es.ManifestState("alert-injester") spanID := ms.EarliestCurrentBuild().SpanID return spanID != "" && len(es.LogStore.SpanLog(spanID)) > 0 }) f.withState(func(s store.EngineState) { assert.Contains(t, s.LogStore.String(), `alert-injest… │ a alert-injest… │ bc alert-injest… │ def alert-injest… │ ghij`) }) err := f.Stop() assert.Nil(t, err) } func TestBuildErrorLoggedOnceByUpper(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("alert-injester") err := errors.New("cats and dogs, living together") f.SetNextBuildError(err) f.Start([]model.Manifest{manifest}) f.waitForCompletedBuildCount(1) // so the test name says "once", but the fake builder also logs once, so we get it twice f.withState(func(state store.EngineState) { require.Equal(t, 2, strings.Count(state.LogStore.String(), err.Error())) }) } func TestTiltfileChangedFilesOnlyLoggedAfterFirstBuild(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', 
dockerfile='Dockerfile') k8s_yaml('snack.yaml')`) f.WriteFile("Dockerfile", `FROM iron/go:dev1`) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("src/main.go", "hello") f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.waitForCompletedBuildCount(1) // we shouldn't log changes for first build f.withState(func(state store.EngineState) { require.NotContains(t, state.LogStore.String(), "changed: [") }) f.WriteFile("Tiltfile", ` docker_build('gcr.io/windmill-public-containers/servantes/snack', './src', dockerfile='Dockerfile', ignore='foo') k8s_yaml('snack.yaml')`) f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile")) f.WaitUntil("Tiltfile reloaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 2 }) f.waitForCompletedBuildCount(2) f.withState(func(state store.EngineState) { expectedMessage := fmt.Sprintf("1 File Changed: [%s]", f.JoinPath("Tiltfile")) require.Contains(t, state.LogStore.String(), expectedMessage) }) } func TestDeployUIDsInEngineState(t *testing.T) { f := newTestFixture(t) uid := types.UID("fake-uid") f.b.nextDeployedUID = uid manifest := f.newManifest("fe") f.Start([]model.Manifest{manifest}) _ = f.nextCall() f.WaitUntilManifestState("UID in ManifestState", "fe", func(state store.ManifestState) bool { return k8sconv.ContainsUID(state.K8sRuntimeState().ApplyFilter, uid) }) err := f.Stop() assert.NoError(t, err) f.assertAllBuildsConsumed() } func TestEnableFeatureOnFail(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` enable_feature('snapshots') fail('goodnight moon') `) f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.withState(func(state store.EngineState) { assert.True(t, state.Features["snapshots"]) }) } func TestSecretScrubbed(t *testing.T) { f := 
newTestFixture(t) f.useRealTiltfileLoader() tiltfile := ` print('about to print secret') print('aGVsbG8=') k8s_yaml('secret.yaml')` f.WriteFile("Tiltfile", tiltfile) f.WriteFile("secret.yaml", ` apiVersion: v1 kind: Secret metadata: name: my-secret data: client-secret: aGVsbG8= `) f.loadAndStart() f.waitForCompletedBuildCount(1) f.withState(func(state store.EngineState) { log := state.LogStore.String() assert.Contains(t, log, "about to print secret") assert.NotContains(t, log, "aGVsbG8=") assert.Contains(t, log, "[redacted secret my-secret:client-secret]") }) } func TestShortSecretNotScrubbed(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() tiltfile := ` print('about to print secret: s') k8s_yaml('secret.yaml')` f.WriteFile("Tiltfile", tiltfile) f.WriteFile("secret.yaml", ` apiVersion: v1 kind: Secret metadata: name: my-secret stringData: client-secret: s `) f.loadAndStart() f.waitForCompletedBuildCount(1) f.withState(func(state store.EngineState) { log := state.LogStore.String() assert.Contains(t, log, "about to print secret: s") assert.NotContains(t, log, "redacted") }) } func TestDisableDockerPrune(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Dockerfile", `FROM iron/go:prod`) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("Tiltfile", ` docker_prune_settings(disable=True) `+simpleTiltfile) f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.withState(func(state store.EngineState) { assert.False(t, state.DockerPruneSettings.Enabled) }) } func TestDockerPruneEnabledByDefault(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", simpleTiltfile) f.WriteFile("Dockerfile", `FROM iron/go:prod`) f.WriteFile("snack.yaml", simpleYAML) f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) 
f.withState(func(state store.EngineState) { assert.True(t, state.DockerPruneSettings.Enabled) assert.Equal(t, model.DockerPruneDefaultMaxAge, state.DockerPruneSettings.MaxAge) assert.Equal(t, model.DockerPruneDefaultInterval, state.DockerPruneSettings.Interval) }) } func TestHasEverBeenReadyK8s(t *testing.T) { f := newTestFixture(t) m := f.newManifest("foobar") pb := f.registerForDeployer(m) f.Start([]model.Manifest{m}) f.waitForCompletedBuildCount(1) f.withManifestState(m.Name, func(ms store.ManifestState) { require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded()) }) f.podEvent(pb.WithContainerReady(true).Build()) f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool { return state.RuntimeState.HasEverBeenReadyOrSucceeded() }) } func TestHasEverBeenCompleteK8s(t *testing.T) { f := newTestFixture(t) m := f.newManifest("foobar") pb := f.registerForDeployer(m) f.Start([]model.Manifest{m}) f.waitForCompletedBuildCount(1) f.withManifestState(m.Name, func(ms store.ManifestState) { require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded()) }) f.podEvent(pb.WithPhase(string(v1.PodSucceeded)).Build()) f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool { return state.RuntimeState.HasEverBeenReadyOrSucceeded() }) } func TestHasEverBeenReadyLocal(t *testing.T) { f := newTestFixture(t) m := manifestbuilder.New(f, "foobar").WithLocalResource("foo", []string{f.Path()}).Build() f.SetNextBuildError(errors.New("failure!")) f.Start([]model.Manifest{m}) // first build will fail, HasEverBeenReadyOrSucceeded should be false f.waitForCompletedBuildCount(1) f.withManifestState(m.Name, func(ms store.ManifestState) { require.False(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded()) }) // second build will succeed, HasEverBeenReadyOrSucceeded should be true f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("bar", "main.go")) f.WaitUntilManifestState("flagged ready", m.Name, func(state store.ManifestState) bool 
{ return state.RuntimeState.HasEverBeenReadyOrSucceeded() }) } func TestVersionSettingsStoredOnState(t *testing.T) { f := newTestFixture(t) f.Start([]model.Manifest{}) f.ensureCluster() vs := model.VersionSettings{ CheckUpdates: false, } f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{ Name: model.MainTiltfileManifestName, FinishTime: f.Now(), VersionSettings: vs, }) f.WaitUntil("CheckVersionUpdates is set to false", func(state store.EngineState) bool { return state.VersionSettings.CheckUpdates == false }) } func TestAnalyticsTiltfileOpt(t *testing.T) { f := newTestFixture(t) f.Start([]model.Manifest{}) f.ensureCluster() f.withState(func(state store.EngineState) { assert.Equal(t, analytics.OptDefault, state.AnalyticsEffectiveOpt()) }) f.store.Dispatch(ctrltiltfile.ConfigsReloadedAction{ Name: model.MainTiltfileManifestName, FinishTime: f.Now(), AnalyticsTiltfileOpt: analytics.OptIn, }) f.WaitUntil("analytics tiltfile opt-in", func(state store.EngineState) bool { return state.AnalyticsTiltfileOpt == analytics.OptIn }) f.withState(func(state store.EngineState) { assert.Equal(t, analytics.OptIn, state.AnalyticsEffectiveOpt()) }) } func TestConfigArgsChangeCausesTiltfileRerun(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` print('hello') config.define_string_list('foo') cfg = config.parse() print('foo=', cfg['foo'])`) opt := func(ia InitAction) InitAction { ia.UserArgs = []string{"--foo", "bar"} return ia } f.loadAndStart(opt) f.WaitUntil("first tiltfile build finishes", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.withState(func(state store.EngineState) { spanID := state.MainTiltfileState().LastBuild().SpanID require.Contains(t, state.LogStore.SpanLog(spanID), `foo= ["bar"]`) }) err := tiltfiles.SetTiltfileArgs(f.ctx, f.ctrlClient, []string{"--foo", "baz", "--foo", "quu"}) require.NoError(t, err) f.WaitUntil("second tiltfile build finishes", func(state 
store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 2 }) f.withState(func(state store.EngineState) { spanID := state.MainTiltfileState().LastBuild().SpanID require.Contains(t, state.LogStore.SpanLog(spanID), `foo= ["baz", "quu"]`) }) } func TestTelemetryLogAction(t *testing.T) { f := newTestFixture(t) f.Start([]model.Manifest{}) f.store.Dispatch(store.NewLogAction(model.MainTiltfileManifestName, "0", logger.InfoLvl, nil, []byte("testing"))) f.WaitUntil("log is stored", func(state store.EngineState) bool { l := state.LogStore.ManifestLog(store.MainTiltfileManifestName) return strings.Contains(l, "testing") }) } func TestLocalResourceServeChangeCmd(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", "local_resource('foo', serve_cmd='true')") f.loadAndStart() f.WaitUntil("true is served", func(state store.EngineState) bool { return strings.Contains(state.LogStore.ManifestLog("foo"), "Starting cmd true") }) f.WriteFile("Tiltfile", "local_resource('foo', serve_cmd='false')") f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile")) f.WaitUntil("false is served", func(state store.EngineState) bool { return strings.Contains(state.LogStore.ManifestLog("foo"), "Starting cmd false") }) f.fe.RequireNoKnownProcess(t, "true") err := f.Stop() require.NoError(t, err) } func TestDefaultUpdateSettings(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Dockerfile", `FROM iron/go:prod`) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("Tiltfile", simpleTiltfile) f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.withState(func(state store.EngineState) { assert.Equal(t, model.DefaultUpdateSettings(), state.UpdateSettings) }) } func TestSetK8sUpsertTimeout(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Dockerfile", `FROM iron/go:prod`) f.WriteFile("snack.yaml", 
simpleYAML) f.WriteFile("Tiltfile", ` update_settings(k8s_upsert_timeout_secs=123) `+simpleTiltfile) f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.withState(func(state store.EngineState) { assert.Equal(t, 123*time.Second, state.UpdateSettings.K8sUpsertTimeout()) }) } func TestSetMaxBuildSlots(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Dockerfile", `FROM iron/go:prod`) f.WriteFile("snack.yaml", simpleYAML) f.WriteFile("Tiltfile", ` update_settings(max_parallel_updates=123) `+simpleTiltfile) f.loadAndStart() f.WaitUntil("Tiltfile loaded", func(state store.EngineState) bool { return len(state.MainTiltfileState().BuildHistory) == 1 }) f.withState(func(state store.EngineState) { assert.Equal(t, 123, state.UpdateSettings.MaxParallelUpdates()) }) } // https://github.com/tilt-dev/tilt/issues/3514 func TestTiltignoreRespectedOnError(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("a.txt", "hello") f.WriteFile("Tiltfile", `read_file('a.txt') fail('x')`) f.WriteFile(".tiltignore", "a.txt") f.Init(InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), TerminalMode: store.TerminalModeHUD, StartTime: f.Now(), }) f.WaitUntil(".tiltignore processed", func(es store.EngineState) bool { var fw v1alpha1.FileWatch err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "configs:(Tiltfile)"}, &fw) if err != nil { return false } return strings.Contains(strings.Join(fw.Spec.Ignores[0].Patterns, "\n"), "a.txt") }) f.WriteFile(".tiltignore", "a.txt\nb.txt\n") f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile")) f.WaitUntil(".tiltignore processed", func(es store.EngineState) bool { var fw v1alpha1.FileWatch err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "configs:(Tiltfile)"}, &fw) if err != nil { return false } return strings.Contains(strings.Join(fw.Spec.Ignores[0].Patterns, "\n"), "b.txt") }) err := f.Stop() 
assert.NoError(t, err) } func TestHandleTiltfileTriggerQueue(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", `print("hello world")`) f.Init(InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), TerminalMode: store.TerminalModeHUD, StartTime: f.Now(), }) f.WaitUntil("init action processed", func(state store.EngineState) bool { return !state.TiltStartTime.IsZero() }) f.withState(func(st store.EngineState) { assert.False(t, st.ManifestInTriggerQueue(model.MainTiltfileManifestName), "initial state should NOT have Tiltfile in trigger queue") assert.Equal(t, model.BuildReasonNone, st.MainTiltfileState().TriggerReason, "initial state should not have Tiltfile trigger reason") }) action := store.AppendToTriggerQueueAction{Name: model.MainTiltfileManifestName, Reason: 123} f.store.Dispatch(action) f.WaitUntil("Tiltfile trigger processed", func(st store.EngineState) bool { return st.ManifestInTriggerQueue(model.MainTiltfileManifestName) && st.MainTiltfileState().TriggerReason == 123 }) f.WaitUntil("Tiltfile built and trigger cleared", func(st store.EngineState) bool { return len(st.MainTiltfileState().BuildHistory) == 2 && // Tiltfile built b/c it was triggered... 
// and the trigger was cleared !st.ManifestInTriggerQueue(model.MainTiltfileManifestName) && st.MainTiltfileState().TriggerReason == model.BuildReasonNone }) err := f.Stop() assert.NoError(t, err) } func TestOverrideTriggerModeEvent(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("foo") f.Start([]model.Manifest{manifest}) _ = f.nextCall() f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool { return mt.Manifest.TriggerMode == model.TriggerModeAuto }) f.upper.store.Dispatch(server.OverrideTriggerModeAction{ ManifestNames: []model.ManifestName{"foo"}, TriggerMode: model.TriggerModeManualWithAutoInit, }) f.WaitUntilManifest("triggerMode updated", "foo", func(mt store.ManifestTarget) bool { return mt.Manifest.TriggerMode == model.TriggerModeManualWithAutoInit }) err := f.Stop() require.NoError(t, err) f.assertAllBuildsConsumed() } func TestOverrideTriggerModeBadManifestLogsError(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("foo") f.Start([]model.Manifest{manifest}) _ = f.nextCall() f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool { return mt.Manifest.TriggerMode == model.TriggerModeAuto }) f.upper.store.Dispatch(server.OverrideTriggerModeAction{ ManifestNames: []model.ManifestName{"bar"}, TriggerMode: model.TriggerModeManualWithAutoInit, }) f.log.AssertEventuallyContains(t, "no such manifest", stdTimeout) err := f.Stop() require.NoError(t, err) f.assertAllBuildsConsumed() } func TestOverrideTriggerModeBadTriggerModeLogsError(t *testing.T) { f := newTestFixture(t) manifest := f.newManifest("foo") f.Start([]model.Manifest{manifest}) _ = f.nextCall() f.WaitUntilManifest("manifest has triggerMode = auto (default)", "foo", func(mt store.ManifestTarget) bool { return mt.Manifest.TriggerMode == model.TriggerModeAuto }) f.upper.store.Dispatch(server.OverrideTriggerModeAction{ ManifestNames: []model.ManifestName{"fooo"}, 
TriggerMode: 12345, }) f.log.AssertEventuallyContains(t, "invalid trigger mode", stdTimeout) err := f.Stop() require.NoError(t, err) f.assertAllBuildsConsumed() } func TestDisableButtonIsCreated(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", ` enable_feature('disable_resources') local_resource('foo', 'echo hi') `) f.loadAndStart() f.waitForCompletedBuildCount(1) var b v1alpha1.UIButton require.Eventually(t, func() bool { err := f.ctrlClient.Get(f.ctx, types.NamespacedName{Name: "toggle-foo-disable"}, &b) require.NoError(t, ctrlclient.IgnoreNotFound(err)) return err == nil }, time.Second, time.Millisecond) require.Equal(t, "DisableToggle", b.Annotations[v1alpha1.AnnotationButtonType]) require.Equal(t, []v1alpha1.UIInputSpec{ { Name: "action", Hidden: &v1alpha1.UIHiddenInputSpec{Value: "on"}, }, }, b.Spec.Inputs) } func TestCmdServerDoesntStartWhenDisabled(t *testing.T) { f := newTestFixture(t) f.useRealTiltfileLoader() f.WriteFile("Tiltfile", `print('dummy tiltfile with no resources')`) f.loadAndStart() f.WriteFile("Tiltfile", `print('tiltfile 1') local_resource('foo', serve_cmd='echo hi; sleep 10') local_resource('bar', 'true') config.set_enabled_resources(['bar']) `) f.fsWatcher.Events <- watch.NewFileEvent(f.JoinPath("Tiltfile")) // make sure we got to the point where we recognized the server is disabled without actually // running the command f.WaitUntil("disabled", func(state store.EngineState) bool { ds := f.localServerController.Get("foo").Status.DisableStatus return ds != nil && ds.Disabled }) require.Equal(t, f.log.String(), "") } func TestDisabledResourceRemovedFromTriggerQueue(t *testing.T) { f := newTestFixture(t) m := manifestbuilder.New(f, "foo").WithLocalResource("foo", []string{f.Path()}).Build() f.Start([]model.Manifest{m}) f.waitForCompletedBuildCount(1) f.bc.DisableForTesting() f.store.Dispatch(store.AppendToTriggerQueueAction{Name: m.Name, Reason: model.BuildReasonFlagTriggerCLI}) f.WaitUntil("in trigger 
queue", func(state store.EngineState) bool { return state.ManifestInTriggerQueue(m.Name) }) f.setDisableState(m.Name, true) f.WaitUntil("is removed from trigger queue", func(state store.EngineState) bool { return !state.ManifestInTriggerQueue(m.Name) }) } func TestLocalResourceNoServeCmdDeps(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("TODO(nick): fix this") } f := newTestFixture(t) f.useRealTiltfileLoader() // create a Tiltfile with 2 resources: // 1. foo - update only, i.e. a job, with a readiness_probe also defined // (which should be ignored as there's no server to be ready!) // 2. bar - local_resource w/ dep on foo f.WriteFile("Tiltfile", ` local_resource('foo', cmd='true', readiness_probe=probe(http_get=http_get_action(port=12345))) local_resource('bar', serve_cmd='while true; do echo hi; sleep 30; done', resource_deps=['foo']) `) f.loadAndStart() f.waitForCompletedBuildCount(2) f.withState(func(es store.EngineState) { require.True(t, strings.Contains(es.LogStore.ManifestLog("(Tiltfile)"), `WARNING: Ignoring readiness probe for local resource "foo" (no serve_cmd was defined)`), "Log did not contain ignored readiness probe warning") }) // foo should indicate that it has succeeded since there is no serve_cmd and thus no runtime status f.withManifestState("foo", func(ms store.ManifestState) { require.True(t, ms.RuntimeState.HasEverBeenReadyOrSucceeded()) require.Equal(t, v1alpha1.RuntimeStatusNotApplicable, ms.RuntimeState.RuntimeStatus()) }) f.WaitUntilManifestState("bar ready", "bar", func(ms store.ManifestState) bool { return ms.RuntimeState.HasEverBeenReadyOrSucceeded() && ms.RuntimeState.RuntimeStatus() == v1alpha1.RuntimeStatusOK }) } type testFixture struct { *tempdir.TempDirFixture t *testing.T ctx context.Context cancel func() clock clockwork.Clock upper Upper b *fakeBuildAndDeployer fsWatcher *fsevent.FakeMultiWatcher docker *docker.FakeClient kClient *k8s.FakeK8sClient hud hud.HeadsUpDisplay ts *hud.TerminalStream upperInitResult chan error 
log *bufsync.ThreadSafeBuffer store *store.Store bc *BuildController cc *configs.ConfigsController dcc *dockercompose.FakeDCClient tfl *tiltfile.FakeTiltfileLoader realTFL tiltfile.TiltfileLoader opter *tiltanalytics.FakeOpter dp *dockerprune.DockerPruner fe *cmd.FakeExecer fpm *cmd.FakeProberManager overrideMaxParallelUpdates int ctrlClient ctrlclient.Client engineMode store.EngineMode onchangeCh chan bool sessionController *session.Controller localServerController *local.ServerController execer *localexec.FakeExecer } type fixtureOptions struct { engineMode *store.EngineMode } func newTestFixture(t *testing.T, options ...fixtureOptions) *testFixture { f := tempdir.NewTempDirFixture(t) engineMode := store.EngineModeUp for _, o := range options { if o.engineMode != nil { engineMode = *o.engineMode } } base := xdg.FakeBase{Dir: f.Path()} log := bufsync.NewThreadSafeBuffer() to := tiltanalytics.NewFakeOpter(analytics.OptIn) ctx, _, ta := testutils.ForkedCtxAndAnalyticsWithOpterForTest(log, to) ctx, cancel := context.WithTimeout(ctx, 15*time.Second) cdc := controllers.ProvideDeferredClient() sch := v1alpha1.NewScheme() watcher := fsevent.NewFakeMultiWatcher() kClient := k8s.NewFakeK8sClient(t) clusterClients := cluster.NewConnectionManager() timerMaker := fsevent.MakeFakeTimerMaker(t) dockerClient := docker.NewFakeClient() fSub := fixtureSub{ch: make(chan bool, 1000)} st := store.NewStore(UpperReducer, store.LogActionsFlag(false)) require.NoError(t, st.AddSubscriber(ctx, fSub)) err := os.Mkdir(f.JoinPath(".git"), os.FileMode(0777)) if err != nil { t.Fatal(err) } clock := clockwork.NewRealClock() env := clusterid.ProductDockerDesktop podSource := podlogstream.NewPodSource(ctx, kClient, v1alpha1.NewScheme(), clock) plsc := podlogstream.NewController(ctx, cdc, sch, st, kClient, podSource, clock) au := engineanalytics.NewAnalyticsUpdater(ta, engineanalytics.CmdTags{}, engineMode) ar := engineanalytics.ProvideAnalyticsReporter(ta, st, kClient, env, feature.MainDefaults) 
fakeDcc := dockercompose.NewFakeDockerComposeClient(t, ctx) k8sContextPlugin := k8scontext.NewPlugin("fake-context", "default", env) versionPlugin := version.NewPlugin(model.TiltBuild{Version: "0.5.0"}) configPlugin := config.NewPlugin("up") execer := localexec.NewFakeExecer(t) extPlugin := tiltextension.NewFakePlugin( tiltextension.NewFakeExtRepoReconciler(f.Path()), tiltextension.NewFakeExtReconciler(f.Path())) ciSettingsPlugin := cisettings.NewPlugin(0) realTFL := tiltfile.ProvideTiltfileLoader(ta, k8sContextPlugin, versionPlugin, configPlugin, extPlugin, ciSettingsPlugin, fakeDcc, "localhost", execer, feature.MainDefaults, env) tfl := tiltfile.NewFakeTiltfileLoader() cc := configs.NewConfigsController(cdc) tqs := configs.NewTriggerQueueSubscriber(cdc) serverOptions, err := server.ProvideTiltServerOptionsForTesting(ctx) require.NoError(t, err) webListener, err := server.ProvideWebListener("localhost", 0) require.NoError(t, err) hudsc := server.ProvideHeadsUpServerController( nil, "tilt-default", webListener, serverOptions, &server.HeadsUpServer{}, assets.NewFakeServer(), model.WebURL{}) ns := k8s.Namespace("default") rd := kubernetesdiscovery.NewContainerRestartDetector() kdc := kubernetesdiscovery.NewReconciler(cdc, sch, clusterClients, rd, st) sw := k8swatch.NewServiceWatcher(clusterClients, ns) ewm := k8swatch.NewEventWatchManager(clusterClients, ns) tcum := cloud.NewStatusManager(httptest.NewFakeClientEmptyJSON(), clock) fe := cmd.NewFakeExecer() fpm := cmd.NewFakeProberManager() fwc := filewatch.NewController(cdc, st, watcher.NewSub, timerMaker.Maker(), v1alpha1.NewScheme(), clock) cmds := cmd.NewController(ctx, fe, fpm, cdc, st, clock, v1alpha1.NewScheme()) lsc := local.NewServerController(cdc) sr := ctrlsession.NewReconciler(cdc, st, clock) sessionController := session.NewController(sr) ts := hud.NewTerminalStream(hud.NewIncrementalPrinter(log), st) tp := prompt.NewTerminalPrompt(ta, prompt.TTYOpen, openurl.BrowserOpen, log, "localhost", model.WebURL{}) h 
:= hud.NewFakeHud() uncached := controllers.UncachedObjects{} for _, obj := range v1alpha1.AllResourceObjects() { uncached = append(uncached, obj.(ctrlclient.Object)) } tscm, err := controllers.NewTiltServerControllerManager( serverOptions, sch, cdc, uncached) require.NoError(t, err, "Failed to create Tilt API server controller manager") pfr := apiportforward.NewReconciler(cdc, sch, st, clusterClients) wsl := server.NewWebsocketList() kar := kubernetesapply.NewReconciler(cdc, kClient, sch, docker.Env{}, st, execer) dcds := dockercomposeservice.NewDisableSubscriber(ctx, fakeDcc, clock) dcr := dockercomposeservice.NewReconciler(cdc, fakeDcc, dockerClient, st, sch, dcds) tfr := ctrltiltfile.NewReconciler(st, tfl, dockerClient, cdc, sch, engineMode, "", "", 0) tbr := togglebutton.NewReconciler(cdc, sch) extr := extension.NewReconciler(cdc, sch, ta) extrr, err := extensionrepo.NewReconciler(cdc, st, base) require.NoError(t, err) cmr := configmap.NewReconciler(cdc, st) cu := &containerupdate.FakeContainerUpdater{} lur := liveupdate.NewFakeReconciler(st, cu, cdc) dockerBuilder := build.NewDockerBuilder(dockerClient, nil) customBuilder := build.NewCustomBuilder(dockerClient, clock, cmds) kp := build.NewKINDLoader() ib := build.NewImageBuilder(dockerBuilder, customBuilder, kp) dir := dockerimage.NewReconciler(cdc, st, sch, dockerClient, ib) cir := cmdimage.NewReconciler(cdc, st, sch, dockerClient, ib) clr := cluster.NewReconciler(ctx, cdc, st, clock, clusterClients, docker.LocalEnv{}, cluster.FakeDockerClientOrError(dockerClient, nil), cluster.FakeKubernetesClientOrError(kClient, nil), wsl, base, "tilt-default") dclsr := dockercomposelogstream.NewReconciler(cdc, st, fakeDcc, dockerClient) cb := controllers.NewControllerBuilder(tscm, controllers.ProvideControllers( fwc, cmds, plsc, kdc, kar, ctrluisession.NewReconciler(cdc, wsl), ctrluiresource.NewReconciler(cdc, wsl, st), ctrluibutton.NewReconciler(cdc, wsl, st), pfr, tfr, tbr, extr, extrr, lur, cmr, dir, cir, clr, dcr, 
imagemap.NewReconciler(cdc, st), dclsr, sr, )) dp := dockerprune.NewDockerPruner(dockerClient) dp.DisabledForTesting(true) b := newFakeBuildAndDeployer(t, kClient, fakeDcc, cdc, kar, dcr) bc := NewBuildController(b) ret := &testFixture{ TempDirFixture: f, t: t, ctx: ctx, cancel: cancel, clock: clock, b: b, fsWatcher: watcher, docker: dockerClient, kClient: b.kClient, hud: h, ts: ts, log: log, store: st, bc: bc, onchangeCh: fSub.ch, cc: cc, dcc: fakeDcc, tfl: tfl, realTFL: realTFL, opter: to, dp: dp, fe: fe, fpm: fpm, ctrlClient: cdc, sessionController: sessionController, localServerController: lsc, engineMode: engineMode, execer: execer, } ret.disableEnvAnalyticsOpt() tc := telemetry.NewController(clock, tracer.NewSpanCollector(ctx)) podm := k8srollout.NewPodMonitor(clock) uss := uisession.NewSubscriber(cdc) urs := uiresource.NewSubscriber(cdc) subs := ProvideSubscribers(hudsc, tscm, cb, h, ts, tp, sw, bc, cc, tqs, ar, au, ewm, tcum, dp, tc, lsc, podm, sessionController, uss, urs) ret.upper, err = NewUpper(ctx, st, subs) require.NoError(t, err) go func() { err := h.Run(ctx, ret.upper.Dispatch, hud.DefaultRefreshInterval) testutils.FailOnNonCanceledErr(t, err, "hud.Run failed") }() t.Cleanup(ret.TearDown) return ret } func (f *testFixture) Now() time.Time { return f.clock.Now() } func (f *testFixture) fakeHud() *hud.FakeHud { fakeHud, ok := f.hud.(*hud.FakeHud) if !ok { f.t.Fatalf("called f.fakeHud() on a test fixure without a fakeHud (instead f.hud is of type: %T", f.hud) } return fakeHud } // starts the upper with the given manifests, bypassing normal tiltfile loading func (f *testFixture) Start(manifests []model.Manifest, initOptions ...initOption) { f.t.Helper() f.setManifests(manifests) ia := InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), TerminalMode: store.TerminalModeHUD, StartTime: f.Now(), } for _, o := range initOptions { ia = o(ia) } f.Init(ia) } func (f *testFixture) useRealTiltfileLoader() { f.tfl.Delegate = f.realTFL } func (f *testFixture) 
setManifests(manifests []model.Manifest) { f.tfl.Result.Manifests = manifests f.tfl.Result = f.tfl.Result.WithAllManifestsEnabled() } func (f *testFixture) setMaxParallelUpdates(n int) { f.overrideMaxParallelUpdates = n state := f.store.LockMutableStateForTesting() state.UpdateSettings = state.UpdateSettings.WithMaxParallelUpdates(n) f.store.UnlockMutableState() } func (f *testFixture) disableEnvAnalyticsOpt() { state := f.store.LockMutableStateForTesting() state.AnalyticsEnvOpt = analytics.OptDefault f.store.UnlockMutableState() } type initOption func(ia InitAction) InitAction func (f *testFixture) Init(action InitAction) { f.t.Helper() ctx, cancel := context.WithCancel(f.ctx) defer cancel() watchFiles := f.engineMode.WatchesFiles() f.upperInitResult = make(chan error, 10) go func() { err := f.upper.Init(f.ctx, action) if err != nil && err != context.Canceled { // Print this out here in case the test never completes log.Printf("upper exited: %v\n", err) f.cancel() } cancel() select { case f.upperInitResult <- err: default: fmt.Println("writing to upperInitResult would block!") panic(err) } close(f.upperInitResult) }() f.WaitUntil("tiltfile build finishes", func(st store.EngineState) bool { return !st.MainTiltfileState().LastBuild().Empty() }) state := f.store.LockMutableStateForTesting() expectedFileWatches := ctrltiltfile.ToFileWatchObjects(ctrltiltfile.WatchInputs{ TiltfileManifestName: model.MainTiltfileManifestName, Manifests: state.Manifests(), ConfigFiles: state.MainConfigPaths(), TiltfilePath: action.TiltfilePath, }, make(map[model.ManifestName]*v1alpha1.DisableSource)) if f.overrideMaxParallelUpdates > 0 { state.UpdateSettings = state.UpdateSettings.WithMaxParallelUpdates(f.overrideMaxParallelUpdates) } f.store.UnlockMutableState() f.PollUntil("watches set up", func() bool { if !watchFiles { return true } // wait for FileWatch objects to exist AND have a status indicating they're running var fwList v1alpha1.FileWatchList if err := f.ctrlClient.List(ctx, 
&fwList); err != nil { // If the context was canceled but the file watches haven't been set up, // that's OK. Just continue executing the rest of the test. // // If the error wasn't intended, the error will be properly // handled in TearDown(). if ctx.Done() != nil { return true } return false } remainingWatchNames := make(map[string]bool) for _, fw := range expectedFileWatches { remainingWatchNames[fw.GetName()] = true } for _, fw := range fwList.Items { if !fw.Status.MonitorStartTime.IsZero() { delete(remainingWatchNames, fw.GetName()) } } return len(remainingWatchNames) == 0 }) } func (f *testFixture) Stop() error { f.cancel() err := <-f.upperInitResult if err == context.Canceled { return nil } else { return err } } func (f *testFixture) WaitForExit() error { select { case <-time.After(stdTimeout): f.T().Fatalf("Timed out waiting for upper to exit") return nil case err := <-f.upperInitResult: return err } } func (f *testFixture) WaitForNoExit() error { select { case <-time.After(stdTimeout): return nil case err := <-f.upperInitResult: f.T().Fatalf("upper exited when it shouldn't have") return err } } func (f *testFixture) SetNextBuildError(err error) { // Before setting the nextBuildError, make sure that any in-flight builds (state.BuildStartedCount) // have hit the buildAndDeployer (f.b.buildCount); by the time we've incremented buildCount and // the fakeBaD mutex is unlocked, we've already grabbed the nextBuildError for that build, // so we can freely set it here for a future build. f.WaitUntil("any in-flight builds have hit the buildAndDeployer", func(state store.EngineState) bool { f.b.mu.Lock() defer f.b.mu.Unlock() return f.b.buildCount == state.BuildControllerStartCount }) _ = f.store.RLockState() f.b.mu.Lock() f.b.nextBuildError = err f.b.mu.Unlock() f.store.RUnlockState() } // Wait until the given view test passes. 
// WaitUntilHUD blocks until the given HUD view predicate passes.
func (f *testFixture) WaitUntilHUD(msg string, isDone func(view.View) bool) {
	f.fakeHud().WaitUntil(f.T(), f.ctx, msg, isDone)
}

// WaitUntilHUDResource blocks until the predicate passes for the named HUD resource.
func (f *testFixture) WaitUntilHUDResource(msg string, name model.ManifestName, isDone func(view.Resource) bool) {
	f.fakeHud().WaitUntilResource(f.T(), f.ctx, msg, name, isDone)
}

// Wait until the given engine state test passes.
func (f *testFixture) WaitUntil(msg string, isDone func(store.EngineState) bool) {
	f.T().Helper()

	ctx, cancel := context.WithTimeout(f.ctx, stdTimeout)
	defer cancel()

	isCanceled := false

	for {
		// Evaluate the predicate under the store's read lock; also snapshot
		// FatalError so it can be reported after the lock is released.
		state := f.upper.store.RLockState()
		done := isDone(state)
		fatalErr := state.FatalError
		f.upper.store.RUnlockState()
		if done {
			return
		}
		if fatalErr != nil {
			f.T().Fatalf("Store had fatal error: %v", fatalErr)
		}

		if isCanceled {
			// The timeout fired on the previous iteration and the predicate
			// still fails: dump the engine state for debugging, then fail.
			_, _ = fmt.Fprintf(os.Stderr, "Test canceled. Dumping engine state:\n")
			encoder := store.CreateEngineStateEncoder(os.Stderr)
			require.NoError(f.T(), encoder.Encode(state))
			f.T().Fatalf("Timed out waiting for: %s", msg)
		}

		select {
		case <-ctx.Done():
			// Let the loop run the isDone test one more time
			isCanceled = true
		case <-f.onchangeCh:
		}
	}
}

// withState runs tf with a read-locked snapshot of the engine state.
func (f *testFixture) withState(tf func(store.EngineState)) {
	state := f.upper.store.RLockState()
	defer f.upper.store.RUnlockState()
	tf(state)
}

// withManifestTarget runs tf with the named manifest target, failing the
// test if no manifest with that name exists.
func (f *testFixture) withManifestTarget(name model.ManifestName, tf func(ms store.ManifestTarget)) {
	f.withState(func(es store.EngineState) {
		mt, ok := es.ManifestTargets[name]
		if !ok {
			f.T().Fatalf("no manifest state for name %s", name)
		}
		tf(*mt)
	})
}

// withManifestState runs tf with the named manifest's state.
func (f *testFixture) withManifestState(name model.ManifestName, tf func(ms store.ManifestState)) {
	f.withManifestTarget(name, func(mt store.ManifestTarget) {
		tf(*mt.State)
	})
}

// Poll until the given state passes. This should be used for checking things outside
// the state loop. Don't use this to check state inside the state loop.
func (f *testFixture) PollUntil(msg string, isDone func() bool) { f.t.Helper() ctx, cancel := context.WithTimeout(f.ctx, stdTimeout) defer cancel() ticker := time.NewTicker(10 * time.Millisecond) for { done := isDone() if done { return } select { case <-ctx.Done(): f.T().Fatalf("Timed out waiting for: %s", msg) case <-ticker.C: } } } func (f *testFixture) WaitUntilManifest(msg string, name model.ManifestName, isDone func(store.ManifestTarget) bool) { f.t.Helper() f.WaitUntil(msg, func(es store.EngineState) bool { mt, ok := es.ManifestTargets[name] if !ok { return false } return isDone(*mt) }) } func (f *testFixture) WaitUntilManifestState(msg string, name model.ManifestName, isDone func(store.ManifestState) bool) { f.t.Helper() f.WaitUntilManifest(msg, name, func(mt store.ManifestTarget) bool { return isDone(*(mt.State)) }) } // gets the args for the next BaD call and blocks until that build is reflected in EngineState func (f *testFixture) nextCallComplete(msgAndArgs ...interface{}) buildAndDeployCall { f.t.Helper() call := f.nextCall(msgAndArgs...) f.waitForCompletedBuildCount(call.count) return call } // gets the args passed to the next call to the BaDer // note that if you're using this to block until a build happens, it only blocks until the BaDer itself finishes // so it can return before the build has actually been processed by the upper or the EngineState reflects // the completed build. // using `nextCallComplete` will ensure you block until the EngineState reflects the completed build. 
func (f *testFixture) nextCall(msgAndArgs ...interface{}) buildAndDeployCall { f.t.Helper() msg := "timed out waiting for BuildAndDeployCall" if len(msgAndArgs) > 0 { format := msgAndArgs[0].(string) args := msgAndArgs[1:] msg = fmt.Sprintf("%s: %s", msg, fmt.Sprintf(format, args...)) } for { select { case call := <-f.b.calls: return call case <-time.After(stdTimeout): f.T().Fatal(msg) } } } func (f *testFixture) assertNoCall(msgAndArgs ...interface{}) { f.t.Helper() msg := "expected there to be no BuildAndDeployCalls, but found one" if len(msgAndArgs) > 0 { msg = fmt.Sprintf("expected there to be no BuildAndDeployCalls, but found one: %s", msgAndArgs...) } for { select { case call := <-f.b.calls: f.T().Fatalf("%s\ncall:\n%s", msg, spew.Sdump(call)) case <-time.After(200 * time.Millisecond): return } } } func (f *testFixture) lastDeployedUID(manifestName model.ManifestName) types.UID { var manifest model.Manifest f.withManifestTarget(manifestName, func(mt store.ManifestTarget) { manifest = mt.Manifest }) result := f.b.resultsByID[manifest.K8sTarget().ID()] k8sResult, ok := result.(store.K8sBuildResult) if !ok { return "" } if len(k8sResult.DeployedRefs) > 0 { return k8sResult.DeployedRefs[0].UID } return "" } func (f *testFixture) startPod(pod *v1.Pod, manifestName model.ManifestName) { f.t.Helper() f.podEvent(pod) f.WaitUntilManifestState("pod appears", manifestName, func(ms store.ManifestState) bool { return ms.MostRecentPod().Name == pod.Name }) } func (f *testFixture) podLog(pod *v1.Pod, manifestName model.ManifestName, s string) { podID := k8s.PodID(pod.Name) f.upper.store.Dispatch(store.NewLogAction(manifestName, k8sconv.SpanIDForPod(manifestName, podID), logger.InfoLvl, nil, []byte(s+"\n"))) f.WaitUntil("pod log seen", func(es store.EngineState) bool { ms, _ := es.ManifestState(manifestName) spanID := k8sconv.SpanIDForPod(manifestName, k8s.PodID(ms.MostRecentPod().Name)) return strings.Contains(es.LogStore.SpanLog(spanID), s) }) } func (f *testFixture) 
restartPod(pb podbuilder.PodBuilder) podbuilder.PodBuilder { restartCount := pb.RestartCount() + 1 pb = pb.WithRestartCount(restartCount) f.podEvent(pb.Build()) f.WaitUntilManifestState("pod restart seen", pb.ManifestName(), func(ms store.ManifestState) bool { return store.AllPodContainerRestarts(ms.MostRecentPod()) == int32(restartCount) }) return pb } func (f *testFixture) notifyAndWaitForPodStatus(pod *v1.Pod, mn model.ManifestName, pred func(pod v1alpha1.Pod) bool) { f.podEvent(pod) f.WaitUntilManifestState("pod status change seen", mn, func(state store.ManifestState) bool { return pred(state.MostRecentPod()) }) } func (f *testFixture) waitForCompletedBuildCount(count int) { f.t.Helper() f.WaitUntil(fmt.Sprintf("%d builds done", count), func(state store.EngineState) bool { return state.CompletedBuildCount >= count }) } func (f *testFixture) LogLines() []string { return strings.Split(f.log.String(), "\n") } func (f *testFixture) TearDown() { if f.T().Failed() { f.withState(func(es store.EngineState) { fmt.Println(es.LogStore.String()) }) } close(f.fsWatcher.Events) close(f.fsWatcher.Errors) f.cancel() // If the test started an Init() in a goroutine, drain it. 
if f.upperInitResult != nil { <-f.upperInitResult } } func (f *testFixture) registerForDeployer(manifest model.Manifest) podbuilder.PodBuilder { pb := podbuilder.New(f.t, manifest) f.b.targetObjectTree[manifest.K8sTarget().ID()] = pb.ObjectTreeEntities() return pb } func (f *testFixture) podEvent(pod *v1.Pod) { f.t.Helper() for _, ownerRef := range pod.OwnerReferences { _, err := f.kClient.GetMetaByReference(f.ctx, v1.ObjectReference{ UID: ownerRef.UID, Name: ownerRef.Name, }) if err != nil { f.t.Logf("Owner reference uid[%s] name[%s] for pod[%s] does not exist in fake client", ownerRef.UID, ownerRef.Name, pod.Name) } } f.kClient.UpsertPod(pod) } func (f *testFixture) newManifest(name string) model.Manifest { iTarget := NewSanchoLiveUpdateImageTarget(f) return manifestbuilder.New(f, model.ManifestName(name)). WithK8sYAML(SanchoYAML). WithImageTarget(iTarget). Build() } func (f *testFixture) newManifestWithRef(name string, ref reference.Named) model.Manifest { refSel := container.NewRefSelector(ref) iTarget := NewSanchoLiveUpdateImageTarget(f) iTarget = iTarget.MustWithRef(refSel) return manifestbuilder.New(f, model.ManifestName(name)). WithK8sYAML(SanchoYAML). WithImageTarget(iTarget). Build() } func (f *testFixture) newDockerBuildManifestWithBuildPath(name string, path string) model.Manifest { db := v1alpha1.DockerImageSpec{DockerfileContents: "FROM alpine", Context: path} iTarget := NewSanchoDockerBuildImageTarget(f).WithDockerImage(db) iTarget = iTarget.MustWithRef(container.MustParseSelector(strings.ToLower(name))) // each target should have a unique ID return manifestbuilder.New(f, model.ManifestName(name)). WithK8sYAML(SanchoYAML). WithImageTarget(iTarget). 
Build() } func (f *testFixture) assertAllBuildsConsumed() { f.t.Helper() close(f.b.calls) for call := range f.b.calls { f.T().Fatalf("Build not consumed: %s", spew.Sdump(call)) } } func (f *testFixture) loadAndStart(initOptions ...initOption) { f.t.Helper() ia := InitAction{ TiltfilePath: f.JoinPath("Tiltfile"), TerminalMode: store.TerminalModeHUD, StartTime: f.Now(), } for _, opt := range initOptions { ia = opt(ia) } f.Init(ia) } func (f *testFixture) WriteConfigFiles(args ...string) { f.t.Helper() if (len(args) % 2) != 0 { f.T().Fatalf("WriteConfigFiles needs an even number of arguments; got %d", len(args)) } for i := 0; i < len(args); i += 2 { filename := f.JoinPath(args[i]) contents := args[i+1] f.WriteFile(filename, contents) // Fire an FS event thru the normal pipeline, so that manifests get marked dirty. f.fsWatcher.Events <- watch.NewFileEvent(filename) } } func (f *testFixture) setupDCFixture() (redis, server model.Manifest) { dcp := filepath.Join(originalWD, "testdata", "fixture_docker-config.yml") dcpc, err := os.ReadFile(dcp) if err != nil { f.T().Fatal(err) } f.WriteFile("docker-compose.yml", string(dcpc)) dfp := filepath.Join(originalWD, "testdata", "server.dockerfile") dfc, err := os.ReadFile(dfp) if err != nil { f.T().Fatal(err) } f.WriteFile("Dockerfile", string(dfc)) f.WriteFile("Tiltfile", `docker_compose('docker-compose.yml')`) f.dcc.WorkDir = f.Path() f.dcc.ConfigOutput = string(dcpc) tlr := f.realTFL.Load(f.ctx, apitiltfile.MainTiltfile(f.JoinPath("Tiltfile"), nil), nil) if tlr.Error != nil { f.T().Fatal(tlr.Error) } if len(tlr.Manifests) != 2 { f.T().Fatalf("Expected two manifests. 
Actual: %v", tlr.Manifests) } for _, m := range tlr.Manifests { require.NoError(f.t, m.InferImageProperties()) } return tlr.Manifests[0], tlr.Manifests[1] } func (f *testFixture) setBuildLogOutput(id model.TargetID, output string) { f.b.buildLogOutput[id] = output } func (f *testFixture) hudResource(name model.ManifestName) view.Resource { res, ok := f.fakeHud().LastView.Resource(name) if !ok { f.T().Fatalf("Resource not found: %s", name) } return res } func (f *testFixture) completeBuildForManifest(m model.Manifest) { f.b.completeBuild(targetIDStringForManifest(m)) } func (f *testFixture) setDisableState(mn model.ManifestName, isDisabled bool) { err := tiltconfigmap.UpsertDisableConfigMap(f.ctx, f.ctrlClient, fmt.Sprintf("%s-disable", mn), "isDisabled", isDisabled) require.NoError(f.t, err) f.WaitUntil("new disable state reflected in UIResource", func(state store.EngineState) bool { if uir, ok := state.UIResources[mn.String()]; ok { return uir.Status.DisableStatus.DisabledCount > 0 == isDisabled } return false }) } type fixtureSub struct { ch chan bool } func (s fixtureSub) OnChange(ctx context.Context, st store.RStore, _ store.ChangeSummary) error { s.ch <- true return nil } func (f *testFixture) ensureCluster() { f.ensureClusterNamed(v1alpha1.ClusterNameDefault) } func (f *testFixture) ensureClusterNamed(name string) { f.t.Helper() err := f.ctrlClient.Create(f.ctx, &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: v1alpha1.ClusterSpec{ Connection: &v1alpha1.ClusterConnection{ Kubernetes: &v1alpha1.KubernetesClusterConnection{}, }, }, }) require.NoError(f.T(), err) } func assertLineMatches(t *testing.T, lines []string, re *regexp.Regexp) { for _, line := range lines { if re.MatchString(line) { return } } t.Fatalf("Expected line to match: %s. 
Lines: %v", re.String(), lines) } func assertContainsOnce(t *testing.T, s string, val string) { assert.Contains(t, s, val) assert.Equal(t, 1, strings.Count(s, val), "Expected string to appear only once") } // stringifyTargetIDs attempts to make a unique string to identify any set of targets // (order-agnostic) by sorting and then concatenating the target IDs. func stringifyTargetIDs(targets []model.TargetSpec) string { ids := make([]string, len(targets)) for i, t := range targets { ids[i] = t.ID().String() } sort.Strings(ids) return strings.Join(ids, "::") } func targetIDStringForManifest(m model.Manifest) string { return stringifyTargetIDs(m.TargetSpecs()) }
package parser

import (
	"strings"
	"testing"
)

// TestParse verifies that Parse accepts a markdown document with YAML front
// matter and returns without error; the parsed headers and content are
// logged for inspection.
func TestParse(t *testing.T) {
	md := `
---
title: "Front Matters"
description: "It really does"
---
This is some summary. This is some summary. This is some summary. This is some summary.

<!--more-->

### Title

End value
`
	info, err := Parse(strings.NewReader(md))
	if err != nil {
		// Bug fix: the original failure message was the garbled "happp error".
		t.Fatalf("parse error: %v", err)
	}
	t.Logf("headers: %#v", info.Matters)
	t.Logf("content: %s", info.Content)
}
package main

import (
	"net/http"
	"os"
	"strings"

	"github.com/gin-contrib/multitemplate"
	"github.com/gin-gonic/gin"
	_ "github.com/heroku/x/hmetrics/onload"
	"github.com/mattn/go-zglob"

	. "go_heroku_test/controllers"
	"go_heroku_test/db"
)

// main wires up the database, the gin router, static assets, and the HTML
// template renderer, then serves on $PORT (or gin's default when unset).
func main() {
	db.Init()
	defer db.Close()

	port := os.Getenv("PORT")

	router := gin.New()
	router.Use(gin.Logger())
	router.Use(headersByRequestURI())
	router.Static("/assets", "./dst/assets")
	router.HTMLRender = loadTemplates()
	router.GET("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "index.html", nil)
	})
	router.GET("/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	router.GET("/users", UsersHandler)
	router.GET("/works", WorksHandler)

	if port == "" {
		router.Run()
	} else {
		router.Run(":" + port)
	}
}

// headersByRequestURI returns middleware that sets a one-year Cache-Control
// header on every request whose URI is under /assets/.
func headersByRequestURI() gin.HandlerFunc {
	return func(c *gin.Context) {
		if strings.HasPrefix(c.Request.RequestURI, "/assets/") {
			c.Header("Cache-Control", "max-age=31536000")
		}
	}
}

// loadTemplates builds a multitemplate renderer: each file under
// dst/tmpl/includes is combined with every layout, with base.html forced to
// the front of the layout list.
func loadTemplates() multitemplate.Renderer {
	r := multitemplate.NewRenderer()

	layouts, err := zglob.Glob("dst/tmpl/layouts/**/*.html")
	if err != nil {
		panic(err.Error())
	}

	// Move base.html to head
	// (in-place filter removes base.html; it is re-prepended just below)
	n := 0
	for _, l := range layouts {
		if l != "dst/tmpl/layouts/base.html" {
			layouts[n] = l
			n++
		}
	}
	layouts = append([]string{"dst/tmpl/layouts/base.html"}, layouts[:n]...)

	includes, err := zglob.Glob("dst/tmpl/includes/**/*.html")
	if err != nil {
		panic(err.Error())
	}

	// Generate our templates map from our layouts/ and includes/ directories
	for _, include := range includes {
		// Copy before appending so each template gets its own backing array
		// and appends cannot stomp a sibling's slice.
		layoutCopy := make([]string, len(layouts))
		copy(layoutCopy, layouts)
		files := append(layoutCopy, include)
		r.AddFromFiles(strings.Replace(include, "dst/tmpl/includes/", "", -1), files...)
	}

	return r
}
package rod_test import ( "errors" "fmt" "net/http" "os" "os/exec" "runtime" "testing" "time" "github.com/go-rod/rod" "github.com/go-rod/rod/lib/cdp" "github.com/go-rod/rod/lib/devices" "github.com/go-rod/rod/lib/launcher" "github.com/go-rod/rod/lib/proto" "github.com/go-rod/rod/lib/utils" "github.com/ysmood/gson" ) func TestIncognito(t *testing.T) { g := setup(t) k := g.RandStr(16) b := g.browser.MustIncognito().Sleeper(rod.DefaultSleeper) defer b.MustClose() page := b.MustPage(g.blank()) defer page.MustClose() page.MustEval(`k => localStorage[k] = 1`, k) g.True(g.page.MustNavigate(g.blank()).MustEval(`k => localStorage[k]`, k).Nil()) g.Eq(page.MustEval(`k => localStorage[k]`, k).Str(), "1") // localStorage can only store string g.Panic(func() { g.mc.stubErr(1, proto.TargetCreateBrowserContext{}) g.browser.MustIncognito() }) } func TestBrowserResetControlURL(_ *testing.T) { rod.New().ControlURL("test").ControlURL("") } func TestDefaultDevice(t *testing.T) { g := setup(t) ua := "" s := g.Serve() s.Mux.HandleFunc("/t", func(rw http.ResponseWriter, r *http.Request) { ua = r.Header.Get("User-Agent") }) // TODO: https://github.com/golang/go/issues/51459 b := *g.browser b.DefaultDevice(devices.IPhoneX) b.MustPage(s.URL("/t")).MustClose() g.Eq(ua, devices.IPhoneX.UserAgentEmulation().UserAgent) b.NoDefaultDevice() b.MustPage(s.URL("/t")).MustClose() g.Neq(ua, devices.IPhoneX.UserAgentEmulation().UserAgent) } func TestPageErr(t *testing.T) { g := setup(t) g.Panic(func() { g.mc.stubErr(1, proto.TargetAttachToTarget{}) g.browser.MustPage() }) } func TestPageFromTarget(t *testing.T) { g := setup(t) g.Panic(func() { res, err := proto.TargetCreateTarget{URL: "about:blank"}.Call(g.browser) g.E(err) defer func() { g.browser.MustPageFromTargetID(res.TargetID).MustClose() }() g.mc.stubErr(1, proto.EmulationSetDeviceMetricsOverride{}) g.browser.MustPageFromTargetID(res.TargetID) }) } func TestBrowserPages(t *testing.T) { g := setup(t) b := g.browser pages := b.MustPages() 
g.Gte(len(pages), 1) { g.mc.stub(1, proto.TargetGetTargets{}, func(send StubSend) (gson.JSON, error) { d, _ := send() return *d.Set("targetInfos.0.type", "iframe"), nil }) b.MustPages() } g.Panic(func() { g.mc.stubErr(1, proto.TargetCreateTarget{}) b.MustPage() }) g.Panic(func() { g.mc.stubErr(1, proto.TargetGetTargets{}) b.MustPages() }) g.Panic(func() { _, err := proto.TargetCreateTarget{URL: "about:blank"}.Call(b) g.E(err) g.mc.stubErr(1, proto.TargetAttachToTarget{}) b.MustPages() }) } func TestBrowserClearStates(t *testing.T) { g := setup(t) g.E(proto.EmulationClearGeolocationOverride{}.Call(g.page)) } func TestBrowserEvent(t *testing.T) { g := setup(t) messages := g.browser.Context(g.Context()).Event() p := g.newPage() wait := make(chan struct{}) for msg := range messages { e := proto.TargetAttachedToTarget{} if msg.Load(&e) { g.Eq(e.TargetInfo.TargetID, p.TargetID) close(wait) break } } <-wait } func TestBrowserWaitEvent(t *testing.T) { g := setup(t) g.NotNil(g.browser.Context(g.Context()).Event()) wait := g.page.WaitEvent(proto.PageFrameNavigated{}) g.page.MustNavigate(g.blank()) wait() wait = g.browser.EachEvent(func(e *proto.PageFrameNavigated, id proto.TargetSessionID) bool { return true }) g.page.MustNavigate(g.blank()) wait() } func TestBrowserCrash(t *testing.T) { g := setup(t) browser := rod.New().Context(g.Context()).MustConnect() page := browser.MustPage() js := `() => new Promise(r => setTimeout(r, 10000))` go g.Panic(func() { page.MustEval(js) }) utils.Sleep(0.2) _ = proto.BrowserCrash{}.Call(browser) utils.Sleep(0.3) _, err := page.Eval(js) g.Has(err.Error(), "use of closed network connection") } func TestBrowserCall(t *testing.T) { g := setup(t) v, err := proto.BrowserGetVersion{}.Call(g.browser) g.E(err) g.Regex("1.3", v.ProtocolVersion) } func TestBlockingNavigation(t *testing.T) { g := setup(t) /* Navigate can take forever if a page doesn't response. If one page is blocked, other pages should still work. 
*/ s := g.Serve() pause := g.Context() s.Mux.HandleFunc("/a", func(w http.ResponseWriter, r *http.Request) { <-pause.Done() }) s.Route("/b", ".html", `<html>ok</html>`) blocked := g.newPage() go func() { g.Panic(func() { blocked.MustNavigate(s.URL("/a")) }) }() utils.Sleep(0.3) g.newPage(s.URL("/b")) } func TestResolveBlocking(t *testing.T) { g := setup(t) s := g.Serve() pause := g.Context() s.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { <-pause.Done() }) p := g.newPage() go func() { utils.Sleep(0.1) p.MustStopLoading() }() g.Panic(func() { p.MustNavigate(s.URL()) }) } func TestTestTry(t *testing.T) { g := setup(t) g.Nil(rod.Try(func() {})) err := rod.Try(func() { panic(1) }) var errVal *rod.ErrTry g.True(errors.As(err, &errVal)) g.Is(err, &rod.ErrTry{}) g.Eq(errVal.Unwrap().Error(), "1") g.Eq(1, errVal.Value) g.Has(errVal.Error(), "error value: 1\ngoroutine") errVal = rod.Try(func() { panic(errors.New("t")) }).(*rod.ErrTry) g.Eq(errVal.Unwrap().Error(), "t") } func TestBrowserOthers(t *testing.T) { g := setup(t) g.browser.Timeout(time.Second).CancelTimeout().MustGetCookies() } func TestBinarySize(t *testing.T) { g := setup(t) if runtime.GOOS == "windows" || utils.InContainer { g.SkipNow() } cmd := exec.Command("go", "build", "-trimpath", "-ldflags", "-w -s", "-o", "tmp/translator", "./lib/examples/translator") cmd.Env = append(os.Environ(), "GOOS=linux") g.Nil(cmd.Run()) stat, err := os.Stat("tmp/translator") g.E(err) g.Lte(float64(stat.Size())/1024/1024, 11) // mb } func TestBrowserCookies(t *testing.T) { g := setup(t) b := g.browser.MustIncognito() defer b.MustClose() b.MustSetCookies(&proto.NetworkCookie{ Name: "a", Value: "val", Domain: "test.com", }) cookies := b.MustGetCookies() g.Len(cookies, 1) g.Eq(cookies[0].Name, "a") g.Eq(cookies[0].Value, "val") { b.MustSetCookies() cookies := b.MustGetCookies() g.Len(cookies, 0) } g.mc.stubErr(1, proto.StorageGetCookies{}) g.Err(b.GetCookies()) } func TestWaitDownload(t *testing.T) { g := 
setup(t) s := g.Serve() content := "test content" s.Route("/d", ".bin", []byte(content)) s.Route("/page", ".html", fmt.Sprintf(`<html><a href="%s/d" download>click</a></html>`, s.URL())) page := g.page.MustNavigate(s.URL("/page")) wait := g.browser.MustWaitDownload() page.MustElement("a").MustClick() data := wait() g.Eq(content, string(data)) } func TestWaitDownloadDataURI(t *testing.T) { g := setup(t) s := g.Serve() s.Route("/", ".html", `<html> <a id="a" href="data:text/plain;,test%20data" download>click</a> <a id="b" download>click</a> <script> const b = document.getElementById('b') b.href = URL.createObjectURL(new Blob(['test blob'], { type: "text/plain; charset=utf-8" })) </script> </html>`, ) page := g.page.MustNavigate(s.URL()) wait1 := g.browser.MustWaitDownload() page.MustElement("#a").MustClick() data := wait1() g.Eq("test data", string(data)) wait2 := g.browser.MustWaitDownload() page.MustElement("#b").MustClick() data = wait2() g.Eq("test blob", string(data)) } func TestWaitDownloadCancel(t *testing.T) { g := setup(t) wait := g.browser.Context(g.Timeout(0)).WaitDownload(os.TempDir()) g.Eq(wait(), (*proto.PageDownloadWillBegin)(nil)) } func TestWaitDownloadFromNewPage(t *testing.T) { g := setup(t) s := g.Serve() content := "test content" s.Route("/d", ".bin", content) s.Route("/page", ".html", fmt.Sprintf( `<html><a href="%s/d" download target="_blank">click</a></html>`, s.URL()), ) page := g.page.MustNavigate(s.URL("/page")) wait := g.browser.MustWaitDownload() page.MustElement("a").MustClick() data := wait() g.Eq(content, string(data)) } func TestBrowserConnectErr(t *testing.T) { g := setup(t) g.Panic(func() { rod.New().ControlURL(g.RandStr(16)).MustConnect() }) } func TestStreamReader(t *testing.T) { g := setup(t) r := rod.NewStreamReader(g.page, "") g.mc.stub(1, proto.IORead{}, func(send StubSend) (gson.JSON, error) { return gson.New(proto.IOReadResult{ Data: "test", }), nil }) b := make([]byte, 4) _, _ = r.Read(b) g.Eq("test", string(b)) 
g.mc.stubErr(1, proto.IORead{}) _, err := r.Read(nil) g.Err(err) g.mc.stub(1, proto.IORead{}, func(send StubSend) (gson.JSON, error) { return gson.New(proto.IOReadResult{ Base64Encoded: true, Data: "@", }), nil }) _, err = r.Read(nil) g.Err(err) } func TestBrowserConnectFailure(t *testing.T) { g := setup(t) c := g.Context() c.Cancel() err := rod.New().Context(c).Connect() if err == nil { g.Fatal("expected an error on connect failure") } } func TestBrowserPool(_ *testing.T) { pool := rod.NewBrowserPool(3) create := func() *rod.Browser { return rod.New().MustConnect() } b := pool.Get(create) pool.Put(b) pool.Cleanup(func(p *rod.Browser) { p.MustClose() }) } func TestOldBrowser(t *testing.T) { t.Skip() g := setup(t) u := launcher.New().Revision(686378).MustLaunch() b := rod.New().ControlURL(u).MustConnect() g.Cleanup(b.MustClose) res, err := proto.BrowserGetVersion{}.Call(b) g.E(err) g.Eq(res.Revision, "@19d4547535ab5aba70b4730443f84e8153052174") } func TestBrowserLostConnection(t *testing.T) { g := setup(t) l := launcher.New() p := rod.New().ControlURL(l.MustLaunch()).MustConnect().MustPage(g.blank()) go func() { utils.Sleep(1) l.Kill() }() _, err := p.Eval(`() => new Promise(r => {})`) g.Err(err) } func TestBrowserConnectConflict(t *testing.T) { g := setup(t) g.Panic(func() { rod.New().Client(&cdp.Client{}).ControlURL("test").MustConnect() }) }
package freelearning

import (
	"bytes"
	"testing"
)

// TestParseBooks checks that ParseBooks reads one book title per line.
func TestParseBooks(t *testing.T) {
	// Some book titles
	title1, title2 := "title1", "title2"
	// The source for a buffer/reader
	source := title1 + "\n" + title2 + "\n"
	buff := bytes.NewBufferString(source)
	// Parse books from the source
	if books, err := ParseBooks(buff); err != nil {
		t.Error("Unexpected error:", err.Error())
	} else if num := len(books.books); num != 2 {
		t.Errorf("Expected 2 books but found %d", num)
	}
}

// TestParseLine checks that parseLine strips "\n" and "\r\n" terminators and
// still returns the final line (with an error) when no terminator is present.
func TestParseLine(t *testing.T) {
	line1, line2, line3 := "This is a line\n", "This is a line\r\n", "This is a line"

	buff := bytes.NewBufferString(line1)
	if line, err := parseLine(buff); err != nil {
		t.Error("Unexpected error:", err.Error())
	} else if line != "This is a line" {
		t.Errorf(`Expected line to be "This is a line" but was "%s"`, line)
	}

	buff = bytes.NewBufferString(line2)
	if line, err := parseLine(buff); err != nil {
		t.Error("Unexpected error:", err.Error())
	} else if line != "This is a line" {
		t.Errorf(`Expected line to be "This is a line" but was "%s"`, line)
	}

	// No trailing newline: the text should come back alongside an EOF error.
	buff = bytes.NewBufferString(line3)
	if line, err := parseLine(buff); err == nil {
		t.Error("Expected an EOF error")
	} else if line != "This is a line" {
		t.Errorf(`Expected line to be "This is a line" but was "%s"`, line)
	}
}

// TestNewBooks checks that NewBooks returns a non-nil, empty collection.
func TestNewBooks(t *testing.T) {
	books := NewBooks()
	// Bug fix: check for nil BEFORE dereferencing books.books. The original
	// evaluated len(books.books) first, which would panic on a nil return
	// and make the nil check unreachable.
	if books == nil {
		t.Fatal("Expected books to not be nil")
	}
	if len(books.books) != 0 {
		t.Error("Expected books to be empty")
	}
}

// TestBooksLen checks that Len mirrors the underlying slice length.
func TestBooksLen(t *testing.T) {
	books := NewBooks()
	if s, l := len(books.books), books.Len(); s != l {
		t.Errorf("Expected slice len (%d) to equal Books.Len (%d)", s, l)
	}
}

// TestBooksIndexOf checks that indexOf finds known titles and returns -1
// for unknown ones.
func TestBooksIndexOf(t *testing.T) {
	title1, title2 := "title1", "title2"
	books := NewBooks(title1)
	// Re-adding an existing title; Add is expected to de-duplicate
	// (see TestBooksAdd), so title1 stays at index 0.
	books.Add(title1)
	if idx := books.indexOf(title1); idx != 0 {
		t.Errorf(`Expected index of "%s" to be 0 but was %d`, title1, idx)
	}
	if idx := books.indexOf(title2); idx != -1 {
		t.Errorf(`Expected index of "%s" to be -1 but was %d`, title2, idx)
	}
}

// TestBooksHas checks membership for present and absent titles.
func TestBooksHas(t *testing.T) {
	title1, title2 := "title1", "title2"
	books := NewBooks(title1)
	if !books.Has(title1) {
		t.Errorf(`Expected books to contain "%s"`, title1)
	}
	if books.Has(title2) {
		t.Errorf(`Expected books to not contain "%s"`, title2)
	}
}

// TestBooksAdd checks that Add stores a title and rejects duplicates.
func TestBooksAdd(t *testing.T) {
	title1 := "title1"
	books := NewBooks()
	books.Add(title1)
	if num := books.Len(); num != 1 {
		t.Fatalf("Expected 1 book but there were %d", num)
	}
	if !books.Has(title1) {
		t.Errorf(`Expected books to contain "%s"`, title1)
	}
	// Make sure a "double add" is not permitted
	books.Add(title1)
	if num := books.Len(); num != 1 {
		t.Error("Expected books to prevent a double add")
	}
}

// TestBooksRemove checks that Remove deletes only the named title and that
// removing a nonexistent title is a no-op.
func TestBooksRemove(t *testing.T) {
	title1, title2 := "title1", "title2"
	books := NewBooks(title1, title2)
	books.Remove(title2)
	if !books.Has(title1) {
		t.Errorf(`Expected books to contain "%s"`, title1)
	}
	if books.Has(title2) {
		t.Errorf(`Expected books to not contain "%s"`, title2)
	}
	// Make sure a bad remove is a no-op
	books.Remove("Does not exist")
	if num := books.Len(); num != 1 {
		t.Error("Expected removal of nonexistent book to have no effect")
	}
}

// TestBooksSave checks that Save writes one title per line.
func TestBooksSave(t *testing.T) {
	title1, title2 := "title1", "title2"
	expected := title1 + "\n" + title2 + "\n"
	var buff bytes.Buffer
	books := NewBooks(title1, title2)
	if err := books.Save(&buff); err != nil {
		t.Error("Unexpected error:", err.Error())
	} else if actual := buff.String(); actual != expected {
		t.Errorf(`Expected buffer to contain "%s" but was "%s"`, expected, actual)
	}
}
package main

import (
	"log"
	"time"

	mcp "github.com/ardnew/mcp2221a"
)

// main demonstrates driving an MCP2221A USB-to-I2C bridge: open the device,
// reset it, configure the I2C bus, scan for slaves, and read one register
// from an INA260 power sensor.
func main() {
	// open the first (index 0) device matching the library's default VID/PID
	m, err := mcp.New(0, mcp.VID, mcp.PID)
	if nil != err {
		log.Fatalf("Open(): %v", err)
	}
	defer m.Close()

	log.Print(mcp.PackageVersion())

	// reset device to default settings stored in flash memory
	if err := m.Reset(5 * time.Second); nil != err {
		log.Fatalf("Reset(): %v", err)
	}

	// configure I2C module to use default baud rate (optional)
	if err := m.I2C.SetConfig(mcp.I2CBaudRate); nil != err {
		log.Fatalf("I2C.SetConfig(): %v", err)
	}

	// identify all I2C slaves on the bus, printing their slave address
	if addr, err := m.I2C.Scan(mcp.I2CMinAddr, mcp.I2CMaxAddr); nil != err {
		log.Fatalf("I2C.Scan(): %v", err)
	} else {
		for _, a := range addr {
			log.Printf("scan found = 0x%02X", a)
		}
	}

	// -- DEVICE SPECIFIC --

	// read the 16-bit data from device ID register (0xFF) from an INA260 power
	// sensor at default slave address (0x40)
	if buf, err := m.I2C.ReadReg(0x40, 0xFF, 2); nil != err {
		log.Fatalf("I2C.ReadReg(): %v", err)
	} else {
		// parse the data received, packing it into a 16-bit unsigned int. the
		// INA260 returns data MSB-first.
		var ub uint16 = (uint16(buf[0]) << 8) | uint16(buf[1])
		rev := ub & 0x0F // revision is 4 bits (LSB)
		die := ub >> 4   // device ID is remaining 12 bits
		log.Printf("Revision = %3d {0x%4X} [0b%16b]", rev, rev, rev)
		log.Printf("Device ID = %3d {0x%4X} [0b%16b]", die, die, die)
	}
}
package main

import "fmt"

// main prints the odd numbers between 1 and 50 (1, 3, …, 49), one per line.
func main() {
	fmt.Println("numeros impares entre el 1 y el 50: ")
	// Counting upward by two from 1 visits exactly the odd numbers below 50 —
	// the same sequence the original produced by incrementing one at a time,
	// skipping even values with `continue`, and leaving the loop with `break`
	// once the counter reached 50.
	for n := 1; n < 50; n += 2 {
		fmt.Println(n)
	}
}
package commands

// leftovers describes the resource-cleanup backend these commands drive:
// deleting or listing resources that match a name filter, optionally
// interpreted as a regular expression.
type leftovers interface {
	// Delete removes every resource matching filter; when regex is true the
	// filter is treated as a regular expression.
	Delete(filter string, regex bool) error
	// DeleteByType behaves like Delete but restricts matches to the given
	// resource type rType.
	DeleteByType(filter, rType string, regex bool) error
	// List reports the resources matching filter without deleting them.
	// Note it surfaces no error to the caller.
	List(filter string, regex bool)
	// ListByType behaves like List but restricts matches to rType.
	ListByType(filter, rType string, regex bool)
	// Types reports the resource types the backend supports.
	Types()
}
package main

import (
	"bufio"
	"fmt"
	"net"
	"strings"
	"sync"
)

// memDB is the in-memory key/value store shared by every connection.
var memDB map[string]string

// memDBMu guards memDB. handleConnection runs in one goroutine per client,
// so unsynchronized map access was a data race (concurrent map read/write
// crashes the process under load).
var memDBMu sync.RWMutex

func init() {
	memDB = make(map[string]string)
}

// handleConnection ascolta la connessione per delle keyword che permettono
// di interagire con il database in memory.
// (Listens on the connection for SET/GET/DEL/PRINT commands that operate on
// the in-memory database, replying on the same connection.)
func handleConnection(conn net.Conn) {
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		cmd := strings.Split(scanner.Text(), " ")
		cmdLen := len(cmd)
		if cmdLen < 1 {
			// strings.Split never returns an empty slice, but if it ever did,
			// falling through to cmd[0] would panic — skip the line instead.
			fmt.Printf("Command %v is invalid.\n\r", cmd)
			continue
		}
		switch cmd[0] {
		case "SET":
			if cmdLen < 3 {
				fmt.Printf("Command %v is invalid.\n\r", cmd)
			} else {
				memDBMu.Lock()
				memDB[cmd[1]] = cmd[2]
				v := memDB[cmd[1]]
				memDBMu.Unlock()
				fmt.Fprintf(conn, "set %v = %v\n\r", cmd[1], v)
			}
		case "GET":
			if cmdLen < 2 {
				fmt.Printf("Command %v is invalid.\n\r", cmd)
			} else {
				memDBMu.RLock()
				value, ok := memDB[cmd[1]]
				memDBMu.RUnlock()
				if !ok {
					fmt.Fprintf(conn, "value %v is not in DB\n\r", cmd[1])
				} else {
					fmt.Fprintf(conn, "%v", value)
				}
			}
		case "DEL":
			if cmdLen < 2 {
				fmt.Printf("Command %v is invalid.\n\r", cmd)
			} else {
				memDBMu.Lock()
				_, ok := memDB[cmd[1]]
				if ok {
					delete(memDB, cmd[1])
				}
				memDBMu.Unlock()
				if !ok {
					fmt.Fprintf(conn, "value %v is not in DB\n\r", cmd[1])
				} else {
					fmt.Fprintf(conn, "deleted key %v\n\r", cmd[1])
				}
			}
		case "PRINT":
			// Hold the read lock across the whole iteration: ranging over a
			// map while another goroutine writes it is a race.
			memDBMu.RLock()
			for k, v := range memDB {
				fmt.Fprintf(conn, "%v, %v\n\r", k, v)
			}
			memDBMu.RUnlock()
		default:
			fmt.Fprintf(conn, "Command %v is invalid.\n\r", cmd)
			fmt.Printf("Command %v is invalid.\n\r", scanner.Text())
		}
	}
	// Surface read errors; a clean EOF is reported as nil and stays silent.
	if err := scanner.Err(); err != nil {
		fmt.Printf("connection error: %v\n", err)
	}
}

// main accepts TCP connections on :8080 and serves each one concurrently.
func main() {
	listener, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	fmt.Println("Server started")
	for {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		go func() {
			defer conn.Close()
			handleConnection(conn)
		}()
	}
	// The original printed "Server exited" here, but the accept loop never
	// terminates normally, so that statement was unreachable dead code.
}
package merge_test

import (
	"fmt"
	"testing"

	"github.com/pavelnikolov/algorithms/algorithms/sorting/merge"
	"github.com/pavelnikolov/algorithms/algorithms/sorting/sorttest"
)

// TestMergeSort runs the shared sort conformance suite against merge.Sort.
func TestMergeSort(t *testing.T) {
	sorttest.Test(t, merge.Sort)
}

func BenchmarkMergeSort100(b *testing.B) {
	sorttest.Benchmark(b, 100, merge.Sort)
}

func BenchmarkMergeSort1000(b *testing.B) {
	sorttest.Benchmark(b, 1000, merge.Sort)
}

func BenchmarkMergeSort10000(b *testing.B) {
	sorttest.Benchmark(b, 10000, merge.Sort)
}

func BenchmarkMergeSort100000(b *testing.B) {
	sorttest.Benchmark(b, 100000, merge.Sort)
}

// TestMerge checks that Merge of two sorted slices yields a slice that is
// fully sorted and contains every element of both inputs.
func TestMerge(t *testing.T) {
	data := []struct {
		name string
		a1   []int
		a2   []int
	}{
		{
			name: "one_to_ten",
			a1:   []int{1, 3, 5, 7, 9},
			a2:   []int{2, 4, 6, 8, 10},
		},
		{
			name: "a1_empty",
			a1:   []int{},
			a2:   []int{2, 4, 6, 8, 10},
		},
		{
			name: "a2_empty",
			a1:   []int{1, 3, 5, 7, 9},
			a2:   []int{},
		},
	}
	for _, d := range data {
		a := merge.Merge(d.a1, d.a2)
		// Robustness: the merged result must not drop or duplicate elements.
		if len(a) != len(d.a1)+len(d.a2) {
			t.Fatalf("%s: merged length %d, want %d", d.name, len(a), len(d.a1)+len(d.a2))
		}
		// Bug fix: the original loop ran i < len(a)-2, which never compared
		// the final adjacent pair, so an out-of-order last element went
		// undetected.
		for i := 0; i+1 < len(a); i++ {
			if a[i] > a[i+1] {
				fmt.Println(a)
				t.Errorf("a[%d](%d) > a[%d](%d), expected to be less than or equal to", i, a[i], i+1, a[i+1])
				t.FailNow()
			}
		}
	}
}
// Mocked AWS X-Ray client plus end-to-end tests for the datasource package.
// XrayClientMock threads queryCalledWithRegion through every response so
// the tests can verify that per-query region settings reach the client.
// NOTE(review): the physical line breaks in this chunk fall mid-statement;
// the code is kept byte-for-byte and only comment lines are added at
// syntactically safe boundaries.
package datasource_test import ( "context" "encoding/json" "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/xray" "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/x-ray-datasource/pkg/datasource" "github.com/stretchr/testify/require" ) type XrayClientMock struct { queryCalledWithRegion string } func (client *XrayClientMock) GetServiceGraphPagesWithContext(ctx aws.Context, input *xray.GetServiceGraphInput, fn func(*xray.GetServiceGraphOutput, bool) bool, opts ...request.Option) error { serviceName := "mockServiceName" if client.queryCalledWithRegion != "" { serviceName = serviceName + "-" + client.queryCalledWithRegion } output := &xray.GetServiceGraphOutput{ NextToken: nil, Services: []*xray.Service{ { Name: aws.String(serviceName), AccountId: aws.String("testAccount1"), }, { Name: aws.String(serviceName + "2"), AccountId: aws.String("testAccount2"), }, }, } fn(output, false) return nil } func (client *XrayClientMock) GetTraceGraphPages(input *xray.GetTraceGraphInput, fn func(*xray.GetTraceGraphOutput, bool) bool) error { output := &xray.GetTraceGraphOutput{ NextToken: nil, Services: []*xray.Service{ {}, }, } fn(output, false) return nil } func makeSummary(region string) *xray.TraceSummary { http := &xray.Http{ ClientIp: aws.String("127.0.0.1"), HttpMethod: aws.String("GET"), HttpStatus: aws.Int64(200), HttpURL: aws.String("localhost"), } annotations := make(map[string][]*xray.ValueWithServiceIds) annotations["foo"] = []*xray.ValueWithServiceIds{{ AnnotationValue: &xray.AnnotationValue{}, ServiceIds: []*xray.ServiceId{}, }, { AnnotationValue: &xray.AnnotationValue{}, ServiceIds: []*xray.ServiceId{}, }} annotations["bar"] = []*xray.ValueWithServiceIds{{ AnnotationValue: &xray.AnnotationValue{}, ServiceIds: []*xray.ServiceId{}, }} traceId := "id1" if region != "" { traceId = "id-" + region } return &xray.TraceSummary{ Annotations: annotations, Duration:
aws.Float64(10.5), Http: http, Id: aws.String(traceId), ErrorRootCauses: []*xray.ErrorRootCause{ { ClientImpacting: nil, Services: []*xray.ErrorRootCauseService{ { Name: aws.String("service_name_1"), Type: aws.String("service_type_1"), EntityPath: []*xray.ErrorRootCauseEntity{ { Exceptions: []*xray.RootCauseException{ { Name: aws.String("Test exception"), Message: aws.String("Test exception message"), }, }, }, }, }, }, }, }, FaultRootCauses: []*xray.FaultRootCause{ { ClientImpacting: nil, Services: []*xray.FaultRootCauseService{ { Name: aws.String("faulty_service_name_1"), Type: aws.String("faulty_service_type_1"), EntityPath: []*xray.FaultRootCauseEntity{ { Exceptions: []*xray.RootCauseException{ { Name: aws.String("Test fault"), Message: aws.String("Test fault message"), }, }, }, }, }, }, }, }, ResponseTimeRootCauses: []*xray.ResponseTimeRootCause{ { ClientImpacting: nil, Services: []*xray.ResponseTimeRootCauseService{ { Name: aws.String("response_service_name_1"), Type: aws.String("response_service_type_1"), EntityPath: []*xray.ResponseTimeRootCauseEntity{ {Name: aws.String("response_service_name_1")}, {Name: aws.String("response_sub_service_name_1")}, }, }, { Name: aws.String("response_service_name_2"), Type: aws.String("response_service_type_2"), EntityPath: []*xray.ResponseTimeRootCauseEntity{ {Name: aws.String("response_service_name_2")}, {Name: aws.String("response_sub_service_name_2")}, }, }, }, }, }, } } func (client *XrayClientMock) GetTraceSummariesPages(input *xray.GetTraceSummariesInput, fn func(*xray.GetTraceSummariesOutput, bool) bool) error { resp, err := client.GetTraceSummariesWithContext(context.Background(), input) fn(resp, true) return err } func (client *XrayClientMock) GetTraceSummariesWithContext(ctx aws.Context, input *xray.GetTraceSummariesInput, opts ...request.Option) (*xray.GetTraceSummariesOutput, error) { // To make sure we don't panic in this case.
// GetTraceSummariesWithContext returns one fully-populated summary plus one
// whose Http sub-fields are all nil, exercising the nil-handling path.
nilHttpSummary := makeSummary(client.queryCalledWithRegion) nilHttpSummary.Http.ClientIp = nil nilHttpSummary.Http.HttpURL = nil nilHttpSummary.Http.HttpMethod = nil nilHttpSummary.Http.HttpStatus = nil output := &xray.GetTraceSummariesOutput{ ApproximateTime: aws.Time(time.Now()), TraceSummaries: []*xray.TraceSummary{makeSummary(client.queryCalledWithRegion), nilHttpSummary}, } return output, nil } func (client *XrayClientMock) BatchGetTraces(input *xray.BatchGetTracesInput) (*xray.BatchGetTracesOutput, error) { if *input.TraceIds[0] == "notFound" { return &xray.BatchGetTracesOutput{ Traces: []*xray.Trace{}, }, nil } traceId := "trace1" if client.queryCalledWithRegion != "" { traceId = traceId + "-" + client.queryCalledWithRegion } return &xray.BatchGetTracesOutput{ Traces: []*xray.Trace{{ Duration: aws.Float64(1.0), Id: aws.String(traceId), Segments: []*xray.Segment{ { Id: aws.String("segment1"), Document: aws.String("{}"), }, }, }}, }, nil } func (client *XrayClientMock) GetTimeSeriesServiceStatisticsPagesWithContext(context aws.Context, input *xray.GetTimeSeriesServiceStatisticsInput, fn func(*xray.GetTimeSeriesServiceStatisticsOutput, bool) bool, options ...request.Option) error { firstRow := 0 if client.queryCalledWithRegion != "" { firstRow = 13 } output := &xray.GetTimeSeriesServiceStatisticsOutput{ TimeSeriesServiceStatistics: []*xray.TimeSeriesServiceStatistics{ makeTimeSeriesRow(firstRow, Edge), makeTimeSeriesRow(1, Edge), makeTimeSeriesRow(2, Service), }, } fn(output, false) return nil } const insightSummary = "some text. some more."
// GetInsightSummaries returns one CLOSED insight with an EndTime and one
// ACTIVE insight with a nil EndTime; insight IDs embed the mock region.
func (client *XrayClientMock) GetInsightSummaries(input *xray.GetInsightSummariesInput) (*xray.GetInsightSummariesOutput, error) { return &xray.GetInsightSummariesOutput{ InsightSummaries: []*xray.InsightSummary{ { Summary: aws.String(insightSummary), StartTime: aws.Time(time.Date(2020, 6, 20, 1, 0, 1, 0, time.UTC)), EndTime: aws.Time(time.Date(2020, 6, 20, 1, 20, 1, 0, time.UTC)), State: aws.String("CLOSED"), Categories: aws.StringSlice([]string{"FAULT", "ERROR"}), GroupName: aws.String("Grafana"), RootCauseServiceId: &xray.ServiceId{Name: aws.String("graf"), Type: aws.String("AWS")}, TopAnomalousServices: []*xray.AnomalousService{{ServiceId: &xray.ServiceId{Name: aws.String("graf2"), Type: aws.String("AWS2")}}}, InsightId: aws.String("id-" + client.queryCalledWithRegion), }, { Summary: aws.String(insightSummary), StartTime: aws.Time(time.Date(2020, 6, 20, 1, 0, 1, 0, time.UTC)), EndTime: nil, Categories: aws.StringSlice([]string{"a", "b"}), State: aws.String("ACTIVE"), GroupName: aws.String("Grafana"), RootCauseServiceId: &xray.ServiceId{Name: aws.String("graf"), Type: aws.String("AWS")}, TopAnomalousServices: []*xray.AnomalousService{{ServiceId: &xray.ServiceId{Name: aws.String("graf2"), Type: aws.String("AWS2")}}}, InsightId: aws.String("id-2-" + client.queryCalledWithRegion), }, }, }, nil } func (client *XrayClientMock) GetGroupsPages(input *xray.GetGroupsInput, fn func(*xray.GetGroupsOutput, bool) bool) error { output := &xray.GetGroupsOutput{ Groups: []*xray.GroupSummary{ { GroupARN: aws.String("arn:1"), GroupName: aws.String("Default"), FilterExpression: aws.String(""), }, { GroupARN: aws.String("arn:2"), GroupName: aws.String("GroupTest"), FilterExpression: aws.String("service(\"test\")"), }, }, } fn(output, false) return nil } type StatsType string const ( Edge = "edge" Service = "service" ) func makeTimeSeriesRow(index int, statsType StatsType) *xray.TimeSeriesServiceStatistics { stats := &xray.TimeSeriesServiceStatistics{ EdgeSummaryStatistics: nil,
ResponseTimeHistogram: []*xray.HistogramEntry{ { Count: aws.Int64(5), Value: aws.Float64(42.42), }, }, ServiceSummaryStatistics: nil, Timestamp: aws.Time(time.Date(2020, 6, 20, 1, index, 1, 0, time.UTC)), } if statsType == "edge" { stats.EdgeSummaryStatistics = &xray.EdgeStatistics{ ErrorStatistics: &xray.ErrorStatistics{ OtherCount: aws.Int64(10), ThrottleCount: aws.Int64(10), TotalCount: aws.Int64(20), }, FaultStatistics: &xray.FaultStatistics{ OtherCount: aws.Int64(15), TotalCount: aws.Int64(20), }, OkCount: aws.Int64(40), TotalCount: aws.Int64(80), TotalResponseTime: aws.Float64(3.14), } } else { stats.ServiceSummaryStatistics = &xray.ServiceStatistics{ ErrorStatistics: &xray.ErrorStatistics{ OtherCount: aws.Int64(10), ThrottleCount: aws.Int64(11), TotalCount: aws.Int64(20), }, FaultStatistics: &xray.FaultStatistics{ OtherCount: aws.Int64(15), TotalCount: aws.Int64(20), }, OkCount: aws.Int64(40), TotalCount: aws.Int64(80), TotalResponseTime: aws.Float64(3.14), } } return stats } func xrayClientFactory(pluginContext *backend.PluginContext, requestSettings datasource.RequestSettings) (datasource.XrayClient, error) { return &XrayClientMock{ queryCalledWithRegion: requestSettings.Region, }, nil } func queryDatasource(ds *datasource.Datasource, queryType string, query interface{}) (*backend.QueryDataResponse, error) { jsonData, _ := json.Marshal(query) return ds.QueryMux.QueryData( context.Background(), &backend.QueryDataRequest{Queries: []backend.DataQuery{{RefID: "A", QueryType: queryType, JSON: jsonData}}}, ) } type MockSender struct { fn func(resp *backend.CallResourceResponse) } func (sender *MockSender) Send(resp *backend.CallResourceResponse) error { sender.fn(resp) return nil } func queryDatasourceResource(ds *datasource.Datasource, req *backend.CallResourceRequest) (*backend.CallResourceResponse, error) { var resp *backend.CallResourceResponse err := ds.ResourceMux.CallResource( context.Background(), req, &MockSender{fn: func(r
*backend.CallResourceResponse) { resp = r }}, ) return resp, err } func TestDatasource(t *testing.T) { ds := datasource.NewDatasource(xrayClientFactory) t.Run("getInsightSummaries query", func(t *testing.T) { // Insight with nil EndTime should not throw error response, err := queryDatasource(ds, datasource.QueryGetInsights, datasource.GetInsightsQueryData{State: "All", Group: &xray.Group{GroupName: aws.String("Grafana")}}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) // it should remove the first sentence from the summary require.Equal(t, "some more.", response.Responses["A"].Frames[0].Fields[1].At(0)) // Insight with nil EndTime should return the whole Summary require.Equal(t, insightSummary, response.Responses["A"].Frames[0].Fields[1].At(1)) // State should be in Title case require.Equal(t, "Active", response.Responses["A"].Frames[0].Fields[2].At(1)) // Categories should be converted to Title case and one string require.Equal(t, "Fault, Error", response.Responses["A"].Frames[0].Fields[3].At(0)) // duration should be 20 minutes which is 1 200 000 milliseconds require.Equal(t, int64(1200000), response.Responses["A"].Frames[0].Fields[4].At(0)) // RootCauseServiceId should be Name (Type) require.Equal(t, "graf (AWS)", response.Responses["A"].Frames[0].Fields[5].At(0)) // TopAnomalousServices should be Name (Type) require.Equal(t, "graf2 (AWS2)", response.Responses["A"].Frames[0].Fields[6].At(0)) }) t.Run("getInsightSummaries query with different region", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetInsights, datasource.GetInsightsQueryData{State: "All", Group: &xray.Group{GroupName: aws.String("Grafana")}, Region: "us-east-1"}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) frame := response.Responses["A"].Frames[0] require.Equal(t, "id-us-east-1", *frame.Fields[0].At(0).(*string)) }) t.Run("getTrace query", func(t *testing.T) { response, err := queryDatasource(ds,
// getTrace and getTimeSeriesServiceStatistics test cases follow.
datasource.QueryGetTrace, datasource.GetTraceQueryData{Query: "traceID"}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) require.Equal(t, 2, len(response.Responses["A"].Frames)) require.Equal(t, "TraceGraph", response.Responses["A"].Frames[1].Name) require.Equal(t, 1, response.Responses["A"].Frames[1].Fields[0].Len()) require.Equal(t, 1, response.Responses["A"].Frames[0].Fields[0].Len()) require.JSONEq( t, "{\"Duration\":1,\"Id\":\"trace1\",\"LimitExceeded\":null,\"Segments\":[{\"Document\":\"{}\",\"Id\":\"segment1\"}]}", response.Responses["A"].Frames[0].Fields[0].At(0).(string), ) }) t.Run("getTrace query with different region", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetTrace, datasource.GetTraceQueryData{Query: "traceID", Region: "us-east-1"}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) require.JSONEq( t, "{\"Duration\":1,\"Id\":\"trace1-us-east-1\",\"LimitExceeded\":null,\"Segments\":[{\"Document\":\"{}\",\"Id\":\"segment1\"}]}", response.Responses["A"].Frames[0].Fields[0].At(0).(string), ) }) t.Run("getTrace query trace not found", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetTrace, datasource.GetTraceQueryData{Query: "notFound"}) require.NoError(t, err) require.Error(t, response.Responses["A"].Error, "trace not found") }) t.Run("getTimeSeriesServiceStatistics query with no columns selected", func(t *testing.T) { response, err := queryDatasource( ds, datasource.QueryGetTimeSeriesServiceStatistics, datasource.GetTimeSeriesServiceStatisticsQueryData{Query: "traceID", Columns: []string{}}, ) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) require.Equal(t, 3, response.Responses["A"].Frames[0].Fields[0].Len()) require.Equal(t, 6, len(response.Responses["A"].Frames)) require.Equal(t, "Time", response.Responses["A"].Frames[0].Fields[0].Name) require.Equal(t, "Throttle Count",
response.Responses["A"].Frames[0].Fields[1].Name) require.Equal(t, "Average Response Time", response.Responses["A"].Frames[5].Fields[1].Name) require.Equal( t, time.Date(2020, 6, 20, 1, 0, 1, 0, time.UTC).String(), response.Responses["A"].Frames[0].Fields[0].At(0).(*time.Time).String(), ) require.Equal(t, int64(10), *response.Responses["A"].Frames[0].Fields[1].At(0).(*int64)) require.Equal(t, int64(11), *response.Responses["A"].Frames[0].Fields[1].At(2).(*int64)) require.Equal(t, 3.14/80, *response.Responses["A"].Frames[5].Fields[1].At(0).(*float64)) }) t.Run("getTimeSeriesServiceStatistics query with region", func(t *testing.T) { response, err := queryDatasource( ds, datasource.QueryGetTimeSeriesServiceStatistics, datasource.GetTimeSeriesServiceStatisticsQueryData{Query: "traceID", Columns: []string{}, Region: "us-east-1"}, ) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) // expect different time as a stand-in for different results based on region, notice 13 require.Equal( t, time.Date(2020, 6, 20, 1, 13, 1, 0, time.UTC).String(), response.Responses["A"].Frames[0].Fields[0].At(0).(*time.Time).String(), ) }) t.Run("getTimeSeriesServiceStatistics query returns filtered columns", func(t *testing.T) { response, err := queryDatasource( ds, datasource.QueryGetTimeSeriesServiceStatistics, datasource.GetTimeSeriesServiceStatisticsQueryData{Query: "traceID", Columns: []string{"OkCount", "FaultStatistics.TotalCount"}}, ) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) require.Equal(t, 2, len(response.Responses["A"].Frames)) require.Equal(t, "Success Count", response.Responses["A"].Frames[0].Fields[1].Name) require.Equal(t, "Fault Count", response.Responses["A"].Frames[1].Fields[1].Name) }) t.Run("getTimeSeriesServiceStatistics query with all columns selected", func(t *testing.T) { response, err := queryDatasource( ds, datasource.QueryGetTimeSeriesServiceStatistics, datasource.GetTimeSeriesServiceStatisticsQueryData{Query:
"traceID", Columns: []string{"all"}}, ) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) require.Equal(t, 3, response.Responses["A"].Frames[0].Fields[0].Len()) require.Equal(t, 6, len(response.Responses["A"].Frames)) require.Equal(t, "Time", response.Responses["A"].Frames[0].Fields[0].Name) require.Equal(t, "Throttle Count", response.Responses["A"].Frames[0].Fields[1].Name) require.Equal(t, "Average Response Time", response.Responses["A"].Frames[5].Fields[1].Name) require.Equal( t, time.Date(2020, 6, 20, 1, 0, 1, 0, time.UTC).String(), response.Responses["A"].Frames[0].Fields[0].At(0).(*time.Time).String(), ) require.Equal(t, int64(10), *response.Responses["A"].Frames[0].Fields[1].At(0).(*int64)) require.Equal(t, int64(11), *response.Responses["A"].Frames[0].Fields[1].At(2).(*int64)) require.Equal(t, 3.14/80, *response.Responses["A"].Frames[5].Fields[1].At(0).(*float64)) }) t.Run("getTraceSummaries query", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetTraceSummaries, datasource.GetTraceSummariesQueryData{Query: ""}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) frame := response.Responses["A"].Frames[0] require.Equal(t, 2, frame.Fields[0].Len()) require.Equal(t, "id1", *frame.Fields[0].At(0).(*string)) require.Equal(t, "GET", *frame.Fields[1].At(0).(*string)) require.Equal(t, 10.5, *frame.Fields[3].At(0).(*float64)) require.Equal(t, int64(3), *frame.Fields[6].At(0).(*int64)) }) t.Run("getTraceSummaries query with region", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetTraceSummaries, datasource.GetTraceSummariesQueryData{Query: "", Region: "us-east-1"}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) frame := response.Responses["A"].Frames[0] require.Equal(t, "id-us-east-1", *frame.Fields[0].At(0).(*string)) }) t.Run("getServiceMap query", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetServiceMap,
// Service-map, analytics root-cause, and groups test cases follow.
datasource.GetServiceMapQueryData{Group: &xray.Group{}}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) // Bit simplistic test but right now we just send each service as a json to frontend and do transform there. frame := response.Responses["A"].Frames[0] require.Equal(t, 2, frame.Fields[0].Len()) // 2 because of the 2 services added to the mock }) t.Run("getServiceMap query with region", func(t *testing.T) { response, err := queryDatasource(ds, datasource.QueryGetServiceMap, datasource.GetServiceMapQueryData{Group: &xray.Group{}, Region: "us-east-1"}) require.NoError(t, err) require.NoError(t, response.Responses["A"].Error) // Bit simplistic test but right now we just send each service as a json to frontend and do transform there. frame := response.Responses["A"].Frames[0] require.Equal(t, 2, frame.Fields[0].Len()) require.True(t, strings.Contains(frame.Fields[0].At(0).(string), "mockServiceName-us-east-1")) }) // // RootCauseError // t.Run("getAnalyticsRootCauseErrorService query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseErrorService, [][]interface{}{ {"service_name_1 (service_type_1)", int64(8), float64(100)}, }) }) t.Run("getAnalyticsRootCauseErrorPath query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseErrorPath, [][]interface{}{ {"service_name_1 (service_type_1) -> Test exception", int64(8), float64(100)}, }) }) t.Run("getAnalyticsRootCauseErrorMessage query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseErrorMessage, [][]interface{}{ {"Test exception message", int64(8), float64(100)}, }) }) // // RootCauseFault // t.Run("getAnalyticsRootCauseFaultService query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseFaultService, [][]interface{}{ {"faulty_service_name_1 (faulty_service_type_1)", int64(8), float64(100)}, }) }) t.Run("getAnalyticsRootCauseFaultPath query", func(t *testing.T) {
testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseFaultPath, [][]interface{}{ {"faulty_service_name_1 (faulty_service_type_1) -> Test fault", int64(8), float64(100)}, }) }) t.Run("getAnalyticsRootCauseFaultMessage query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseFaultMessage, [][]interface{}{ {"Test fault message", int64(8), float64(100)}, }) }) // // RootCauseResponseTime // t.Run("getAnalyticsRootCauseResponseTimeService query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseResponseTimeService, [][]interface{}{ {"response_service_name_2 (response_service_type_2)", int64(8), float64(100)}, }) }) t.Run("getAnalyticsRootCauseResponseTimePath query", func(t *testing.T) { testAnalytics(t, ds, datasource.QueryGetAnalyticsRootCauseResponseTimePath, [][]interface{}{ { "response_service_name_1 (response_service_type_1) -> response_sub_service_name_1 => response_service_name_2 (response_service_type_2) -> response_sub_service_name_2", int64(8), float64(100), }, }) }) t.Run("getGroups query", func(t *testing.T) { resp, err := queryDatasourceResource(ds, &backend.CallResourceRequest{ Path: "/groups", Method: "GET", }) require.NoError(t, err) var data []*xray.GroupSummary err = json.Unmarshal(resp.Body, &data) require.NoError(t, err) require.Equal(t, 2, len(data)) require.Equal(t, "Default", *data[0].GroupName) require.Equal(t, "GroupTest", *data[1].GroupName) }) } func testAnalytics(t *testing.T, ds *datasource.Datasource, queryType string, data [][]interface{}) { response, err := queryDatasource(ds, queryType, datasource.GetTraceSummariesQueryData{Query: ""}) require.NoError(t, err) checkResponse(t, response, data) } func checkResponse(t *testing.T, response *backend.QueryDataResponse, data [][]interface{}) { require.NoError(t, response.Responses["A"].Error) frame := response.Responses["A"].Frames[0] require.Equal(t, len(data), frame.Fields[0].Len()) for rowIndex, row := range data { for
columnIndex, column := range row { require.Equal(t, column, frame.Fields[columnIndex].At(rowIndex)) } } }
package views import ( v1 "github.com/jenkins-x/jx-api/v4/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/octant-jx/pkg/common/viewhelpers" "github.com/vmware-tanzu/octant/pkg/view/component" ) func ToEnvironmentNameLink(r *v1.Environment) component.Component { name := ToEnvironmentName(r) ref := r.Name return component.NewLink(name, name, ref) } func ToEnvironmentNameComponent(r *v1.Environment) component.Component { return component.NewText(ToEnvironmentName(r)) } func ToEnvironmentName(r *v1.Environment) string { s := &r.Spec l := s.Label if l == "" { l = r.Name } return l } func ToEnvironmentSource(r *v1.Environment) component.Component { return viewhelpers.NewMarkdownText(viewhelpers.ToGitLinkMarkdown(r.Spec.Source.URL)) } func ToEnvironmentNamespace(r *v1.Environment) component.Component { spec := &r.Spec prefix := "" if r.Spec.RemoteCluster { prefix = "Remote " } return component.NewText(prefix + spec.Namespace) } func ToEnvironmentRemote(r *v1.Environment) component.Component { text := "" if r.Spec.RemoteCluster { // TODO switch to checkbox when we can use html/markdown views https://github.com/vmware-tanzu/octant/issues/882 text = "yes" } return component.NewText(text) } func ToEnvironmentPromote(r *v1.Environment) component.Component { return component.NewText(string(r.Spec.PromotionStrategy)) }
// Copyright 2021 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Package calc contains common functions used in the Calculator app. package calc import ( "context" "fmt" "time" "chromiumos/tast/common/action" "chromiumos/tast/errors" "chromiumos/tast/local/chrome" "chromiumos/tast/local/chrome/uiauto" "chromiumos/tast/local/chrome/webutil" ) // UIConn returns a connection to the Calculator app HTML page, // where JavaScript can be executed to simulate interactions with the UI. // The caller should close the returned connection. e.g. defer calcConn.Close(). func UIConn(ctx context.Context, cr *chrome.Chrome) (*chrome.Conn, error) { // Establish a Chrome connection to the Calculator app and wait for it to finish loading. targetURL := "https://calculator.apps.chrome/" appConn, err := cr.NewConnForTarget(ctx, chrome.MatchTargetURL(targetURL)) if err != nil { return nil, errors.Wrapf(err, "failed to connect to target %q", targetURL) } if err := webutil.WaitForQuiescence(ctx, appConn, 10*time.Second); err != nil { return nil, errors.Wrap(err, "failed to wait for Calculator app to finish loading") } return appConn, nil } // TapKey taps key on the app by executing click function on located web element. // keyName is aria-label of the element, not displayed text. e.g. Do not use '+' but 'plus'. func TapKey(appConn *chrome.Conn, keyName string) uiauto.Action { script := fmt.Sprintf(`document.querySelector(".keypad canvas[aria-role='button'][aria-label='%s']").click()`, keyName) return func(ctx context.Context) error { if err := appConn.Eval(ctx, script, nil); err != nil { return errors.Wrapf(err, "failed to tap key %q", keyName) } return nil } } // WaitForCalculateResult waits until the calculation result is expected. 
func WaitForCalculateResult(appConn *chrome.Conn, expectedResult string) uiauto.Action { script := `document.querySelector(".calculator-display").innerText` var result string return action.RetrySilently(3, func(ctx context.Context) error { if err := appConn.Eval(ctx, script, &result); err != nil { return errors.Wrap(err, "failed to get calculation result") } if result != expectedResult { return errors.Errorf("Wrong calculation result: got %q; want %q", result, expectedResult) } return nil }, time.Second) }
package main import ( "encoding/json" "fmt" "log" "net/http" "time" "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" _ "github.com/go-sql-driver/mysql" "github.com/gorilla/websocket" "github.com/jinzhu/gorm" _ "github.com/jinzhu/gorm/dialects/sqlite" ) var db *gorm.DB var err error type Message struct { gorm.Model From string `json:"from"` To string `json:"to"` Text string `json:"text"` Timestamp int64 `json:"time"` } var upgrader = websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, CheckOrigin: func(r *http.Request) bool { return true }, } // WebSocket message reader func reader(conn *websocket.Conn) { for { messageType, p, err := conn.ReadMessage() if err != nil { log.Println(err) return } var newMessage Message json.Unmarshal([]byte(p), &newMessage) dbMessage := Message{From: newMessage.From, To: newMessage.To, Text: newMessage.Text, Timestamp: time.Now().Unix()} db.NewRecord(dbMessage) db.Create(&dbMessage) if err := conn.WriteMessage(messageType, p); err != nil { log.Println(err) return } } } // WebSocket endpoint instantiation func serveWs(w http.ResponseWriter, r *http.Request) { fmt.Println(r.Host) ws, err := upgrader.Upgrade(w, r, nil) if err != nil { log.Println(err) } reader(ws) } func main() { //database instantiation db, err = gorm.Open("sqlite3", "./gorm.db") defer db.Close() db.AutoMigrate(&Message{}) //server innstantiation r := gin.Default() r.Use(cors.Default()) r.GET("/", func(c *gin.Context) { fmt.Fprintf(c.Writer, "Starting server") }) r.GET("/ws", func(c *gin.Context) { serveWs(c.Writer, c.Request) }) r.GET("/messages/:email", GetMessages) r.Run("localhost:8080") } func GetMessages(c *gin.Context) { email := c.Params.ByName("email") var allUserMessages []Message if err := db.Where(Message{From: email}).Or(Message{To: email}).Find(&allUserMessages).Error; err != nil { c.AbortWithStatus(404) fmt.Println(err) } else { c.JSON(200, allUserMessages) } }
package v1alpha1

import (
	"Hybrid_Cluster/apis"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
)

// ExampleV1Alpha1Interface exposes the typed clients provided by this
// package; currently only the namespaced KubeFedCluster client.
type ExampleV1Alpha1Interface interface {
	KubeFedCluster(namespace string) KubeFedClusterInterface
}

// ExampleV1Alpha1Client is a REST client configured for the kubefed
// core group-version.
type ExampleV1Alpha1Client struct {
	restClient rest.Interface
}

// NewForConfig builds an ExampleV1Alpha1Client from the given rest.Config.
// The config is copied, so the caller's config is not mutated.
func NewForConfig(c *rest.Config) (*ExampleV1Alpha1Client, error) {
	// NOTE(review): AddToScheme's return value is discarded here; if it
	// returns an error it should be checked — confirm its signature.
	apis.AddToScheme(scheme.Scheme)
	config := *c
	// Despite the package name, requests target the kubefed core/v1beta1
	// group-version taken from fedv1b1.
	config.ContentConfig.GroupVersion = &schema.GroupVersion{Group: fedv1b1.SchemeGroupVersion.Group, Version: fedv1b1.SchemeGroupVersion.Version}
	config.APIPath = "/apis"
	//config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
	config.NegotiatedSerializer = serializer.NewCodecFactory(scheme.Scheme)
	config.UserAgent = rest.DefaultKubernetesUserAgent()
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &ExampleV1Alpha1Client{restClient: client}, nil
}

// KubeFedCluster returns a KubeFedCluster client scoped to the given
// namespace, backed by this client's shared REST client.
func (c *ExampleV1Alpha1Client) KubeFedCluster(namespace string) KubeFedClusterInterface {
	return &KubeFedClusterClient{
		restClient: c.restClient,
		ns:         namespace,
	}
}
// Mocked database/sql/driver rows for sqlmock: Rows is the user-facing
// builder, rowSets adapts one or more Rows to driver.Rows and simulates
// database/sql's shared-memory semantics for sql.RawBytes (see
// invalidateRaw). Kept byte-for-byte; only comment lines are added at
// syntactically safe boundaries, since this chunk's physical line breaks
// fall mid-function.
package sqlmock import ( "bytes" "database/sql/driver" "encoding/csv" "errors" "fmt" "io" "strings" ) const invalidate = "☠☠☠ MEMORY OVERWRITTEN ☠☠☠ " // CSVColumnParser is a function which converts trimmed csv // column string to a []byte representation. Currently // transforms NULL to nil var CSVColumnParser = func(s string) interface{} { switch { case strings.ToLower(s) == "null": return nil } return []byte(s) } type rowSets struct { sets []*Rows pos int ex *ExpectedQuery raw [][]byte } func (rs *rowSets) Columns() []string { return rs.sets[rs.pos].cols } func (rs *rowSets) Close() error { rs.invalidateRaw() rs.ex.rowsWereClosed = true return rs.sets[rs.pos].closeErr } // advances to next row func (rs *rowSets) Next(dest []driver.Value) error { r := rs.sets[rs.pos] r.pos++ rs.invalidateRaw() if r.pos > len(r.rows) { return io.EOF // per interface spec } for i, col := range r.rows[r.pos-1] { if b, ok := rawBytes(col); ok { rs.raw = append(rs.raw, b) dest[i] = b continue } dest[i] = col } return r.nextErr[r.pos-1] } // transforms to debuggable printable string func (rs *rowSets) String() string { if rs.empty() { return "with empty rows" } msg := "should return rows:\n" if len(rs.sets) == 1 { for n, row := range rs.sets[0].rows { msg += fmt.Sprintf(" row %d - %+v\n", n, row) } return strings.TrimSpace(msg) } for i, set := range rs.sets { msg += fmt.Sprintf(" result set: %d\n", i) for n, row := range set.rows { msg += fmt.Sprintf(" row %d - %+v\n", n, row) } } return strings.TrimSpace(msg) } func (rs *rowSets) empty() bool { for _, set := range rs.sets { if len(set.rows) > 0 { return false } } return true } func rawBytes(col driver.Value) (_ []byte, ok bool) { val, ok := col.([]byte) if !ok || len(val) == 0 { return nil, false } // Copy the bytes from the mocked row into a shared raw buffer, which we'll replace the content of later // This allows scanning into sql.RawBytes to correctly become invalid on subsequent calls to Next(), Scan() or Close() b := make([]byte,
// (continuation of rawBytes) each []byte column is copied so a later
// invalidateRaw can overwrite it, mimicking driver-owned memory.
len(val)) copy(b, val) return b, true } // Bytes that could have been scanned as sql.RawBytes are only valid until the next call to Next, Scan or Close. // If those occur, we must replace their content to simulate the shared memory to expose misuse of sql.RawBytes func (rs *rowSets) invalidateRaw() { // Replace the content of slices previously returned b := []byte(invalidate) for _, r := range rs.raw { copy(r, bytes.Repeat(b, len(r)/len(b)+1)) } // Start with new slices for the next scan rs.raw = nil } // Rows is a mocked collection of rows to // return for Query result type Rows struct { converter driver.ValueConverter cols []string def []*Column rows [][]driver.Value pos int nextErr map[int]error closeErr error } // NewRows allows Rows to be created from a // sql driver.Value slice or from the CSV string and // to be used as sql driver.Rows. // Use Sqlmock.NewRows instead if using a custom converter func NewRows(columns []string) *Rows { return &Rows{ cols: columns, nextErr: make(map[int]error), converter: driver.DefaultParameterConverter, } } // CloseError allows to set an error // which will be returned by rows.Close // function. // // The close error will be triggered only in cases // when rows.Next() EOF was not yet reached, that is // a default sql library behavior func (r *Rows) CloseError(err error) *Rows { r.closeErr = err return r } // RowError allows to set an error // which will be returned when a given // row number is read func (r *Rows) RowError(row int, err error) *Rows { r.nextErr[row] = err return r } // AddRow composed from database driver.Value slice // return the same instance to perform subsequent actions.
// Note that the number of values must match the number // of columns func (r *Rows) AddRow(values ...driver.Value) *Rows { if len(values) != len(r.cols) { panic("Expected number of values to match number of columns") } row := make([]driver.Value, len(r.cols)) for i, v := range values { // Convert user-friendly values (such as int or driver.Valuer) // to database/sql native value (driver.Value such as int64) var err error v, err = r.converter.ConvertValue(v) if err != nil { panic(fmt.Errorf( "row #%d, column #%d (%q) type %T: %s", len(r.rows)+1, i, r.cols[i], values[i], err, )) } row[i] = v } r.rows = append(r.rows, row) return r } // AddRows adds multiple rows composed from database driver.Value slice and // returns the same instance to perform subsequent actions. func (r *Rows) AddRows(values ...[]driver.Value) *Rows { for _, value := range values { r.AddRow(value...) } return r } // FromCSVString build rows from csv string. // return the same instance to perform subsequent actions. // Note that the number of values must match the number // of columns func (r *Rows) FromCSVString(s string) *Rows { res := strings.NewReader(strings.TrimSpace(s)) csvReader := csv.NewReader(res) for { res, err := csvReader.Read() if err != nil { if errors.Is(err, io.EOF) { break } panic(fmt.Sprintf("Parsing CSV string failed: %s", err.Error())) } row := make([]driver.Value, len(r.cols)) for i, v := range res { row[i] = CSVColumnParser(strings.TrimSpace(v)) } r.rows = append(r.rows, row) } return r }
package employee type IEmployee interface { GetName() string SetName(name string) Accept(visitor IVisitor) } //////////////////////////////////////// type employee struct { name string } func NewEmployee() *employee { return new(employee) } func (e *employee)GetName() string { return e.name } func (e *employee)SetName(name string) { e.name = name } func (e *employee)Accept(visitor IVisitor) { panic("not implement") } ///////////////////////////////////////////// type ICommonEmployee interface { IEmployee GetJob() string SetJob(job string) } type commonEmployee struct { employee job string } func NewCommonEmployee() *commonEmployee { return new(commonEmployee) } func (c *commonEmployee) GetJob() string { return c.job } func (c *commonEmployee) SetJob(job string) { c.job = job } func (c *commonEmployee) Accept(visitor IVisitor) { visitor.Visit(c) } /////////////////////////////////////////////////////// type IManager interface { IEmployee GetPer() string SetPer(job string) } type manager struct { employee per string } func NewManager() *manager { return new(manager) } func (c *manager) GetPer() string { return c.per } func (c *manager) SetPer(per string) { c.per = per } func (c *manager) Accept(visitor IVisitor) { visitor.Visit(c) }
package remove import ( "errors" "store" "sync" ) var ( mu sync.Mutex err error ) var wg sync.WaitGroup var removeErrorRemoved error = errors.New("Could not remove: This employee has already been removed.") var removeErrorNotFound error = errors.New("Could not remove: An employee with this id does not exist.") func RemoveEmployeesByIDEverywhere(id int) error { _, ok := (store.IdEmpMap)[id] if !ok { return removeErrorRemoved } err = nil wg.Add(4) go RemoveEmployeesFromList(id, &(store.Employees)) go RemoveEmployeesFromIdEmpMap(id, &(store.IdEmpMap)) go RemoveEmployeesFromDeptEmpMap(id, &(store.DeptEmpMap)) go RemoveEmployeesFromLocEmpMap(id, &(store.LocEmpMap)) wg.Wait() return err } func RemoveEmployeesFromList(id int, employees *([]store.Employee)) { defer wg.Done() mu.Lock() if err != nil { mu.Unlock() return } mu.Unlock() for i := 0; i < len(*employees); i++ { if ((*employees)[i]).GetID() == id { if ((*employees)[i]).There == false { mu.Lock() err = removeErrorRemoved mu.Unlock() } else { ((*employees)[i]).There = false } } } } func RemoveEmployeesFromIdEmpMap(id int, idEmpMap *map[int](store.Employee)) { defer wg.Done() mu.Lock() if err != nil { mu.Unlock() return } mu.Unlock() for mapId := range *idEmpMap { if mapId == id { newEmpl := (*idEmpMap)[mapId] if newEmpl.There == false { mu.Lock() err = removeErrorRemoved mu.Unlock() } newEmpl.There = false (*idEmpMap)[mapId] = newEmpl } } } func RemoveEmployeesFromDeptEmpMap(id int, deptEmpMap *map[string]*([](store.Employee))) { defer wg.Done() mu.Lock() if err != nil { mu.Unlock() return } mu.Unlock() for dept := range *deptEmpMap { for i := 0; i < len(*((*deptEmpMap)[dept])); i++ { if (*((*deptEmpMap)[dept]))[i].GetID() == id { if (*((*deptEmpMap)[dept]))[i].There == false { mu.Lock() err = removeErrorRemoved mu.Unlock() } (*((*deptEmpMap)[dept]))[i].There = false } } } } func RemoveEmployeesFromLocEmpMap(id int, locEmpMap *map[int]*([]store.Employee)) { defer wg.Done() mu.Lock() if err != nil { mu.Unlock() 
return } mu.Unlock() for pin := range *locEmpMap { for i := 0; i < len(*((*locEmpMap)[pin])); i++ { if (*((*locEmpMap)[pin]))[i].GetID() == id { if (*((*locEmpMap)[pin]))[i].There == false { mu.Lock() err = removeErrorRemoved mu.Unlock() } (*((*locEmpMap)[pin]))[i].There = false } } } }
package services

import "tax-calculator/models"

// ITaxServices declares the operations a tax-record service provides:
// listing, lookup by id, and creation.
type ITaxServices interface {
	// FindAll returns all available tax records.
	FindAll() []models.ITax
	// Load returns the tax record identified by id.
	Load(id string) models.ITax
	// Create stores the given tax record; the bool presumably reports
	// success — confirm against the implementation.
	Create(payload models.ITax) bool
}
package main

import "fmt"

// HeapPermutation prints every permutation of a using Heap's algorithm.
// The slice is permuted in place; the printed sequence is deterministic
// for a given input, and the slice is restored/left in a deterministic
// final state when the top-level call returns.
func HeapPermutation(a []int, size int) {
	// Base case: a single element is a complete permutation.
	// Fixed: the missing return caused a pointless extra recursion level
	// (HeapPermutation(a, 0)) plus a no-op swap at every leaf — output
	// was unchanged, but the work was wasted.
	if size == 1 {
		fmt.Println(a)
		return
	}
	for i := 0; i < size; i++ {
		HeapPermutation(a, size-1)
		// Heap's algorithm: for an odd sub-size swap the first and last
		// elements, for an even sub-size swap the i-th and last.
		if size%2 == 1 {
			a[0], a[size-1] = a[size-1], a[0]
		} else {
			a[i], a[size-1] = a[size-1], a[i]
		}
	}
}

func main() {
	a := []int{1, 2, 3, 4, 5}
	HeapPermutation(a, len(a))
}
package imgscale

import (
	"testing"

	"github.com/thatguystone/cog/check"
)

// TestArgsStrings checks that args.nameSuffix() renders each combination
// of scaling options (width, height, quality, crop, gravity, extension)
// into the expected file-name suffix.
func TestArgsStrings(t *testing.T) {
	c := check.New(t)

	// newInt yields a pointer for the optional *int option fields.
	newInt := func(i int) *int {
		return &i
	}

	tests := []struct {
		args       args
		query      string // NOTE(review): never populated and unused below — confirm whether query assertions were intended
		nameSuffix string
	}{
		// Zero-value args: empty suffix.
		{},
		// Width only.
		{
			args: args{
				W:   newInt(100),
				Ext: ".jpg",
			},
			nameSuffix: "-100x.jpg",
		},
		// Height only.
		{
			args: args{
				H:   newInt(100),
				Ext: ".jpg",
			},
			nameSuffix: "-x100.jpg",
		},
		// Width and height together.
		{
			args: args{
				W:   newInt(100),
				H:   newInt(100),
				Ext: ".jpg",
			},
			nameSuffix: "-100x100.jpg",
		},
		// Quality only.
		{
			args: args{
				Q:   newInt(50),
				Ext: ".jpg",
			},
			nameSuffix: "-q50.jpg",
		},
		// Crop with default gravity.
		{
			args: args{
				Crop: true,
				Ext:  ".jpg",
			},
			nameSuffix: "-c.jpg",
		},
		// Crop anchored north-west.
		{
			args: args{
				Crop:    true,
				Gravity: northWest,
				Ext:     ".jpg",
			},
			nameSuffix: "-cnw.jpg",
		},
		// Extension only.
		{
			args: args{
				Ext: ".png",
			},
			nameSuffix: ".png",
		},
	}

	for _, test := range tests {
		c.Equal(test.args.nameSuffix(), test.nameSuffix)
	}
}
// Copyright 2014 qiufeng-sun. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"util/logs"
	"util/run"

	"core/server"
)

// Reference logs via the blank identifier so the import stays alive even
// when no logs.Debug call is currently compiled in.
var _ = logs.Debug

// main entrance for gate
// -- transfer messages to the specific server assigned by the protobuf
//    target default value
// -- cache server info on the connection; server info is specified by the
//    to-client message
func main() {
	// NOTE(review): run.Recover(true) presumably recovers panics before
	// exit — confirm the semantics of the boolean flag in util/run.
	defer run.Recover(true)

	server.Run(NewGate())
}
package main

import (
	"bytes"
	"container/list"
	"fmt"
	"net"
)

// Client represents one connected chat user plus the channels that
// shuttle messages between its socket and the shared broadcast loop.
type Client struct {
	Name       string
	Incoming   chan string // messages to be written to this client's socket
	Outgoing   chan string // shared broadcast channel (messages from this client)
	Conn       net.Conn
	Quit       chan bool
	ClientList *list.List
}

// Read fills buffer from the connection, returning the byte count and
// whether the read succeeded. On error the client is closed and removed.
func (c *Client) Read(buffer []byte) (int, bool) {
	bytesRead, err := c.Conn.Read(buffer)
	if err != nil {
		c.Close()
		fmt.Println(err)
		return 0, false
	}
	fmt.Println("Read ", bytesRead, " bytes")
	return bytesRead, true
}

// Close signals the sender goroutine to stop, closes the socket and
// removes this client from the shared client list.
func (c *Client) Close() {
	c.Quit <- true
	c.Conn.Close()
	c.RemoveMe()
}

// Equal reports whether two clients share both name and connection.
func (c *Client) Equal(other *Client) bool {
	if bytes.Equal([]byte(c.Name), []byte(other.Name)) {
		if c.Conn == other.Conn {
			return true
		}
	}
	return false
}

// RemoveMe deletes this client from the client list.
func (c *Client) RemoveMe() {
	for entry := c.ClientList.Front(); entry != nil; {
		// Fixed: capture the successor before removing. list.Remove
		// zeroes the element's links, so entry.Next() after removal
		// returned nil and silently stopped the scan early.
		next := entry.Next()
		client := entry.Value.(Client)
		if c.Equal(&client) {
			fmt.Println("RemoveMe: ", c.Name)
			c.ClientList.Remove(entry)
		}
		entry = next
	}
}

// IOHandler broadcasts every incoming message to all connected clients.
func IOHandler(Incoming <-chan string, clientList *list.List) {
	for {
		input := <-Incoming
		for e := clientList.Front(); e != nil; e = e.Next() {
			client := e.Value.(Client)
			client.Incoming <- input
		}
	}
}

// ClientReader pumps messages from the client's socket into the
// broadcast channel until the connection drops or the client quits.
func ClientReader(client *Client) {
	buffer := make([]byte, 2048)
	for {
		n, ok := client.Read(buffer)
		if !ok {
			break
		}
		// Fixed: compare only the bytes actually received. The old code
		// compared the entire 2048-byte buffer against "/quit", which
		// could never match, so clients were unable to quit.
		if string(bytes.TrimRight(buffer[:n], "\r\n")) == "/quit" {
			client.Close()
			break
		}
		fmt.Println("ClientReader received ", client.Name, "> ", string(buffer[:n]))
		send := client.Name + ">> " + string(buffer[:n])
		client.Outgoing <- send
	}
	client.Outgoing <- client.Name + " has left chat"
	fmt.Println("ClientReader stopped for ", client.Name)
}

// ClientSender forwards broadcast messages to this client's socket and
// stops when the quit signal arrives.
func ClientSender(client *Client) {
	for {
		select {
		case buffer := <-client.Incoming:
			fmt.Println("Sending ", string(buffer), " to ", client.Name)
			client.Conn.Write([]byte(buffer))
		case <-client.Quit:
			fmt.Println("Client ", client.Name, " quitting")
			client.Conn.Close()
			// Fixed: a bare break only exits the select statement, so
			// the loop kept spinning on a closed connection; return
			// actually ends the goroutine.
			return
		}
	}
}

// ClientHandler performs the join handshake (the first message is the
// client's name), registers the client and starts its I/O goroutines.
func ClientHandler(conn net.Conn, ch chan string, clientList *list.List) {
	buffer := make([]byte, 1024)
	bytesRead, err := conn.Read(buffer)
	if err != nil {
		fmt.Println("Client connection error: ", err)
		return
	}
	name := string(buffer[0:bytesRead])
	newClient := &Client{
		Name:       name,
		Incoming:   make(chan string),
		Outgoing:   ch,
		Conn:       conn,
		Quit:       make(chan bool),
		ClientList: clientList,
	}
	go ClientSender(newClient)
	go ClientReader(newClient)
	clientList.PushBack(*newClient)
	ch <- string(name + " has joined the chat ")
}

func main() {
	clientList := list.New()
	in := make(chan string)
	go IOHandler(in, clientList)

	netListen, err := net.Listen("tcp", ":9988")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer netListen.Close()

	for {
		fmt.Println("Server is running...")
		connection, err := netListen.Accept()
		if err != nil {
			fmt.Println("Client error: ", err)
		} else {
			go ClientHandler(connection, in, clientList)
		}
	}
}
package core

import (
	"fmt"
	"math/rand"
	"time"
)

// Brick is a horizontal bar of '#' characters that bounces back and
// forth inside a fixed 100-cell row buffer.
type Brick struct {
	length  int       // number of '#' cells in the bar
	raw     [100]byte // render buffer: '#' for the bar, ' ' elsewhere
	dirBack bool      // true while the bar travels toward index 0
	isMove  bool      // cleared to stop the printRaw animation loop
}

// startPoint returns the index of the bar's leftmost '#' cell
// (0 when the buffer holds no '#' at all).
func (b *Brick) startPoint() int {
	for i, v := range b.raw {
		if v == '#' {
			return i
		}
	}
	return 0
}

// endPoint returns the index of the bar's rightmost '#' cell.
func (b *Brick) endPoint() int {
	return b.startPoint() + b.length - 1
}

// initRaw resets the brick: a random length within the configured
// bounds, bar flush left, moving to the right.
// NOTE(review): math/rand is never explicitly seeded here; before
// Go 1.20 the default source produces the same sequence every run —
// confirm whether that is intended.
func (b *Brick) initRaw(c *Config) {
	b.length = rand.Intn(c.MaxBrickLen-c.MinBrickLen+1) + c.MinBrickLen
	for i := 0; i < b.length; i++ {
		b.raw[i] = '#'
	}
	for i := b.length; i < len(b.raw); i++ {
		b.raw[i] = ' '
	}
	b.dirBack = false
	b.isMove = true
}

// makeRaw shifts the bar one cell in the current direction and reverses
// direction once the bar touches either edge of the buffer.
func (b *Brick) makeRaw() {
	s := b.startPoint()
	e := b.endPoint()
	if b.dirBack {
		// Shift the bar one cell toward index 0.
		for i := s; i <= e; i++ {
			b.raw[i], b.raw[i-1] = b.raw[i-1], b.raw[i]
		}
	} else {
		// Shift the bar one cell toward index 99.
		for i := e; i >= s; i-- {
			b.raw[i], b.raw[i+1] = b.raw[i+1], b.raw[i]
		}
	}
	if b.raw[99] == '#' || b.raw[0] == '#' {
		b.dirBack = !b.dirBack
	}
}

// printRaw animates the brick on a single console line (via "\r")
// until isMove is cleared, redrawing every c.MoveDur milliseconds.
// NOTE(review): isMove is written by waitForStop's goroutine without
// synchronization while this loop reads it — data race; confirm and
// guard (e.g. sync/atomic) if correctness under -race matters.
func (b *Brick) printRaw(c *Config) {
	b.initRaw(c)
	for b.isMove {
		b.makeRaw()
		fmt.Printf("\r%s", string(b.raw[:]))
		time.Sleep(time.Millisecond * time.Duration(c.MoveDur))
	}
}

// waitForStop starts the animation in a goroutine and blocks until the
// user presses enter, then stops the bar where it currently is.
func (b *Brick) waitForStop(c *Config) {
	var t string
	go b.printRaw(c)
	fmt.Scanln(&t)
	b.isMove = false
}

// isMiss reports whether the two bars do not overlap horizontally.
func isMiss(b1 *Brick, b2 *Brick) bool {
	return b2.startPoint() > b1.endPoint() || b2.endPoint() < b1.startPoint()
}
package main import ( "bytes" "encoding/json" "flag" "fmt" "log" "os/exec" "reflect" "regexp" "strings" ) var genGoPkg = flag.String("gen_go_pkg", "main", "Go package") var genGoFmt = flag.Bool("gen_go_fmt", true, "Run gofmt on output") var genGoDbg = flag.Bool("gen_go_dbg", false, "Add debug to output code") type GoGenerator struct { Package string } func NewGoGenerator() *GoGenerator { return &GoGenerator{ Package: *genGoPkg, } } func methodOrGet(link *JsonLink) string { if link.Method != "" { return link.Method } return "GET" } func clearNL(s string) string { return strings.Replace(s, "\n", "", -1) } func genLinkDoc(link *JsonLink, key string) string { doc := "" if link.Title != "" || link.Description != "" { doc += "\n\n" } if link.Title != "" { doc += "// " + link.Title + "\n" } if link.Description != "" { doc += "// " + clearNL(link.Description) + "\n" } if *genGoDbg { doc += "// (ApiGenCode: key=" + key + ")\n" } return doc } func genSchemaDoc(schema *JsonSchema, key string) string { doc := "" if schema.Title != "" || schema.Description != "" { doc += "\n\n" } if schema.Title != "" { doc += "// " + schema.Title + "\n" } if schema.Description != "" { doc += "// " + clearNL(schema.Description) + "\n" } if *genGoDbg { doc += "// (ApiGenCode: key=" + key + ")\n" } return doc } func (g *GoGenerator) GenCode(api *JsonSchema) []*GenFile { return []*GenFile{ &GenFile{ Name: "types.go", Content: g.MaybeRunGoFmt(g.WrapFile(g.Objects(api))), }, &GenFile{ Name: "paths.go", Content: g.MaybeRunGoFmt(g.WrapFile(g.Paths(api))), }, &GenFile{ Name: "interface.go", Content: g.MaybeRunGoFmt(g.WrapFile(g.Interface(api))), }, &GenFile{ Name: "handler.go", Content: g.MaybeRunGoFmt(g.WrapFile(g.Handler(api))), }, } } func (g *GoGenerator) WrapFile(content string) string { return fmt.Sprintf("package %s\n\n%s", g.Package, content) } func (g *GoGenerator) EnumType(schema *JsonSchema) string { var common reflect.Kind = reflect.Invalid for _, obj := range schema.Enum { t := 
reflect.TypeOf(obj).Kind() if common == reflect.Invalid { common = t continue } if t != common { return "interface{}" } } switch common { case reflect.Bool: return "bool" case reflect.Float64: return "float64" case reflect.String: return "string" } return "interface{}" } func (g *GoGenerator) TypeName(path string, ptr bool, schema *JsonSchema) string { switch schema.Type { case "number": return "float32" case "string": return "string" case "integer": return "int" case "boolean": return "bool" case "object": if ptr { return "*" + g.GoName(path) } return g.GoName(path) case "array": if schema.Ref != "" { return g.GoName(path) } return "[]" + g.TypeName(path, ptr, schema.Items) } if schema.Ref != "" { if ptr { return "*" + g.GoName(schema.Ref) } return g.GoName(schema.Ref) } if len(schema.Enum) != 0 { return g.EnumType(schema) } return "/* SHOULD NOT COMPILE */" } // ======================================================= type structGenerator struct { g *GoGenerator } func (i *structGenerator) schema(path string, in *JsonSchema, parent *JsonLink) *line { name := path if parent != nil { if in.Ref != "" { name = i.g.GoName(in.Ref) } else { panic(fmt.Sprintf("Always use #/definitions/<type_name> so I can "+ "infer unique type names (in %s)", path)) } } doc := genSchemaDoc(in, path) topLevel := regexp.MustCompile("^#/definitions/[^/]+$") if in.Type == "object" { content := make([]string, 0, len(in.Properties)) for fieldName, field := range in.Properties { if field.Type == "array" && field.Items.Ref != "" { t := i.g.GoName(field.Items.Ref) content = append(content, fmt.Sprintf("%s []%s `json:\"%s,omitempty\"`", i.g.GoName(fieldName), t, fieldName)) } else if field.Type == "array" && field.Items.Title != "" { t := i.g.GoName(field.Items.Title) content = append(content, fmt.Sprintf("%s []%s `json:\"%s,omitempty\"`", i.g.GoName(fieldName), t, fieldName)) } else { t := i.g.TypeName(path+"/"+fieldName, true, field) content = append(content, fmt.Sprintf("%s %s 
`json:\"%s,omitempty\"`", i.g.GoName(fieldName), t, fieldName)) } } l := fmt.Sprintf("%stype %s struct {\n %s\n}", doc, i.g.GoName(name), strings.Join(content, "\n ")) return &line{path, l} } else if len(in.Enum) > 0 { l := i.g.GoName(name) enumType := i.g.EnumType(in) values := make([]string, 0, len(in.Enum)) for n, v := range in.Enum { key := i.enumVar(n, v) val := i.formatEnumValue(enumType, v) values = append(values, fmt.Sprintf("%s_%s %s = %s", l, key, l, val)) } return &line{path, fmt.Sprintf(` %stype %s %s const ( %s )`, doc, l, enumType, strings.Join(values, "\n"))} } // Generate top-level aliases as well. if topLevel.MatchString(path) { t := i.g.TypeName(path, true, in) if in.Type == "array" { t = "[]" + t } l := fmt.Sprintf("%stype %s %s\n", doc, i.g.GoName(name), t) return &line{path, l} } return nil } func (i *structGenerator) link(path string, link *JsonLink, parent *JsonSchema) *line { return nil } func (i *structGenerator) enumVar(n int, val interface{}) string { switch v := val.(type) { case nil: return "_nil_" case int: return fmt.Sprintf("%d", v) case string: return i.g.GoName(v) case bool: return fmt.Sprintf("%t", v) } return fmt.Sprintf("_%d_", n) } func (i *structGenerator) formatEnumValue(tpe string, val interface{}) string { switch { case tpe == "string": return fmt.Sprintf("\"%s\"", val) case tpe == "bool": return fmt.Sprintf("%t", val) case tpe == "float64": return fmt.Sprintf("%f", val) } data, _ := json.Marshal(val) return fmt.Sprintf("\"%s\"", data) } func (g *GoGenerator) Objects(schema *JsonSchema) string { gen := &structGenerator{g} return GenLines(schema, gen) } // ======================================================= type interfaceGenerator struct { g *GoGenerator } func (i *interfaceGenerator) schema(path string, in *JsonSchema, parent *JsonLink) *line { return nil } func (i *interfaceGenerator) link(path string, link *JsonLink, parent *JsonSchema) *line { name := i.g.GoName(link.Title) var req, resp string if link.Schema != nil 
{ if link.Schema.Ref != "" { req = i.g.TypeName(path+"/schema", true, link.Schema) } else { panic(fmt.Sprintf("Always use #/definitions/<type_name> so I can "+ "infer unique type names (in %s/schema)", path)) } } if link.TargetSchema != nil { if link.TargetSchema.Ref != "" { resp = i.g.TypeName(path+"/targetSchema", true, link.TargetSchema) } else { panic(fmt.Sprintf("Always use #/definitions/<type_name> so I can "+ "infer unique type names (in %s/targetSchema)", path)) } } params := make([]string, 0) for _, extraParam := range i.Placeholders(link, parent) { params = append(params, extraParam[0]+" "+extraParam[1]) } if len(req) > 0 { params = append(params, "input "+req) } re := regexp.MustCompile("\\{([^}]+)\\}") key := methodOrGet(link) + " " + re.ReplaceAllString(link.Href, "{}") return &line{ DedupeKey: key, Line: fmt.Sprintf("%s%s(%s) (%s, error)", genLinkDoc(link, key), name, strings.Join(params, ","), resp), } } func (i *interfaceGenerator) Placeholders(method *JsonLink, parent *JsonSchema) [][]string { params := make([][]string, 0) re := regexp.MustCompile("\\{([^}]+)\\}") placeholders := re.FindAllStringSubmatch(method.Href, -1) for _, match := range placeholders { name := match[1] if schema, known := parent.Properties[name]; known { params = append(params, []string{name, i.g.TypeName("", true, schema)}) } else { fmt.Println("Unknwon " + name) } } return params } func (g *GoGenerator) Interface(api *JsonSchema) string { gen := &interfaceGenerator{g} return "type " + g.GoName(api.Title) + " interface {\n" + " " + GenLines(api, gen) + "\n}" } // ======================================================= type handlerGenerator struct { g *GoGenerator class string } func (i *handlerGenerator) schema(path string, in *JsonSchema, parent *JsonLink) *line { return nil } func (i *handlerGenerator) link(path string, link *JsonLink, parent *JsonSchema) *line { name := i.g.GoName(link.Title) var req string if link.Schema != nil { if link.Schema.Ref != "" { req = 
i.g.TypeName(path+"/schema", false, link.Schema) } else { panic(fmt.Sprintf("Always use #/definitions/<type_name> so I can "+ "infer unique type names (in %s/schema)", path)) } } params := make([]string, 0) for _, extraParam := range i.Placeholders(link, parent) { params = append(params, "_"+extraParam[0]) } re := regexp.MustCompile("\\{([^}]+)\\}") key := methodOrGet(link) + " " + re.ReplaceAllString(link.Href, "{}") var args []string for i, p := range params { args = append(args, fmt.Sprintf(`%s := matches[0][%d]`, p, i+1)) } var matches = "" if len(args) > 0 { matches = "matches := re.FindAllStringSubmatch(r.URL.Path, -1)" } if len(req) > 0 { params = append(params, "input") args = append(args, fmt.Sprintf(`input := &%s{} body, _ := ioutil.ReadAll(r.Body) json.Unmarshal(body, input)`, req)) } return &line{ DedupeKey: key, Line: fmt.Sprintf(`%sfunc (h *%s) _%s(w http.ResponseWriter, r *http.Request) (bool, error) { re := regexp.MustCompile("^%s$") if r.Method == "%s" && re.MatchString(r.URL.Path) { %s %s r, err := h.Api.%s(%s) if err != nil { return true, err } if resp, err := json.Marshal(r); err == nil { w.Write(resp) return true, nil } else { return true, err } } return false, nil }`, genLinkDoc(link, key), i.class, name, re.ReplaceAllString(link.Href, "([^/]*)"), methodOrGet(link), matches, strings.Join(args, "\n"), name, strings.Join(params, ", ")), } } func (i *handlerGenerator) Placeholders(method *JsonLink, parent *JsonSchema) [][]string { params := make([][]string, 0) re := regexp.MustCompile("\\{([^}]+)\\}") placeholders := re.FindAllStringSubmatch(method.Href, -1) for _, match := range placeholders { name := match[1] if schema, known := parent.Properties[name]; known { params = append(params, []string{name, i.g.TypeName("", true, schema)}) } else { fmt.Println("Unknwon " + name) } } return params } type dispatcherGenerator struct { g *GoGenerator } func (i *dispatcherGenerator) schema(path string, in *JsonSchema, parent *JsonLink) *line { return nil } 
func (i *dispatcherGenerator) link(path string, link *JsonLink, parent *JsonSchema) *line { name := i.g.GoName(link.Title) re := regexp.MustCompile("\\{([^}]+)\\}") key := methodOrGet(link) + " " + re.ReplaceAllString(link.Href, "{}") return &line{ DedupeKey: key, Line: fmt.Sprintf(`if ok, err := s._%s(w, r); ok { return true, err }`, name), } } func (g *GoGenerator) Handler(api *JsonSchema) string { gen := &handlerGenerator{g, g.GoName(api.Title) + "Handler"} dispatch := &dispatcherGenerator{g} return fmt.Sprintf(`import ( "net/http" "regexp" "encoding/json" "io/ioutil" ) var _ = ioutil.ReadAll // Some generated code may need ioutil. type %s struct { Api %s } func (s *%sHandler) Dispatch(w http.ResponseWriter, r *http.Request) (bool, error) { %s return false, nil } %s `, gen.class, g.GoName(api.Title), g.GoName(api.Title), GenLines(api, dispatch), GenLines(api, gen)) } // ======================================================= type pathsGenerator struct { g *GoGenerator } func (i *pathsGenerator) schema(path string, in *JsonSchema, parent *JsonLink) *line { return nil } func (i *pathsGenerator) link(path string, link *JsonLink, parent *JsonSchema) *line { method := methodOrGet(link) f := fmt.Sprintf(`{ "%s", "%s", "%s", },`, i.g.GoName(link.Title), method, link.Href) re := regexp.MustCompile("\\{([^}]+)\\}") key := method + "." 
+ re.ReplaceAllString(link.Href, "{}") return &line{key, f} } func (g *GoGenerator) Paths(api *JsonSchema) string { gen := &pathsGenerator{g} strukt := `type pathDef struct { Name string Method string Href string } ` return strukt + "var " + g.GoName(api.Title) + "Paths = []pathDef{" + GenLines(api, gen) + "\n}\n" } // ======================================================= func camelcase(in string, splits []string) string { for _, split := range splits { if strings.Contains(in, split) { path := strings.Split(in, split) for i, p := range path { path[i] = strings.ToUpper(p[0:1]) + p[1:] } in = strings.Join(path, "") } } return in } func (g *GoGenerator) GoName(jsonName string) string { if strings.Contains(jsonName, "/") { path := strings.Split(jsonName, "/") jsonName = path[len(path)-1] } jsonName = camelcase(jsonName, []string{" ", "_"}) return strings.ToUpper(jsonName[0:1]) + jsonName[1:] } func (g *GoGenerator) MaybeRunGoFmt(in string) string { if *genGoFmt { return g.RunGoFmt(in) } return in } func (g *GoGenerator) RunGoFmt(in string) string { cmd := exec.Command("gofmt") cmd.Stdin = strings.NewReader(in) var out bytes.Buffer var errOut bytes.Buffer cmd.Stdout = &out cmd.Stderr = &errOut err := cmd.Run() if err != nil { log.Println("=== gofmt returned an error. input was: ===") for i, l := range strings.Split(in, "\n") { fmt.Printf("%3.d %s\n", i+1, l) } log.Println("=== gofmt output: ===") log.Println(errOut.String()) log.Println(err) log.Println("=== end gofmt error ===") } return out.String() }
package controllers

import (
	"encoding/json"

	"github.com/astaxie/beego"
	"smtcar/models"
)

// RoleController serves role listing requests.
type RoleController struct {
	beego.Controller
}

// Paging defaults used when the request carries no coordinates.
const (
	DefRolePageRow   = 10     // rows per page
	DefRolePageOrder = "Name" // sort column
)

// Get returns one page of roles as JSON: {"Total": <count>, "rows": [...]}.
// Paging coordinates (page, row count, sort order) are read from the
// request body; if unmarshalling fails, defaults are used instead.
func (this *RoleController) Get() {
	coord := models.RoleListCoord{}
	if err := json.Unmarshal(this.Ctx.Input.RequestBody, &coord); err != nil {
		// No coordinate info in the request — fall back to the defaults.
		coord.Page = 1
		coord.Row = DefRolePageRow
		coord.Sort = DefRolePageOrder
	}
	roles, count := models.GetRolelist(&coord)
	this.Data["json"] = &map[string]interface{}{"Total": count, "rows": &roles}
	this.ServeJSON()
}
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2020 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( "fmt" "reflect" "sort" "strconv" "strings" "time" "unicode" "github.com/bitmark-inc/logger" ) const ( defaultNum = 0 defaultTimeStrErrorMsg = "clock time out of range" defaultTimePeriodErrorMsg = "time period format error" timePeriodSeparator = "," clockSeparator = "-" spaceChar = " " allDayClockStr = "" defaultIndex = 0 oneWeekDuration = time.Duration(24*7) * time.Hour delayOfStartStop = time.Duration(5) * time.Second jobCalendarPrefix = "calendar" ) type JobCalendar interface { PickNextStartEvent(time.Time) interface{} PickNextStopEvent(time.Time) interface{} PickInitialiseStartEvent(time.Time) interface{} PickInitialiseStopEvent(time.Time) interface{} Refresh(calendar ConfigCalendar) RescheduleStartEventsPrior(event time.Time) RescheduleStopEventsPrior(event time.Time) RunForever() bool SetLog(l *logger.L) } type NumberRange struct { min uint32 max uint32 } // collapse events, all start time put in one place, all end time put in one place type FlattenEvents struct { start []time.Time stop []time.Time } type SingleEvent struct { start time.Time stop time.Time } type JobCalendarData struct { flattenEvents FlattenEvents events map[time.Weekday][]SingleEvent rawData ConfigCalendar rescheduleChannel chan<- struct{} log *logger.L } type TimeData struct { hour, minute uint32 } func newJobCalendar(channel chan<- struct{}) JobCalendar { return &JobCalendarData{ flattenEvents: FlattenEvents{ start: []time.Time{}, stop: []time.Time{}, }, events: map[time.Weekday][]SingleEvent{ time.Sunday: {}, time.Monday: {}, time.Tuesday: {}, time.Wednesday: {}, time.Thursday: {}, time.Friday: {}, time.Saturday: {}, }, rawData: ConfigCalendar{}, rescheduleChannel: channel, } } func (j *JobCalendarData) newEmptyFlattenEvents() FlattenEvents { return FlattenEvents{ start: []time.Time{}, stop: 
[]time.Time{}, } } func (j *JobCalendarData) newEmptyEvents() map[time.Weekday][]SingleEvent { return map[time.Weekday][]SingleEvent{ time.Sunday: {}, time.Monday: {}, time.Tuesday: {}, time.Wednesday: {}, time.Thursday: {}, time.Friday: {}, time.Saturday: {}, } } func (j *JobCalendarData) SetLog(l *logger.L) { j.log = l } func (j *JobCalendarData) RunForever() bool { return 0 == len(j.flattenEvents.stop) } func (j *JobCalendarData) Refresh(calendar ConfigCalendar) { j.log.Debug("refresh calendar") if !j.isSameCalendar(calendar) { j.log.Debug("calendar change") j.setNewCalendar(calendar) j.resetEvents() j.parseRawData(calendar) j.removeRedundantStopEvent() j.printEvents() j.notifyJobManager() } } func (j *JobCalendarData) resetEvents() { j.flattenEvents = j.newEmptyFlattenEvents() j.events = j.newEmptyEvents() } func (j *JobCalendarData) notifyJobManager() { j.log.Debug("notify manager for new calendar settings...") j.rescheduleChannel <- struct{}{} } func (j *JobCalendarData) setNewCalendar(calendar ConfigCalendar) { j.rawData = calendar } func (j *JobCalendarData) parseRawData(calendar ConfigCalendar) { j.convertWeekScheduleToEvents() j.sortFlattenEventsFromEarlier2Later() } func (j *JobCalendarData) removeRedundantStopEvent() { j.log.Debug("removing redundant events...") start := j.flattenEvents.start stop := j.flattenEvents.stop redundantIdx := make([]bool, len(j.flattenEvents.stop)) loop: for i, k := 0, 0; i < len(start) && k < len(stop); { if start[i].Equal(stop[k]) { j.log.Debugf("%+v stop event is redundant", j.flattenEvents.stop[k]) redundantIdx[k] = true i++ k++ continue loop } if start[i].After(stop[k]) { k++ } else { i++ } } newSlice := make([]time.Time, 0, len(j.flattenEvents.stop)) for i := 0; i < len(redundantIdx); i++ { if !redundantIdx[i] { newSlice = append(newSlice, stop[i]) } } j.flattenEvents.stop = newSlice } func isEventAlreadyExist(times []time.Time, event time.Time) (bool, int) { if 0 == len(times) { return false, defaultIndex } for i, v := 
range times {
		if v == event {
			return true, i
		}
	}
	return false, defaultIndex
}

// isSameTime reports whether two clock values name the same hour and minute.
func isSameTime(t1 TimeData, t2 TimeData) bool {
	return t1.hour == t2.hour && t1.minute == t2.minute
}

// isTimeDataFirstEarlierThanSecond reports whether first is strictly
// earlier in the day than second.
//
// Fixed: minutes are compared only when the hours are equal. The old
// code compared minutes unconditionally, so e.g. 3:10 was wrongly
// reported as earlier than 2:30, which made parseTimePeriod swap the
// endpoints of such periods.
func isTimeDataFirstEarlierThanSecond(first TimeData, second TimeData) bool {
	if first.hour != second.hour {
		return first.hour < second.hour
	}
	return first.minute < second.minute
}

// isTimeBooked reports whether the given instant falls inside any
// scheduled event on its weekday.
func (j *JobCalendarData) isTimeBooked(event time.Time) bool {
	weekDay := event.Weekday()
	events := j.events[weekDay]
	for _, t := range events {
		afterOrEqualToStartTime := t.start.Before(event) || t.start.Equal(event)
		beforeOrEqualToEndTime := t.stop.After(event) || t.stop.Equal(event)
		if afterOrEqualToStartTime && beforeOrEqualToEndTime {
			return true
		}
		// A zero stop time on the day's first event marks a
		// work-all-day schedule (see scheduleStartEventWhenDayBegin).
		if events[0].stop.IsZero() && afterOrEqualToStartTime {
			return true
		}
	}
	return false
}

// PickInitialiseStartEvent returns the first start event to schedule at
// startup: shortly after the given time when it lies inside working
// time, otherwise the next scheduled start event.
func (j *JobCalendarData) PickInitialiseStartEvent(event time.Time) interface{} {
	if j.isTimeBooked(event) {
		j.log.Debugf("working time, start after %s", delayOfStartStop.String())
		return event.Add(delayOfStartStop)
	}
	return j.PickNextStartEvent(event)
}

// PickInitialiseStopEvent returns the first stop event to schedule at
// startup: the next scheduled stop during working time, otherwise
// shortly after the given time.
func (j *JobCalendarData) PickInitialiseStopEvent(event time.Time) interface{} {
	if j.isTimeBooked(event) {
		return j.PickNextStopEvent(event)
	}
	j.log.Debugf("not working time, stop after %s", delayOfStartStop.String())
	return event.Add(delayOfStartStop)
}

// PickNextStartEvent returns the earliest start event strictly after the
// given time, or nil when none is scheduled.
func (j *JobCalendarData) PickNextStartEvent(event time.Time) interface{} {
	for _, e := range j.flattenEvents.start {
		if e.After(event) {
			j.log.Infof("next start event at %s", e)
			return e
		}
	}
	j.log.Error("cannot find next start event")
	j.printEvents()
	return nil
}

// PickNextStopEvent returns the earliest stop event strictly after the
// given time, or nil when none is scheduled.
func (j *JobCalendarData) PickNextStopEvent(event time.Time) interface{} {
	for _, e := range j.flattenEvents.stop {
		if e.After(event) {
			j.log.Infof("next stop event at %s", e)
			return e
		}
	}
	j.log.Info("cannot find next stop event")
	j.printEvents()
	return nil
}

// RescheduleStartEventsPrior pushes every start event at or before the
// given time one week forward, keeping later events in place.
func (j *JobCalendarData) RescheduleStartEventsPrior(event time.Time) {
	if 0 == len(j.flattenEvents.start) ||
	j.flattenEvents.start[0].After(event) {
		return
	}
	// NOTE(review): this is the tail of a function whose signature is above
	// this chunk (it mirrors RescheduleStopEventsPrior below for the start
	// slice): events at or before `event` are pushed one week forward and
	// re-appended after the remaining future events.
	times := j.flattenEvents.start
	newSlices := make([]time.Time, 0, len(times))
	schedules := make([]time.Time, 0, len(times))
loop:
	for i, t := range times {
		if t.Before(event) || t.Equal(event) {
			schedules = append(schedules, t.Add(oneWeekDuration))
		} else {
			// times is sorted earliest-first (see
			// sortFlattenEventsFromEarlier2Later), so the rest are future.
			newSlices = append(newSlices, times[i:]...)
			break loop
		}
	}
	newSlices = append(newSlices, schedules...)
	j.flattenEvents.start = newSlices
}

// RescheduleStopEventsPrior moves every stop event at or before `event`
// one week forward, keeping the remaining (future) stop events in place.
// No-op when there are no stop events or the earliest one is already
// after `event`.
func (j *JobCalendarData) RescheduleStopEventsPrior(event time.Time) {
	if 0 == len(j.flattenEvents.stop) || j.flattenEvents.stop[0].After(event) {
		return
	}
	times := j.flattenEvents.stop
	newSlices := make([]time.Time, 0, len(times))
	schedules := make([]time.Time, 0, len(times))
loop:
	for i, t := range times {
		if t.Before(event) || t.Equal(event) {
			schedules = append(schedules, t.Add(oneWeekDuration))
		} else {
			newSlices = append(newSlices, times[i:]...)
			break loop
		}
	}
	newSlices = append(newSlices, schedules...)
	j.flattenEvents.stop = newSlices
}

// removeEventFrom returns times with the first occurrence of `event`
// removed; the slice is returned unchanged when the event is absent.
// NOTE(review): the error result is always nil — the signature could be
// simplified, but callers may rely on it.
func (j *JobCalendarData) removeEventFrom(times []time.Time, event time.Time) ([]time.Time, error) {
	exist, idx := isEventAlreadyExist(times, event)
	if !exist {
		return times, nil
	}
	// In-place delete; mutates the backing array of `times`.
	return append(times[:idx], times[idx+1:]...), nil
}

// weekDayCurrent2Target returns the signed day offset from `current` to
// `target` (may be negative, e.g. Friday -> Monday is -4).
func (j *JobCalendarData) weekDayCurrent2Target(current time.Weekday, target time.Weekday) int {
	return int(target) - int(current)
}

// parseClockStr parses a "HH:MM" clock string into a TimeData.
// "24:00" / "24:0" are special-cased (end of day) because time.Parse
// rejects hour 24.
func (j *JobCalendarData) parseClockStr(clock string) (TimeData, error) {
	if clock == "24:00" || clock == "24:0" {
		return TimeData{
			hour:   uint32(24),
			minute: uint32(0),
		}, nil
	}
	t, err := time.Parse("15:04", clock)
	if nil != err {
		j.log.Errorf("%s\n", err.Error())
		return TimeData{}, err
	}
	return TimeData{
		hour:   uint32(t.Hour()),
		minute: uint32(t.Minute()),
	}, nil
}

// convertStr2NumberWithLimit converts str to an unsigned number and
// validates it against numRange (inclusive bounds). Returns defaultNum
// and an error on parse failure or out-of-range values.
// NOTE(review): a negative input converts through uint32(num) and wraps
// before the range check — presumably rejected by the max bound; confirm.
func (j *JobCalendarData) convertStr2NumberWithLimit(str string, numRange NumberRange) (uint32, error) {
	num, err := strconv.Atoi(str)
	if err != nil || uint32(num) < numRange.min || uint32(num) > numRange.max {
		return defaultNum, fmt.Errorf(defaultTimeStrErrorMsg)
	}
	return uint32(num), nil
}

// parseTimePeriod parses a period string into an ordered (earlier, later)
// pair of TimeData values.
// period: 2:12 - 3:14
// clock: 2:12
func (j *JobCalendarData) parseTimePeriod(period string) (TimeData, TimeData, error) {
	str := strings.ReplaceAll(period, spaceChar, "")
	clocks := strings.Split(str, clockSeparator)
	if len(clocks) > 2 {
		return TimeData{}, TimeData{}, fmt.Errorf(defaultTimePeriodErrorMsg)
	}
	timeFirst, err := j.parseClockStr(clocks[0])
	if nil != err {
		return TimeData{}, TimeData{}, fmt.Errorf(defaultTimePeriodErrorMsg)
	}
	timeSecond, err := j.parseClockStr(clocks[1])
	if nil != err {
		return TimeData{}, TimeData{}, fmt.Errorf(defaultTimePeriodErrorMsg)
	}
	// Always return the earlier time first, regardless of input order.
	if isTimeDataFirstEarlierThanSecond(timeFirst, timeSecond) {
		return timeFirst, timeSecond, nil
	}
	return timeSecond, timeFirst, nil
}

// timeByWeekdayAndOffset resolves (weekday, clock) to a concrete
// time.Time within the current week, relative to time.Now().
func (j *JobCalendarData) timeByWeekdayAndOffset(day time.Weekday, clock TimeData) time.Time {
	now := time.Now()
	dayDiffNum := j.weekDayCurrent2Target(now.Weekday(), day)
	return time.Date(now.Year(), now.Month(), now.Day()+dayDiffNum, int(clock.hour), int(clock.minute), 0, 0, now.Location())
}

// timeOfWeekdayStartFromBeginning builds a FlattenEvents with a single
// start event at 00:00 of the given weekday and no stop events.
func (j *JobCalendarData) timeOfWeekdayStartFromBeginning(day time.Weekday) FlattenEvents {
	flattenEvents := FlattenEvents{
		start: []time.Time{j.timeByWeekdayAndOffset(day, TimeData{hour: 0, minute: 0})},
		stop:  []time.Time{},
	}
	return flattenEvents
}

// sortFlattenEventsFromEarlier2Later sorts both flattened event slices
// in ascending chronological order, in place.
// NOTE(review): the closure parameters (i, j int) shadow the receiver j;
// harmless here, but worth renaming.
func (j *JobCalendarData) sortFlattenEventsFromEarlier2Later() {
	j.log.Debug("sort events")
	events := j.flattenEvents
	sort.Slice(events.start, func(i, j int) bool {
		return events.start[i].Before(events.start[j])
	})
	sort.Slice(events.stop, func(i, j int) bool {
		return events.stop[i].Before(events.stop[j])
	})
}

// convertWeekScheduleToEvents expands the raw per-weekday schedule
// strings into concrete events for the whole week.
// TODO: refactor to use for loop, currently no idea how to use code for
// time.Sunday & time.rawData.Sunday
func (j *JobCalendarData) convertWeekScheduleToEvents() {
	j.convertDayScheduleToEvents(time.Sunday, j.rawData.Sunday)
	j.convertDayScheduleToEvents(time.Monday, j.rawData.Monday)
	j.convertDayScheduleToEvents(time.Tuesday, j.rawData.Tuesday)
	j.convertDayScheduleToEvents(time.Wednesday, j.rawData.Wednesday)
	j.convertDayScheduleToEvents(time.Thursday, j.rawData.Thursday)
	j.convertDayScheduleToEvents(time.Friday, j.rawData.Friday)
	j.convertDayScheduleToEvents(time.Saturday, j.rawData.Saturday)
}

// convertDayScheduleToEvents converts one weekday's schedule string:
// the all-day marker schedules a single start-of-day event; anything
// else is parsed as a list of time periods.
func (j *JobCalendarData) convertDayScheduleToEvents(day time.Weekday, clock string) {
	if allDayClockStr == strings.Trim(clock, spaceChar) {
		j.log.Debugf("%s work all day", day.String())
		j.scheduleStartEventWhenDayBegin(day)
		return
	}
	j.scheduleEvents(day, clock)
}

// containsLetter reports whether s contains any Unicode letter.
func containsLetter(s string) bool {
	for _, c := range s {
		if unicode.IsLetter(c) {
			return true
		}
	}
	return false
}

// isValidPeriod validates a single "HH:MM - HH:MM" period string:
// exactly two clock parts, not identical, and letter-free.
func (j *JobCalendarData) isValidPeriod(str string) bool {
	s := strings.Split(str, clockSeparator)
	if len(s) != 2 {
		j.log.Errorf("invalid caledar string %s, contains too many clock string", str)
		return false
	}
	t1 := strings.Trim(s[0], spaceChar)
	t2 := strings.Trim(s[1], spaceChar)
	if t1 == t2 {
		j.log.Errorf("invalid caledar string %s, 2 clock strings equal", str)
		return false
	}
	if containsLetter(t1) || containsLetter(t2) {
		j.log.Errorf("invalid caledar string %s, contains letter", str)
		return false
	}
	return true
}

// scheduleEvents parses a weekday's period list and records the
// resulting start/stop events. Invalid periods are skipped; if nothing
// valid remains, the day falls back to a single start-of-day event.
func (j *JobCalendarData) scheduleEvents(day time.Weekday, clock string) {
	periods := strings.Split(clock, timePeriodSeparator)
	events := make([]SingleEvent, 0)
	flattenEvents := FlattenEvents{
		start: []time.Time{},
		stop:  []time.Time{},
	}
loop:
	for _, period := range periods {
		if !j.isValidPeriod(period) {
			continue loop
		}
		t1, t2, err := j.parseTimePeriod(period)
		if nil != err {
			j.log.Errorf("error parse time period %s, error: %s", period, err)
			continue loop
		}
		events = append(events, SingleEvent{
			start: j.timeByWeekdayAndOffset(day, t1),
			stop:  j.timeByWeekdayAndOffset(day, t2),
		})
		flattenEvents.start = append(
			flattenEvents.start,
			j.timeByWeekdayAndOffset(day, t1),
		)
		flattenEvents.stop = append(
			flattenEvents.stop,
			j.timeByWeekdayAndOffset(day, t2),
		)
	}
	if 0 == len(flattenEvents.start) {
		j.log.Debugf("empty flatten start event, add start event to day start")
		j.scheduleStartEventWhenDayBegin(day)
		return
	}
	j.events[day] = events
	j.flattenEvents.start = append(j.flattenEvents.start, flattenEvents.start...)
	j.flattenEvents.stop = append(j.flattenEvents.stop, flattenEvents.stop...)
}

// scheduleStartEventWhenDayBegin records a single start event at 00:00
// of the given weekday (an "always on" day: no stop event).
func (j *JobCalendarData) scheduleStartEventWhenDayBegin(day time.Weekday) {
	flattenEvent := j.timeOfWeekdayStartFromBeginning(day)
	j.flattenEvents.start = append(j.flattenEvents.start, flattenEvent.start[0])
	j.events[day] = []SingleEvent{
		{start: flattenEvent.start[0]},
	}
}

// isSameCalendar reports whether the stored raw calendar deep-equals
// the new config, logging both values when they differ.
func (j *JobCalendarData) isSameCalendar(new ConfigCalendar) bool {
	equal := reflect.DeepEqual(j.rawData, new)
	if !equal {
		j.log.Debug("config changed")
		j.log.Debugf("previous: %+v", j.rawData)
		j.log.Debugf("new: %+v", new)
	}
	return equal
}

// printEvents dumps all scheduled events to the debug log.
// NOTE(review): the flattened start/stop slices are logged once per
// weekday (7 identical dumps) — likely those two Debugf calls were
// meant to sit before the loop.
func (j *JobCalendarData) printEvents() {
	weekdays := []time.Weekday{
		time.Sunday, time.Monday, time.Tuesday, time.Wednesday,
		time.Thursday, time.Friday, time.Saturday,
	}
	for _, d := range weekdays {
		j.log.Debugf("%d start flattenEvents: %+v",
			len(j.flattenEvents.start),
			j.flattenEvents.start,
		)
		j.log.Debugf("%d stop flattenEvents: %+v",
			len(j.flattenEvents.stop),
			j.flattenEvents.stop,
		)
		j.log.Debugf("%d events on %s", len(j.events[d]), d.String())
		for _, e := range j.events[d] {
			if e.stop.IsZero() {
				j.log.Debugf("%s: start: %s",
					d.String(),
					e.start,
				)
			} else {
				j.log.Debugf("%s: start: %s, end: %s",
					d.String(),
					e.start,
					e.stop,
				)
			}
		}
	}
}
package lc // Time: O(n) // Benchmark: 0ms 2.0mb | 100% func checkRecord(s string) bool { var aCount int for i, ch := range s { if ch == 'A' { aCount++ if aCount > 1 { return false } } else if ch == 'L' { if i+2 < len(s) && s[i+1] == 'L' && s[i+2] == 'L' { return false } } } return true }
package main import ( "fmt" "time" ) func hasPathCore(matrix [][]int, col, row int, str string, pathLength *int, visited []bool) bool { if len(matrix) == 0 { return false } rows, cols := len(matrix), len(matrix[0]) } func main() { }
package cmd import ( "fmt" "os" "github.com/KKKKjl/gosfs/master" "github.com/KKKKjl/gosfs/storage" "github.com/spf13/cobra" ) func init() { rootCmd.AddCommand(masterCmd) rootCmd.AddCommand(storageCmd) } var rootCmd = &cobra.Command{ Use: "gosfs", Long: ` ______ ______ ______ ______ ______ /\ ___\/\ __ \/\ ___\/\ ___\/\ ___\ \ \ \__ \ \ \/\ \ \___ \ \ __\\ \___ \ \ \_____\ \_____\/\_____\ \_\ \/\_____\ \/_____/\/_____/\/_____/\/_/ \/_____/ `, } func Execute() { if err := rootCmd.Execute(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } } // 启动主节点命令 var masterCmd = &cobra.Command{ Use: "master", Short: "start the master server", Run: func(cmd *cobra.Command, args []string) { fmt.Print(args) m := master.NewApiGatewayServer() m.Start() }, } // 启动储存节点命令 var storageCmd = &cobra.Command{ Use: "storage", Short: "start the storage server", Run: func(cmd *cobra.Command, args []string) { // server.Register() storageServer := storage.NewStorageServer() storageServer.StartStorageServer() }, }
package main

// main is a placeholder entry point; the chapter runner is currently
// disabled. (The redundant trailing bare `return` was removed —
// staticcheck S1023.)
func main() {
	// chapter1.Run()
}
package cli import ( "context" "fmt" "os" "path/filepath" "strings" "time" "github.com/fatih/color" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/tilt-dev/go-get" "github.com/tilt-dev/tilt/internal/analytics" "github.com/tilt-dev/tilt/internal/cli/demo" "github.com/tilt-dev/tilt/pkg/logger" "github.com/tilt-dev/tilt/pkg/model" ) const demoResourcesPrefix = "tilt-demo-" const sampleProjPackage = "github.com/tilt-dev/tilt-avatars" type demoCmd struct { // legacy disables the web UI (this is only used for integration tests) legacy bool // teardown will clean up any leftover `tilt demo` clusters and exit teardown bool // tmpdir for cloned `tilt-avatars` resources tmpdir string // skipCreateCluster uses default kubeconfig context instead of creating // an ephemeral cluster skipCreateCluster bool // projPackage is the `go get` style URL for the demo project projPackage string // tiltfilePath is a path to a Tiltfile to launch instead of cloning and // running the `tilt-avatars` project tiltfilePath string } func (c *demoCmd) name() model.TiltSubcommand { return "demo" } func (c *demoCmd) register() *cobra.Command { cmd := &cobra.Command{ Use: "demo [flags]", Short: "Creates a local, temporary Kubernetes cluster and runs a Tilt sample project", Long: fmt.Sprintf(`Test out Tilt using an isolated, ephemeral local Kubernetes setup. Tilt will create a temporary, local Kubernetes development cluster running in Docker. The cluster will be removed when Tilt is exited with Ctrl-C. A sample project (%s) will be cloned locally to a temporary directory using Git and launched. 
`, sampleProjPackage), } cmd.Flags().BoolVarP(&c.teardown, "teardown", "", false, "Removes any leftover tilt-demo Kubernetes clusters and exits") // --legacy flag only exists for integration tests to disable web console cmd.Flags().BoolVar(&c.legacy, "legacy", false, "If true, tilt will open in legacy HUD mode.") cmd.Flags().Lookup("legacy").Hidden = true // --tmpdir exists so that integration tests can inspect the output / use the Tiltfile cmd.Flags().StringVarP(&c.tmpdir, "tmpdir", "", "", "Temporary directory to clone sample project to") cmd.Flags().Lookup("tmpdir").Hidden = true cmd.Flags().BoolVar(&c.skipCreateCluster, "no-cluster", false, "Skip ephemeral cluster creation (requires local K8s cluster to already be configured)") cmd.Flags().StringVarP(&c.projPackage, "repo", "r", sampleProjPackage, "Path to custom repo to use instead of Tiltfile") // we don't use the `addTiltfileFlag()` because the default here should be empty cmd.Flags().StringVarP(&c.tiltfilePath, "file", "f", "", "Path to custom Tiltfile to use instead of sample project") addStartServerFlags(cmd) addDevServerFlags(cmd) return cmd } func (c *demoCmd) run(ctx context.Context, args []string) error { a := analytics.Get(ctx) a.Incr("cmd.demo", map[string]string{}) defer a.Flush(time.Second) client, err := wireDockerLocalClient(ctx) if err != nil { return errors.Wrap(err, "Failed to init Docker client") } k3dCli := demo.NewK3dClient(client) if c.teardown { return c.cleanupClusters(ctx, k3dCli) } if c.projPackage != sampleProjPackage && c.tiltfilePath != "" { return fmt.Errorf("cannot specify both a custom repo and Tiltfile path") } // // 0. 
Prepare environment // logger.Get(ctx).Infof("\nHang tight while Tilt prepares your demo environment!") c.tmpdir, err = os.MkdirTemp(c.tmpdir, demoResourcesPrefix) if err != nil { return fmt.Errorf("could not create temporary directory: %v", err) } if !c.skipCreateCluster { err = client.CheckConnected() if err != nil { return fmt.Errorf("tilt demo requires Docker to be installed and running: %v", err) } if !isLocalDockerHost(client.Env().DaemonHost()) { // properly supporting remote Docker connections is very tricky - either: // // the remote host will need more ports accessible (for K8s API + registry API) and we have to ensure // that everything both listens on the public interface and references it in configs // (such as "local-registry-hosting" ConfigMap) // OR // we need to tunnel everything (perhaps using Docker - this is the approach ctlptl takes!) // // for now, it's not supported as it's a pretty advanced setup to begin with, so we're not really targeting // it with the `tilt demo` functionality return fmt.Errorf("tilt demo requires a local Docker daemon to create a temporary Kubernetes cluster (current Docker host: %s)", client.Env().DaemonHost()) } // // 1. Create a cluster that will be torn down in the background on exit (Ctrl-C) // clusterName := filepath.Base(c.tmpdir) logger.Get(ctx).Infof("\tCreating %q local Kubernetes cluster...", clusterName) if err := k3dCli.CreateCluster(ctx, clusterName); err != nil { return fmt.Errorf("failed to create Kubernetes cluster: %v", err) } defer func() { // N.B. 
use background context because the main context has already been canceled due to Ctrl-C // but also don't block on execution (just fire request to Docker API and forget) because at this // point we have < 2 secs before the signal handler forcibly exits the process ctx := logger.WithLogger(context.Background(), logger.Get(ctx)) logger.Get(ctx).Infof("\nDeleting %q local Kubernetes cluster...", clusterName) if err = k3dCli.DeleteCluster(ctx, clusterName, false); err != nil { logger.Get(ctx).Warnf("\tFailed to delete cluster %q: %v", clusterName, err) } }() // // 2. Use the new cluster's kubeconfig for this Tilt process // if kubeconfig, err := k3dCli.GenerateKubeconfig(ctx, clusterName); err != nil { return fmt.Errorf("failed to generate kubeconfig: %v", err) } else { kubeconfigPath := filepath.Join(c.tmpdir, "kubeconfig") if err := os.WriteFile(kubeconfigPath, kubeconfig, 0666); err != nil { return fmt.Errorf("failed to write kubeconfig file: %v", err) } err = os.Setenv("KUBECONFIG", kubeconfigPath) if err != nil { return fmt.Errorf("failed to set KUBECONFIG env var: %v", err) } } } // // 3. Download the sample project to a tmpdir // var projPath string if c.tiltfilePath == "" { logger.Get(ctx).Infof("\tFetching %q project...", c.projPackage) dlr := get.NewDownloader(c.tmpdir) projPath, err = dlr.Download(c.projPackage) if err != nil { return fmt.Errorf("failed to download sample project: %v", err) } c.tiltfilePath = filepath.Join(projPath, "Tiltfile") } logger.Get(ctx).Infof("\tDone!") if projPath != "" { logger.Get(ctx).Infof( ` ----------------------------------------------------- Open the project directory in your preferred editor: %s ----------------------------------------------------- `, color.BlueString("%s", projPath)) } // // 4. 
Launch the `tilt up` command with the sample project // (it will implicitly use our kubeconfig) // up := upCmd{ fileName: c.tiltfilePath, legacy: c.legacy, stream: false, } return up.run(ctx, args) } func (c *demoCmd) cleanupClusters(ctx context.Context, k3dCli *demo.K3dClient) error { clusterNames, err := k3dCli.ListClusters(ctx) if err != nil { return err } failed := false for _, clusterName := range clusterNames { if strings.HasPrefix(clusterName, demoResourcesPrefix) { logger.Get(ctx).Infof("Removing cluster %q...", clusterName) if err := k3dCli.DeleteCluster(ctx, clusterName, true); err != nil { failed = true logger.Get(ctx).Errorf("Failed to remove %q cluster: %v", clusterName, err) } } } if failed { return errors.New("could not remove one or more tilt-demo K8s clusters") } return nil } // TODO(milas): this is copy-pasted from ctlptl, use it from a common place func isLocalDockerHost(dockerHost string) bool { return dockerHost == "" || // Check all the "standard" docker localhosts. // https://github.com/docker/cli/blob/a32cd16160f1b41c1c4ae7bee4dac929d1484e59/opts/hosts.go#L22 strings.HasPrefix(dockerHost, "tcp://localhost:") || strings.HasPrefix(dockerHost, "tcp://127.0.0.1:") || // https://github.com/moby/moby/blob/master/client/client_windows.go#L4 strings.HasPrefix(dockerHost, "npipe:") || // https://github.com/moby/moby/blob/master/client/client_unix.go#L6 strings.HasPrefix(dockerHost, "unix:") }
package resolver_test

import (
	"strings"
	"testing"

	"github.com/miekg/dns"

	"github.com/ooni/probe-cli/v3/internal/engine/netx/resolver"
)

// TestDecoderUnpackError checks that a nil/garbage payload fails to
// decode and yields no data.
func TestDecoderUnpackError(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(dns.TypeA, nil)
	if err == nil {
		t.Fatal("expected an error here")
	}
	if data != nil {
		t.Fatal("expected nil data here")
	}
}

// TestDecoderNXDOMAIN checks that an NXDOMAIN reply maps to the
// "no such host" error.
func TestDecoderNXDOMAIN(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(dns.TypeA, resolver.GenReplyError(t, dns.RcodeNameError))
	if err == nil || !strings.HasSuffix(err.Error(), "no such host") {
		t.Fatal("not the error we expected")
	}
	if data != nil {
		t.Fatal("expected nil data here")
	}
}

// TestDecoderOtherError checks that a non-NXDOMAIN failure rcode maps
// to the generic "query failed" error.
func TestDecoderOtherError(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(dns.TypeA, resolver.GenReplyError(t, dns.RcodeRefused))
	if err == nil || !strings.HasSuffix(err.Error(), "query failed") {
		t.Fatal("not the error we expected")
	}
	if data != nil {
		t.Fatal("expected nil data here")
	}
}

// TestDecoderNoAddress checks that a success reply with no records
// yields the "no response returned" error.
func TestDecoderNoAddress(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(dns.TypeA, resolver.GenReplySuccess(t, dns.TypeA))
	if err == nil || !strings.HasSuffix(err.Error(), "no response returned") {
		t.Fatal("not the error we expected")
	}
	if data != nil {
		t.Fatal("expected nil data here")
	}
}

// TestDecoderDecodeA checks that IPv4 addresses decode in order.
func TestDecoderDecodeA(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(
		dns.TypeA, resolver.GenReplySuccess(t, dns.TypeA, "1.1.1.1", "8.8.8.8"))
	if err != nil {
		t.Fatal(err)
	}
	if len(data) != 2 {
		t.Fatal("expected two entries here")
	}
	if data[0] != "1.1.1.1" {
		t.Fatal("invalid first IPv4 entry")
	}
	if data[1] != "8.8.8.8" {
		t.Fatal("invalid second IPv4 entry")
	}
}

// TestDecoderDecodeAAAA checks that IPv6 addresses decode in order.
func TestDecoderDecodeAAAA(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(
		dns.TypeAAAA, resolver.GenReplySuccess(t, dns.TypeAAAA, "::1", "fe80::1"))
	if err != nil {
		t.Fatal(err)
	}
	if len(data) != 2 {
		t.Fatal("expected two entries here")
	}
	if data[0] != "::1" {
		t.Fatal("invalid first IPv6 entry")
	}
	if data[1] != "fe80::1" {
		t.Fatal("invalid second IPv6 entry")
	}
}

// TestDecoderUnexpectedAReply checks that AAAA records in reply to an
// A query are ignored (treated as "no response returned").
func TestDecoderUnexpectedAReply(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(
		dns.TypeA, resolver.GenReplySuccess(t, dns.TypeAAAA, "::1", "fe80::1"))
	if err == nil || !strings.HasSuffix(err.Error(), "no response returned") {
		t.Fatal("not the error we expected")
	}
	if data != nil {
		t.Fatal("expected nil data here")
	}
}

// TestDecoderUnexpectedAAAAReply checks that A records in reply to an
// AAAA query are ignored.
// NOTE(review): "8.8.4.4." has a trailing dot — presumably harmless
// here since the records are ignored anyway, but confirm it is not a
// typo for "8.8.4.4".
func TestDecoderUnexpectedAAAAReply(t *testing.T) {
	d := resolver.MiekgDecoder{}
	data, err := d.Decode(
		dns.TypeAAAA, resolver.GenReplySuccess(t, dns.TypeA, "1.1.1.1", "8.8.4.4."))
	if err == nil || !strings.HasSuffix(err.Error(), "no response returned") {
		t.Fatal("not the error we expected")
	}
	if data != nil {
		t.Fatal("expected nil data here")
	}
}
package redpack

import (
	"fmt"

	"gylib/common"
	"gylib/weixinsdk/wxpay"
)

// WxHongBao holds the WeChat merchant credentials and certificate paths
// needed to call the WeChat Pay transfer / red-packet APIs.
type WxHongBao struct {
	AppId  string // WeChat official-account application ID
	MchId  string // WeChat Pay merchant ID
	ApiKey string // WeChat Pay merchant API key
	// Merchant certificate paths (mutual TLS).
	CertFile   string
	KeyFile    string
	RootcaFile string
	Wxuser     map[string]string
}

// NewWxHongBao builds a WxHongBao from a credential map with keys
// "appid", "apipass", "mchid", "certfile", "keyfile" and "rootca".
func NewWxHongBao(wxuserdata map[string]string) *WxHongBao {
	this := new(WxHongBao)
	// Keep the raw map as well as the unpacked fields. (The original
	// allocated a fresh map here and immediately overwrote it with
	// wxuserdata — that dead store has been removed.)
	this.Wxuser = wxuserdata
	this.AppId = this.Wxuser["appid"]
	this.ApiKey = this.Wxuser["apipass"]
	this.MchId = this.Wxuser["mchid"]
	this.CertFile = this.Wxuser["certfile"]
	this.KeyFile = this.Wxuser["keyfile"]
	this.RootcaFile = this.Wxuser["rootca"]
	return this
}

// sendTransfer posts the business parameters in data to the given
// WeChat Pay endpoint using the merchant certificate, and reports
// whether the gateway answered return_code == "SUCCESS".
// Shared implementation for Send_Pay and Send_Redpack, which were
// previously near-identical copies differing only in the URL.
func (this *WxHongBao) sendTransfer(url string, data map[string]interface{}) bool {
	c := wxpay.NewClient(this.AppId, this.MchId, this.ApiKey)
	// Attach the merchant certificate (mutual TLS).
	err := c.WithCert(this.CertFile, this.KeyFile, this.RootcaFile)
	if err != nil {
		fmt.Println(err)
		return false
	}
	params := make(wxpay.Params)
	// Only string and int64 values are forwarded; other types are
	// silently dropped (matches the original behavior).
	for key, v := range data {
		switch v := v.(type) {
		case string:
			params.SetString(key, v)
		case int64:
			params.SetInt64(key, v)
		}
	}
	// Common request parameters for the transfer APIs.
	params.SetString("mch_appid", c.AppId)
	params.SetString("mchid", c.MchId)
	params.SetString("nonce_str", common.RandomStr(32)) // random nonce
	params.SetString("sign", c.Sign(params))            // request signature
	ret, err := c.Post(url, params, true)
	if err != nil {
		return false
	}
	return ret["return_code"] == "SUCCESS"
}

// Send_Pay performs an enterprise payment (企业付款) transfer and
// reports success.
func (this *WxHongBao) Send_Pay(data map[string]interface{}) bool {
	return this.sendTransfer("https://api.mch.weixin.qq.com/mmpaymkttransfers/promotion/transfers", data)
}

// Send_Redpack sends a cash red packet (现金红包) and reports success.
func (this *WxHongBao) Send_Redpack(data map[string]interface{}) bool {
	return this.sendTransfer("https://api.mch.weixin.qq.com/mmpaymkttransfers/sendredpack", data)
}
package salia

// API endpoint paths / parameter keys used when talking to the charger.
const (
	HeartBeat        = "salia/heartbeat"
	ChargeMode       = "salia/chargemode"
	PauseCharging    = "salia/pausecharging"
	GridCurrentLimit = "grid_current_limit"
)

// Api is the top-level JSON document returned by the charger.
type Api struct {
	Device struct {
		ModelName       string
		SoftwareVersion string `json:"software_version"`
	}
	Secc Secc
}

// Secc wraps the charge-controller section; only port 0 is exposed.
type Secc struct {
	Port0 Port
}

// Port mirrors one charging port's JSON payload. Numeric fields carry
// the `,string` option because the device serializes numbers as JSON
// strings.
type Port struct {
	Ci struct {
		Evse struct {
			Basic struct {
				OfferedCurrentLimit float64 `json:"offered_current_limit,string"`
			}
		}
		Charge struct {
			Cp struct {
				Status string
			}
		}
	}
	Salia struct {
		ChargeMode    string
		PauseCharging int `json:"pausecharging,string"`
	}
	Metering struct {
		Meter struct {
			Type      string
			Available int `json:",string"`
		}
		Power struct {
			ActiveTotal struct {
				Actual float64 `json:",string"`
			} `json:"active_total"`
			Active struct {
				AC struct {
					L1 struct {
						Actual float64 `json:",string"`
					}
					L2 struct {
						Actual float64 `json:",string"`
					}
					L3 struct {
						Actual float64 `json:",string"`
					}
				}
			}
		}
		Energy struct {
			ActiveImport struct {
				Actual float64 `json:",string"`
			} `json:"active_import"`
		}
		Current struct {
			AC struct {
				L1 struct {
					Actual float64 `json:",string"`
				}
				L2 struct {
					Actual float64 `json:",string"`
				}
				L3 struct {
					Actual float64 `json:",string"`
				}
			}
		}
	}
	EvPresent        int `json:"ev_present,string"`
	Charging         int `json:",string"`
	GridCurrentLimit int `json:"grid_current_limit,string"`
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package trace

import (
	"context"

	"github.com/google/gapid/core/event"
	"github.com/google/gapid/core/net/grpcutil"
	"github.com/google/gapid/core/os/device"
	"github.com/google/gapid/test/robot/job"
	"github.com/google/gapid/test/robot/job/worker"
	"github.com/google/gapid/test/robot/search"
	"google.golang.org/grpc"
)

// remote is a Manager implementation that proxies every call to a
// remote trace service over gRPC.
type remote struct {
	client ServiceClient
}

// NewRemote returns a Worker that talks to a remote grpc trace service.
func NewRemote(ctx context.Context, conn *grpc.ClientConn) Manager {
	return &remote{
		client: NewServiceClient(conn),
	}
}

// Search implements Manager.Search
// It forwards the call through grpc to the remote implementation,
// feeding each streamed result to handler until the stream ends.
func (m *remote) Search(ctx context.Context, query *search.Query, handler ActionHandler) error {
	stream, err := m.client.Search(ctx, query)
	if err != nil {
		return err
	}
	return event.Feed(ctx, event.AsHandler(ctx, handler), grpcutil.ToProducer(stream))
}

// Register implements Manager.Register
// It forwards the call through grpc to the remote implementation.
func (m *remote) Register(ctx context.Context, host *device.Instance, target *device.Instance, handler TaskHandler) error { request := &worker.RegisterRequest{Host: host, Target: target} stream, err := m.client.Register(ctx, request) if err != nil { return err } return event.Feed(ctx, event.AsHandler(ctx, handler), grpcutil.ToProducer(stream)) } // Do implements Manager.Do // It forwards the call through grpc to the remote implementation. func (m *remote) Do(ctx context.Context, device string, input *Input) (string, error) { response, err := m.client.Do(ctx, &DoRequest{Device: device, Input: input}) return response.Id, err } // Update implements Manager.Update // It forwards the call through grpc to the remote implementation. func (m *remote) Update(ctx context.Context, action string, status job.Status, output *Output) error { request := &UpdateRequest{Action: action, Status: status, Output: output} _, err := m.client.Update(ctx, request) return err }
package utils

import (
	"strings"

	"github.com/jerolan/slack-poll/domain/entity"
)

// ConvertCommandToPoll parses a slash-command string into a Poll.
// The expected shape is a sequence of double-quoted segments: the
// second segment is the question, all following segments are options.
// A "-m" flag anywhere in the command selects multiple-choice mode.
func ConvertCommandToPoll(command string) (poll entity.Poll) {
	sanitizedCommand := removeDoubleCuotes(command)

	poll.Mode = entity.PollModeSingle
	if strings.Contains(sanitizedCommand, "-m") {
		poll.Mode = entity.PollModeMultiple
	}

	var options []string
	// Strip the -m flag, then split on double quotes; quoted segments
	// alternate with the whitespace between them.
	dirtyOptions := strings.Split(strings.ReplaceAll(sanitizedCommand, "-m", ""), `"`)
	for _, option := range dirtyOptions {
		pseudoOption := strings.Trim(option, " ")
		if pseudoOption != "" {
			options = append(options, pseudoOption)
		}
	}

	// Guard against malformed commands: the original indexing
	// (options[1] / options[2:]) panicked when fewer than two segments
	// were present; return a zero-valued poll instead.
	if len(options) < 2 {
		return poll
	}
	poll.Question = options[1]
	poll.Options = options[2:]
	return poll
}

// removeDoubleCuotes normalizes "smart" (curly) double quotes to plain
// ASCII quotes so the quote-splitting above works on pasted text.
// NOTE(review): name carries a typo ("Cuotes") — kept because other
// files in this package may reference it.
func removeDoubleCuotes(str string) string {
	clean := strings.ReplaceAll(str, "\u201D", `"`)
	clean = strings.ReplaceAll(clean, "\u201C", `"`)
	return clean
}
package main import ( "fmt" "io/ioutil" "log" "net/http" ) func homePage(w http.ResponseWriter, r *http.Request) { fmt.Println("Received home page request") fmt.Fprintf(w, "Test Go web app.") } func askRust(w http.ResponseWriter, r *http.Request) { fmt.Println("Received ask request") resp, err := http.Get("http://127.0.0.1:3001") if err != nil { log.Fatalln(err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { log.Fatalln(err) } fmt.Fprintf(w, "Test Go web app asks Rust web app: %s", body) } func setupRoutes() { http.HandleFunc("/", homePage) http.HandleFunc("/ask", askRust) } func main() { fmt.Println("Go web app started on port 3000") setupRoutes() http.ListenAndServe(":3000", nil) }
package model import "strings" type BuildReason int const BuildReasonNone = BuildReason(0) const ( BuildReasonFlagChangedFiles BuildReason = 1 << iota BuildReasonFlagConfig // NOTE(nick): In live-update-v1, if a container had live-updated changed, // then crashed, we would automatically replace it with a fresh image. // This approach was difficult to reason about and sometimes led to infinite loops. // Users complained that it was too aggressive about doing an image build. // // In live-update-v2, the reconciler keeps track of how to bring crashing // containers up to date. Instead, we only kick off fresh image builds // if there's a new file change / trigger but the container has been // marked unrecoverable. So this build reason is obsolete. BuildReasonFlagCrashDeprecated BuildReasonFlagInit BuildReasonFlagTriggerWeb BuildReasonFlagTriggerCLI BuildReasonFlagTriggerHUD BuildReasonFlagTriggerUnknown // An external process called `tilt args` BuildReasonFlagTiltfileArgs // Suppose you have // manifestA with imageA depending on imageCommon // manifestB with imageB depending on imageCommon // // Building manifestA will mark imageB // with changed dependencies. 
BuildReasonFlagChangedDeps ) func (r BuildReason) With(flag BuildReason) BuildReason { return r | flag } func (r BuildReason) Has(flag BuildReason) bool { return r&flag == flag } func (r BuildReason) HasTrigger() bool { for _, v := range triggerBuildReasons { if r.Has(v) { return true } } return false } func (r BuildReason) WithoutTriggers() BuildReason { result := int(r) for _, v := range triggerBuildReasons { if r.Has(v) { result -= int(v) } } return BuildReason(result) } var translations = map[BuildReason]string{ BuildReasonFlagChangedFiles: "Changed Files", BuildReasonFlagConfig: "Config Changed", BuildReasonFlagCrashDeprecated: "Pod Crashed, Lost live_update Changes", BuildReasonFlagInit: "Initial Build", BuildReasonFlagTriggerWeb: "Web Trigger", BuildReasonFlagTriggerCLI: "CLI Trigger", BuildReasonFlagTriggerHUD: "HUD Trigger", BuildReasonFlagTriggerUnknown: "Unknown Trigger", BuildReasonFlagTiltfileArgs: "Tilt Args", BuildReasonFlagChangedDeps: "Dependency Updated", } var triggerBuildReasons = []BuildReason{ BuildReasonFlagTriggerWeb, BuildReasonFlagTriggerCLI, BuildReasonFlagTriggerHUD, BuildReasonFlagTriggerUnknown, } var allBuildReasons = []BuildReason{ BuildReasonFlagInit, BuildReasonFlagChangedFiles, BuildReasonFlagConfig, BuildReasonFlagCrashDeprecated, BuildReasonFlagTriggerWeb, BuildReasonFlagTriggerCLI, BuildReasonFlagTriggerHUD, BuildReasonFlagChangedDeps, BuildReasonFlagTriggerUnknown, BuildReasonFlagTiltfileArgs, } func (r BuildReason) String() string { rs := []string{} // The trigger build reasons should never be used in conjunction with another // build reason, because it was explicitly specified by the user rather than implicit. for _, v := range triggerBuildReasons { if r.Has(v) { return translations[v] } } // The Init build reason should be listed alone too. if r.Has(BuildReasonFlagInit) { return translations[BuildReasonFlagInit] } // Use an array to iterate over the translations to ensure the iteration order // is consistent. 
for _, v := range allBuildReasons { if r.Has(v) { rs = append(rs, translations[v]) } } return strings.Join(rs, " | ") }
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"strings"
	"testing"

	"google.golang.org/grpc"
	healthsvc "google.golang.org/grpc/health"
	healthz "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/test/bufconn"

	repos "rpcs.bts.org/service/repository-service"
	users "rpcs.bts.org/service/user-service"
)

// startTestServer starts the gRPC server (with health service) on an
// in-memory bufconn listener and returns both for the test to use.
func startTestServer() (*grpc.Server, *bufconn.Listener) {
	l := bufconn.Listen(10)
	s := grpc.NewServer()
	h := healthsvc.NewServer()
	registerServices(s, h)
	go func() {
		// log.Fatal stays here: no *testing.T is available inside this
		// goroutine, and a serve failure is unrecoverable for the test.
		err := startServer(s, l)
		if err != nil {
			log.Fatal(err)
		}
	}()
	return s, l
}

// TestUserService checks the unary GetUser RPC end to end.
func TestUserService(t *testing.T) {
	s, l := startTestServer()
	defer s.GracefulStop()

	bufconnDialer := func(ctx context.Context, addr string) (net.Conn, error) {
		return l.Dial()
	}
	client, err := grpc.DialContext(
		context.Background(),
		"",
		grpc.WithInsecure(),
		grpc.WithContextDialer(bufconnDialer),
	)
	if err != nil {
		t.Fatal(err)
	}

	usersClient := users.NewUsersClient(client)
	resp, err := usersClient.GetUser(
		context.Background(),
		&users.UserGetRequest{
			Email: "test@test.com",
			Id:    "test-id",
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	if resp.User.FirstName != "test" {
		t.Errorf("Expect FirstName to be: test, Got: %s", resp.User.FirstName)
	}
}

// TestRepoServiceGetRepos checks the server-streaming GetRepos RPC.
func TestRepoServiceGetRepos(t *testing.T) {
	s, l := startTestServer()
	defer s.GracefulStop()

	bufconnDialer := func(ctx context.Context, addr string) (net.Conn, error) {
		return l.Dial()
	}
	client, err := grpc.DialContext(
		context.Background(),
		"",
		grpc.WithInsecure(),
		grpc.WithContextDialer(bufconnDialer),
	)
	if err != nil {
		t.Fatal(err)
	}

	repoClient := repos.NewRepoClient(client)
	stream, err := repoClient.GetRepos(
		context.Background(),
		&repos.RepoGetRequest{
			CreatorId: "repo-user-1",
			Id:        "repo-1",
		},
	)
	if err != nil {
		// Was log.Fatal, which bypasses the test framework and skips
		// deferred cleanup; use t.Fatal inside tests.
		t.Fatal(err)
	}

	var reps []*repos.Repository
	for {
		repo, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err) // was log.Fatal
		}
		reps = append(reps, repo.Repo)
	}
	if len(reps) != 5 {
		t.Fatalf("Expected get 5 repos, but get %d", len(reps))
	}
	for idx, repo := range reps {
		repoName := repo.Name
		expectedRepoName := fmt.Sprintf("repo-%d", idx+1)
		if repoName != expectedRepoName {
			t.Errorf("Expect repo name: %s, got: %s", expectedRepoName, repoName)
		}
	}
}

// TestRepoServiceCreateRepo checks the client-streaming CreateRepo RPC:
// a context message followed by the repo data, one byte per message.
func TestRepoServiceCreateRepo(t *testing.T) {
	s, l := startTestServer()
	defer s.GracefulStop()

	bufconnDialer := func(ctx context.Context, addr string) (net.Conn, error) {
		return l.Dial()
	}
	client, err := grpc.DialContext(
		context.Background(),
		"",
		grpc.WithInsecure(),
		grpc.WithContextDialer(bufconnDialer),
	)
	if err != nil {
		t.Fatal(err)
	}

	repoClient := repos.NewRepoClient(client)
	stream, err := repoClient.CreateRepo(context.Background())
	if err != nil {
		t.Fatal("CreateRepo: ", err)
	}
	c := repos.RepoCreateRequest_Context{
		Context: &repos.RepoContext{
			CreatorId: "user-12",
			Name:      "test-create-repo",
		},
	}
	r := repos.RepoCreateRequest{
		Body: &c,
	}
	err = stream.Send(&r)
	if err != nil {
		t.Fatal("Stream Send: ", err)
	}

	data := "arbi data bytes"
	repoData := strings.NewReader(data)
	for {
		b, err := repoData.ReadByte()
		if err == io.EOF {
			break
		}
		bData := repos.RepoCreateRequest_Data{
			Data: []byte{b},
		}
		r := repos.RepoCreateRequest{
			Body: &bData,
		}
		err = stream.Send(&r)
		if err != nil {
			t.Fatal("Stream send: ", err)
		}
	}

	resp, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatal("CloseAndRecv: ", err)
	}
	expectedSize := int32(len(data))
	if resp.Size != expectedSize {
		t.Errorf("Expected repo create size: %d, Got: %d", expectedSize, resp.Size)
	}
	expectedRepoUrl := "https://rpcs.bts.org/user-12/test-create-repo"
	if resp.Repo.Url != expectedRepoUrl {
		t.Errorf("Expected repo url: %s, Got: %s", expectedRepoUrl, resp.Repo.Url)
	}
}

// getHealthClient dials the bufconn listener and returns a health
// client for it.
func getHealthClient(l *bufconn.Listener) (healthz.HealthClient, error) {
	bufconnDialer := func(ctx context.Context, addr string) (net.Conn, error) {
		return l.Dial()
	}
	client, err := grpc.DialContext(
		context.Background(),
		"",
		grpc.WithInsecure(),
		grpc.WithContextDialer(bufconnDialer),
	)
	if err != nil {
		return nil, err
	}
	return healthz.NewHealthClient(client), nil
}

// TestHealthService checks the overall server health status.
func TestHealthService(t *testing.T) {
	s, l := startTestServer()
	defer s.GracefulStop()

	healthClient, err := getHealthClient(l)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := healthClient.Check(
		context.Background(),
		&healthz.HealthCheckRequest{},
	)
	if err != nil {
		t.Fatal(err)
	}
	serviceHealthStatus := resp.Status.String()
	if serviceHealthStatus != "SERVING" {
		t.Fatalf("Expected health: SERVING, Got: %s", serviceHealthStatus)
	}
}
package main import ( "flag" "log" "os" "runtime" "runtime/pprof" ) var concurrency = flag.Int("concurrency", 100, "Number of concurrent files/goroutines") func main() { flag.Parse() f, err := os.Create("cprof") if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() runtime.GOMAXPROCS(runtime.NumCPU()) filename := flag.Arg(0) external := flag.Arg(1) makeTags(filename) if external != "" { calculateExternalDistances(*concurrency, external) } else { calculateDistances(*concurrency) } f, err = os.Create("mprof") if err != nil { log.Fatal(err) } pprof.WriteHeapProfile(f) f.Close() }
package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"

	log "github.com/Sirupsen/logrus"
	"github.com/gorilla/context"
	"github.com/gorilla/sessions"
	"github.com/labstack/echo"
	"github.com/labstack/echo/engine/standard"
	"github.com/satori/go.uuid"

	"github.com/SUSE/stratos-ui/components/app-core/backend/config"
	"github.com/SUSE/stratos-ui/components/app-core/backend/repository/interfaces"
)

// cfSessionCookieName is the cookie Cloud Foundry routers use for session affinity.
const cfSessionCookieName = "JSESSIONID"

// handleSessionError maps a session lookup failure to an HTTP shadow error:
// connection failures ("dial tcp") surface as 503, everything else as 401.
// err must be non-nil.
func handleSessionError(err error) error {
	if strings.Contains(err.Error(), "dial tcp") {
		return interfaces.NewHTTPShadowError(
			http.StatusServiceUnavailable,
			"Service is currently unavailable",
			"Service is currently unavailable: %v",
			err,
		)
	}
	return interfaces.NewHTTPShadowError(
		http.StatusUnauthorized,
		"User session could not be found",
		"User session could not be found: %v",
		err,
	)
}

// sessionMiddleware requires a valid session; it stores the session's user_id
// on the echo context for downstream handlers.
func (p *portalProxy) sessionMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		log.Debug("sessionMiddleware")

		p.removeEmptyCookie(c)

		userID, err := p.GetSessionValue(c, "user_id")
		if err == nil {
			c.Set("user_id", userID)
			return h(c)
		}
		return handleSessionError(err)
	}
}

// sessionCleanupMiddleware clears gorilla/context request data after the
// handler runs, preventing a per-request memory leak.
func sessionCleanupMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		log.Debug("sessionCleanupMiddleware")
		err := h(c)
		req := c.Request().(*standard.Request).Request
		context.Clear(req)
		return err
	}
}

// adminMiddleware lets the request through only when the session user is a
// UAA admin; everyone else gets 401.
func (p *portalProxy) adminMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		// get the user guid from the session
		userID, err := p.GetSessionValue(c, "user_id")
		if err == nil {
			// check their admin status in UAA
			u, err := p.getUAAUser(userID.(string))
			if err != nil {
				return c.NoContent(http.StatusUnauthorized)
			}
			if u.Admin {
				return h(c)
			}
			// BUG FIX: the original fell through to handleSessionError(nil)
			// here, which panics calling Error() on a nil error. A non-admin
			// user simply isn't authorized.
			return c.NoContent(http.StatusUnauthorized)
		}
		return handleSessionError(err)
	}
}

// errorLoggingMiddleware logs the internal message of shadow errors and
// returns only their public HTTP error to the client.
func errorLoggingMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		log.Debug("errorLoggingMiddleware")
		err := h(c)
		if shadowError, ok := err.(interfaces.ErrHTTPShadow); ok {
			log.Error(shadowError.LogMessage)
			return shadowError.HTTPError
		}
		return err
	}
}

// retryAfterUpgradeMiddleware answers 503 + Retry-After while an upgrade lock
// file exists. If the upgrade config properties are not set, the middleware is
// a pass-through.
func retryAfterUpgradeMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	upgradeVolume, noUpgradeVolumeErr := config.GetValue(UpgradeVolume)
	upgradeLockFile, noUpgradeLockFileNameErr := config.GetValue(UpgradeLockFileName)

	// If any of those properties are not set, disable upgrade middleware
	if noUpgradeVolumeErr != nil || noUpgradeLockFileNameErr != nil {
		return func(c echo.Context) error {
			return h(c)
		}
	}

	return func(c echo.Context) error {
		if _, err := os.Stat(fmt.Sprintf("/%s/%s", upgradeVolume, upgradeLockFile)); err == nil {
			c.Response().Header().Add("Retry-After", "10")
			return c.NoContent(http.StatusServiceUnavailable)
		}
		return h(c)
	}
}

// cloudFoundryMiddleware enforces HTTPS based on the X-Forwarded-Proto header
// set by the Cloud Foundry router, redirecting plain-HTTP requests.
func (p *portalProxy) cloudFoundryMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Check that we are on HTTPS - redirect if not
		if c.Request().Header().Contains("X-Forwarded-Proto") {
			proto := c.Request().Header().Get("X-Forwarded-Proto")
			if proto != "https" {
				redirect := fmt.Sprintf("https://%s%s", c.Request().Host(), c.Request().URI())
				return c.Redirect(301, redirect)
			}
			return h(c)
		}
		return interfaces.NewHTTPShadowError(
			http.StatusBadRequest,
			"X-Forwarded-Proto not found and is required",
			"X-Forwarded-Proto not found and is required",
		)
	}
}

// cloudFoundrySessionMiddleware ensures a "JSESSIONID" cookie is present for
// Cloud Foundry session affinity, minting one from the session if needed.
func (p *portalProxy) cloudFoundrySessionMiddleware(h echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		// Make sure there is a JSESSIONID cookie set to the session ID
		session, err := p.GetSession(c)
		if err == nil {
			// We have a session
			guid, err := p.GetSessionValue(c, cfSessionCookieName)
			if err != nil || guid == nil {
				guid = uuid.NewV4().String()
				session.Values[cfSessionCookieName] = guid
				// Best-effort save; the cookie below is still set either way.
				p.SaveSession(c, session)
			}
			sessionGUID := fmt.Sprintf("%s", guid)
			// Set the JSESSIONID cookie for Cloud Foundry session affinity
			w := c.Response().(*standard.Response).ResponseWriter
			cookie := sessions.NewCookie(cfSessionCookieName, sessionGUID, session.Options)
			http.SetCookie(w, cookie)
		}
		return h(c)
	}
}
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package hwsec import ( "context" "syscall" "chromiumos/tast/common/testexec" "chromiumos/tast/errors" ) // FakePCAAgent performs the execution and terminiation of the fake pca agent. type FakePCAAgent struct { cmd *testexec.Cmd } // FakePCAAgentContext creates a new FakePCAAgent where context is used when calling the commands. func FakePCAAgentContext(ctx context.Context) *FakePCAAgent { return &FakePCAAgent{testexec.CommandContext(ctx, "fake_pca_agentd")} } // Start starts running the fake pca agent. func (f *FakePCAAgent) Start() error { return f.cmd.Start() } // Stop signals the fake pca agent with SIGTERM as upstart does to daemons, and waits for its termination. func (f *FakePCAAgent) Stop() error { if err := f.cmd.Signal(syscall.SIGTERM); err != nil { return errors.Wrap(err, "failed signal the process") } if err := f.cmd.Wait(); err != nil { return errors.Wrap(err, "failed wait for shutdown") } return nil }
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// Earlier iteration kept for reference: a single handler with an if/else
// router. Setting up a real router required refactoring handlerFunc() into
// separate page functions — the first level of abstraction for separating
// repetitive tasks into individual functions.
// func handlerFunc(w http.ResponseWriter, r *http.Request) {
//	w.Header().Set("Content-Type", "text/html")
//	if r.URL.Path == "/" {
//		fmt.Fprint(w, "<h1>Welcome to Beauty by Masayo!</h1>")
//	} else if r.URL.Path == "/about" {
//		fmt.Fprint(w, "<h1>About Beauty by Masayo!</h1>")
//	} else if r.URL.Path == "/contact" {
//		fmt.Fprint(w, "<h1>Contact Beauty by Masayo</h1>\n")
//		fmt.Fprint(w, "To get in touch, please contact <a href=\"mailto:findme@scottlaing.ca\">"+
//			"info@beautybymasayo.com</a>.")
//	} else if r.URL.Path == "/workshops" {
//		fmt.Fprint(w, "<h1>Workshops by Beauty by Masayo!</h1>")
//	} else if r.URL.Path == "/headshots" {
//		fmt.Fprint(w, "<h1>Headshots by Beauty by Masayo!</h1>")
//	} else {
//		w.WriteHeader(http.StatusNotFound)
//		fmt.Fprint(w, "<h1>404 Error</h1>\n")
//		fmt.Fprint(w, "<h2>We could not find the page you were seeking.</h2> <p>Please contact us if you require further assistance.</p>")
//	}
// }

// home renders the landing page.
func home(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprint(w, "<h1>Welcome to Beauty by Masayo!</h1>")
}

// contact renders the contact page with a mailto link.
func contact(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprint(w, "<h1>Contact Beauty by Masayo</h1>\n")
	fmt.Fprint(w, "To get in touch, please contact <a href=\"mailto:findme@scottlaing.ca\">"+
		"info@beautybymasayo.com</a>.")
}

// about renders the about page.
func about(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprint(w, "<h1>About Beauty by Masayo!</h1>")
}

// workshops renders the workshops page.
func workshops(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprint(w, "<h1>Workshops by Beauty by Masayo!</h1>")
}

// headshots renders the headshots page.
func headshots(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprint(w, "<h1>Headshots by Beauty by Masayo!</h1>")
}

// TODO: with the router in place, the old if/else 404 branch is gone —
// register a NotFoundHandler on the mux router to restore a custom 404 page.

func main() {
	// gorilla/mux router: route values dictate which page is rendered, so no
	// if/else dispatch is needed.
	router := mux.NewRouter()

	router.HandleFunc("/", home)
	router.HandleFunc("/contact", contact)
	router.HandleFunc("/about", about)
	router.HandleFunc("/workshops", workshops)
	router.HandleFunc("/headshots", headshots)

	// BUG FIX: ListenAndServe only returns on failure (e.g. the port is
	// already in use); the error was previously discarded and the process
	// exited silently with status 0.
	log.Fatal(http.ListenAndServe(":8011", router))
}
/** * @file data.go * @author malin * @mail malinbupt@163.com * @time Sun 09 Aug 2015 04:05:22 PM CST */ package resource import ( "bytes" "compress/gzip" "io/ioutil" aint "distributedcache/util/atomicint" ) // Resource is the resouce should be cached type Resource struct { value []byte miniteQps *aint.AtomicInt } func New(value []byte) *Resource { return &Resource{ value: value, miniteQps: aint.New(1), } } // Marshal compressed the resource by gzip // inorder to improve transform speed by network func Marshal(r *Resource) (value []byte, err error) { buf := bytes.NewBuffer([]byte{}) gWriter := gzip.NewWriter(buf) defer gWriter.Close() if _, err = gWriter.Write(r.value); err != nil { return } if _, err = gWriter.Write(r.miniteQps.Bytes()); err != nil { return } gWriter.Flush() return ioutil.ReadAll(buf) } func UnMarshal(value []byte) (r *Resource, err error) { buf := bytes.NewBuffer(value) gReader, err := gzip.NewReader(buf) if err != nil { return } defer gReader.Close() //TODO if process 'value' as follow, will get an 'unexpected EOF' error //if value, err = ioutil.ReadAll(gReader); err != nil { // return //} value, _ = ioutil.ReadAll(gReader) r = &Resource{ value: value[:len(value)-8], miniteQps: aint.NewFromBytes(value[len(value)-8:]), } return } func (r *Resource) GetValue() []byte { return r.value } // Add 'n' to resource's miniteQps func (r *Resource) Add(n uint64) { r.miniteQps.Add(n) } // Reset resource's miniteQps to zero func (r *Resource) Reset() { r.miniteQps.Reset() } func (r *Resource) GetQps() uint64 { return r.miniteQps.Get() }
package main

import (
	"fmt"
	"log"
	"net/http"
)

// Source URLs of the locally served Hugo pages the completer scrapes.
var TAGS_URL = "http://localhost:1313/tlist.html"
var CATEGORIES_URL = "http://localhost:1313/clist.html"

func main() {
	hugoCompleter := HugoCompleter{
		tagsUrl:       TAGS_URL,
		categoriesUrl: CATEGORIES_URL,
	}

	http.HandleFunc("/tags", func(w http.ResponseWriter, r *http.Request) {
		log.Println("Got requests for fetching tags list")
		tags, err := hugoCompleter.GetTagsJson()
		if err != nil {
			// BUG FIX: the error was only logged and a nil body was still
			// written with a 200 status; surface a 500 to the client instead.
			log.Println("Error in tagsJson:", err)
			http.Error(w, "failed to fetch tags", http.StatusInternalServerError)
			return
		}
		w.Write(tags)
	})

	http.HandleFunc("/categories", func(w http.ResponseWriter, r *http.Request) {
		log.Println("Got requests for fetching categories list")
		categories, err := hugoCompleter.GetCategoriesJson()
		if err != nil {
			// BUG FIX: same as /tags — report the failure, do not write nil.
			log.Println("Error in getCategoriesJson:", err)
			http.Error(w, "failed to fetch categories", http.StatusInternalServerError)
			return
		}
		w.Write(categories)
	})

	// listen to port
	addr := "localhost:5050"
	log.Println("Listening on 5050 port.")
	log.Println(banner(addr))
	// BUG FIX: ListenAndServe only returns on failure; report it rather than
	// exiting silently with status 0.
	log.Fatal(http.ListenAndServe(addr, nil))
}

// banner renders a short usage message pointing at the two endpoints.
func banner(addr string) string {
	fetchTags := addr + "/tags"
	fetchCategories := addr + "/categories"
	banner := "\n=========Usage=============\n"
	banner += fmt.Sprintf("Fetch Tags: %v \n", fetchTags)
	banner += fmt.Sprintf("Fetch Categories: %v\n", fetchCategories)
	banner += "=========================="
	return banner
}
package main

import (
	"chi-rest/bootstrap"
	"chi-rest/lib/mysql"
	"chi-rest/lib/utils"
	"chi-rest/services/journeyplan"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"runtime"

	"github.com/urfave/cli/v2"
)

var (
	_, b, _, _ = runtime.Caller(0)
	basepath   = filepath.Dir(b)
	config     utils.Config
	debug      = false
	host       string
	// app the base of skeleton
	app *bootstrap.App
)

// EnvConfigPath is the environment variable that sets the config path.
const EnvConfigPath = "REBEL_CLI_CONFIG_PATH"

// setup initializes the configuration, database connection, validator and the
// base application object. The config path is read from EnvConfigPath,
// falling back to ./config.json.
func setup() error {
	configFile := os.Getenv(EnvConfigPath)
	if configFile == "" {
		configFile = "./config.json"
	}
	log.Println(configFile)
	config = utils.NewViperConfig(basepath, configFile)
	host = config.GetString("app.host")
	debug = config.GetBool("app.debug")

	validator := bootstrap.SetupValidator(config)
	app = &bootstrap.App{
		Debug:     debug,
		Config:    config,
		DB:        mysql.Connect(config.GetString("db.mysql_dsn")),
		Validator: validator,
	}
	return nil
}

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	// BUG FIX: setup's error return was silently discarded; abort on failure
	// instead of running commands against a half-initialized app.
	if err := setup(); err != nil {
		log.Fatal(err)
	}

	cmd := &cli.App{
		Name:  "chi-rest",
		Usage: "journeyplan, cli",
		Commands: []*cli.Command{
			{
				Name:  "journeyplan",
				Usage: "Run the http 1/1 for API",
				// Flags: journeyplan.Flags,
				Action: journeyplan.API{app}.Start,
				// After: ListenSignal,
			},
		},
		Action: func(cli *cli.Context) error {
			fmt.Printf("%s version:%s\n", cli.App.Name, "1.0")
			return nil
		},
	}

	err := cmd.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
package controllers

// NormalResp is the standard JSON envelope returned by handlers in this
// package: an application-level error code/message pair plus an
// endpoint-specific payload.
type NormalResp struct {
	// Errno is the application-level error code. NOTE(review): the success
	// value convention (e.g. "0") is not visible here — confirm against the
	// handlers that populate it.
	Errno string `json:"errno"`
	// Errmsg is a human-readable description accompanying Errno.
	Errmsg string `json:"errmsg"`
	// Data carries the endpoint-specific response payload.
	Data interface{} `json:"data"`
}
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package vcenter import ( "github.com/pkg/errors" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/govmomi/extra" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/govmomi/template" ) const ( diskMoveType = string(types.VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate) ) // Clone kicks off a clone operation on vCenter to create a new virtual machine. 
func Clone(ctx *context.MachineContext, bootstrapData []byte) error { ctx = context.NewMachineLoggerContext(ctx, "vcenter") ctx.Logger.V(6).Info("starting clone process") var extraConfig extra.Config extraConfig.SetCloudInitUserData(bootstrapData) tpl, err := template.FindTemplate(ctx, ctx.VSphereMachine.Spec.Template) if err != nil { return err } folder, err := ctx.Session.Finder.FolderOrDefault(ctx, ctx.VSphereCluster.Spec.CloudProviderConfiguration.Workspace.Folder) if err != nil { return errors.Wrapf(err, "unable to get folder for %q", ctx) } datastore, err := ctx.Session.Finder.DatastoreOrDefault(ctx, ctx.VSphereCluster.Spec.CloudProviderConfiguration.Workspace.Datastore) if err != nil { return errors.Wrapf(err, "unable to get datastore for %q", ctx) } pool, err := ctx.Session.Finder.ResourcePoolOrDefault(ctx, ctx.VSphereCluster.Spec.CloudProviderConfiguration.Workspace.ResourcePool) if err != nil { return errors.Wrapf(err, "unable to get resource pool for %q", ctx) } devices, err := tpl.Device(ctx) if err != nil { return errors.Wrapf(err, "error getting devices for %q", ctx) } diskSpec, err := getDiskSpec(ctx, devices) if err != nil { return errors.Wrapf(err, "error getting disk spec for %q", ctx) } networkSpecs, err := getNetworkSpecs(ctx, devices) if err != nil { return errors.Wrapf(err, "error getting network specs for %q", ctx) } deviceSpecs := []types.BaseVirtualDeviceConfigSpec{diskSpec} deviceSpecs = append(deviceSpecs, networkSpecs...) numCPUs := ctx.VSphereMachine.Spec.NumCPUs if numCPUs < 2 { numCPUs = 2 } numCoresPerSocket := ctx.VSphereMachine.Spec.NumCoresPerSocket if numCoresPerSocket == 0 { numCoresPerSocket = numCPUs } memMiB := ctx.VSphereMachine.Spec.MemoryMiB if memMiB == 0 { memMiB = 2048 } spec := types.VirtualMachineCloneSpec{ Config: &types.VirtualMachineConfigSpec{ Annotation: ctx.String(), // Assign the clone's InstanceUUID the value of the Kubernetes Machine // object's UID. 
This allows lookup of the cloned VM prior to knowing // the VM's UUID. InstanceUuid: string(ctx.Machine.UID), Flags: newVMFlagInfo(), DeviceChange: deviceSpecs, ExtraConfig: extraConfig, NumCPUs: numCPUs, NumCoresPerSocket: numCoresPerSocket, MemoryMB: memMiB, }, Location: types.VirtualMachineRelocateSpec{ Datastore: types.NewReference(datastore.Reference()), DiskMoveType: diskMoveType, Folder: types.NewReference(folder.Reference()), Pool: types.NewReference(pool.Reference()), }, // This is implicit, but making it explicit as it is important to not // power the VM on before its virtual hardware is created and the MAC // address(es) used to build and inject the VM with cloud-init metadata // are generated. PowerOn: false, } ctx.Logger.V(6).Info("cloning machine", "clone-spec", spec) task, err := tpl.Clone(ctx, folder, ctx.Machine.Name, spec) if err != nil { return errors.Wrapf(err, "error trigging clone op for machine %q", ctx) } ctx.VSphereMachine.Status.TaskRef = task.Reference().Value return nil } func newVMFlagInfo() *types.VirtualMachineFlagInfo { diskUUIDEnabled := true return &types.VirtualMachineFlagInfo{ DiskUuidEnabled: &diskUUIDEnabled, } } func getDiskSpec( ctx *context.MachineContext, devices object.VirtualDeviceList) (types.BaseVirtualDeviceConfigSpec, error) { disks := devices.SelectByType((*types.VirtualDisk)(nil)) if len(disks) != 1 { return nil, errors.Errorf("invalid disk count: %d", len(disks)) } disk := disks[0].(*types.VirtualDisk) disk.CapacityInKB = int64(ctx.VSphereMachine.Spec.DiskGiB) * 1024 * 1024 return &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationEdit, Device: disk, }, nil } const ethCardType = "vmxnet3" func getNetworkSpecs( ctx *context.MachineContext, devices object.VirtualDeviceList) ([]types.BaseVirtualDeviceConfigSpec, error) { deviceSpecs := []types.BaseVirtualDeviceConfigSpec{} // Remove any existing NICs for _, dev := range devices.SelectByType((*types.VirtualEthernetCard)(nil)) { deviceSpecs 
= append(deviceSpecs, &types.VirtualDeviceConfigSpec{ Device: dev, Operation: types.VirtualDeviceConfigSpecOperationRemove, }) } // Add new NICs based on the machine config. key := int32(-100) for i := range ctx.VSphereMachine.Spec.Network.Devices { netSpec := &ctx.VSphereMachine.Spec.Network.Devices[i] ref, err := ctx.Session.Finder.Network(ctx, netSpec.NetworkName) if err != nil { return nil, errors.Wrapf(err, "unable to find network %q", netSpec.NetworkName) } backing, err := ref.EthernetCardBackingInfo(ctx) if err != nil { return nil, errors.Wrapf(err, "unable to create new ethernet card backing info for network %q on %q", netSpec.NetworkName, ctx) } dev, err := object.EthernetCardTypes().CreateEthernetCard(ethCardType, backing) if err != nil { return nil, errors.Wrapf(err, "unable to create new ethernet card %q for network %q on %q", ethCardType, netSpec.NetworkName, ctx) } // Get the actual NIC object. This is safe to assert without a check // because "object.EthernetCardTypes().CreateEthernetCard" returns a // "types.BaseVirtualEthernetCard" as a "types.BaseVirtualDevice". nic := dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() if netSpec.MACAddr != "" { nic.MacAddress = netSpec.MACAddr // Please see https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.device.VirtualEthernetCard.html#addressType // for the valid values for this field. nic.AddressType = string(types.VirtualEthernetCardMacTypeManual) ctx.Logger.V(6).Info("configured manual mac address", "mac-addr", nic.MacAddress) } // Assign a temporary device key to ensure that a unique one will be // generated when the device is created. nic.Key = key deviceSpecs = append(deviceSpecs, &types.VirtualDeviceConfigSpec{ Device: dev, Operation: types.VirtualDeviceConfigSpecOperationAdd, }) ctx.Logger.V(6).Info("created network device", "eth-card-type", ethCardType, "network-spec", netSpec) key-- } return deviceSpecs, nil }
// Goroutine memory usage example, page 43
package goroutine

import (
	"runtime"
	"sync"
	"time"
)

// c is never written to and never closed, so any receive on it blocks forever.
var c <-chan interface{}

// Noop spawns the leaking goroutine.
// The leak is intentional — this example demonstrates per-goroutine memory
// cost. wg.Done() is called up front so the caller can Wait() for the
// goroutine to have started; after a short sleep it blocks on <-c for the
// life of the process.
func Noop(wg *sync.WaitGroup) {
	go func() {
		wg.Done()
		time.Sleep(1 * time.Second)
		// blocks here.
		<-c
	}()
}

// MemConsumed returns the current system memory consumed.
// A GC is forced first so the MemStats reading is less noisy.
func MemConsumed() uint64 {
	runtime.GC()
	var s runtime.MemStats
	runtime.ReadMemStats(&s)
	return s.Sys
}
package goip

// NOTE(review): real-looking account credentials are committed here, and these
// tests call a live external geo-IP service (network-dependent, and the
// expected country/continent values can change upstream). Consider moving the
// credentials to environment variables and skipping when unset.

//you need to set the username and license to run the tests
var userName = "101479"
var license = "LLrtcBQcPkkT"

// TestConnect only verifies that the client constructor does not panic.
func TestConnect(t *testing.T) {
	New(userName, license)
}

// TestCountryName resolves a known IP and checks the country name.
func TestCountryName(t *testing.T) {
	check := New(userName, license)
	country, err := check.CountryName("80.249.82.222")
	if err != nil {
		t.Error("Can't resolve IP to Country\n", err)
	} else if country != "Belarus" {
		t.Error("Incorrect IP to Country (expected Belarus)", country)
	}
}

// TestCountryNameFail checks that an invalid host yields an error.
func TestCountryNameFail(t *testing.T) {
	check := New(userName, license)
	_, err := check.CountryName("not-exists")
	if err == nil {
		t.Error("Error not produced for the invalid input")
	}
}

// TestContinentName resolves a known IP and checks the continent name.
func TestContinentName(t *testing.T) {
	check := New(userName, license)
	continent, err := check.ContinentName("80.249.82.222")
	if err != nil {
		t.Error("Can't resolve IP to Continent\n", err)
	} else if continent != "Europe" {
		t.Error("Incorrect IP to Continent (expected Europe)", continent)
	}
}

// TestContinentNameFail checks that an invalid host yields an error.
func TestContinentNameFail(t *testing.T) {
	check := New(userName, license)
	_, err := check.ContinentName("not-exists")
	if err == nil {
		t.Error("Error not produced for the invalid input")
	}
}
// Package http exposes the supplier HTTP handlers.
package http

import (
	"echo-crud/entity"
	"echo-crud/internal/service"
	nethttp "net/http"

	"github.com/google/uuid"
	"github.com/labstack/echo/v4"
)

// CONSISTENCY FIX: the file previously imported "net/http" twice — once plain
// and once as nethttp — and mixed the two names. It is now imported once and
// referenced as nethttp everywhere.

// CreateSupplierBodyRequest defines all body attributes needed to add supplier.
type CreateSupplierBodyRequest struct {
	NamaSupplier string `json:"nama_supplier" binding:"required"`
	Telepon      string `json:"telepon" binding:"required"`
	Alamat       string `json:"alamat" binding:"required"`
}

// SupplierRowResponse defines all attributes needed to fulfill for supplier row entity.
type SupplierRowResponse struct {
	ID           uuid.UUID `json:"id_supplier"`
	NamaSupplier string    `json:"nama_supplier"`
	Telepon      string    `json:"telepon"`
	Alamat       string    `json:"alamat"`
}

// SupplierDetailResponse defines all attributes needed to fulfill for pic supplier entity.
type SupplierDetailResponse struct {
	ID           uuid.UUID `json:"id_supplier"`
	NamaSupplier string    `json:"nama_supplier"`
	Telepon      string    `json:"telepon"`
	Alamat       string    `json:"alamat"`
}

// buildSupplierRowResponse maps a supplier entity onto its list-row DTO.
func buildSupplierRowResponse(supplier *entity.Supplier) SupplierRowResponse {
	form := SupplierRowResponse{
		ID:           supplier.ID,
		NamaSupplier: supplier.NamaSupplier,
		Alamat:       supplier.Alamat,
		Telepon:      supplier.Telepon,
	}
	return form
}

// buildSupplierDetailResponse maps a supplier entity onto its detail DTO.
func buildSupplierDetailResponse(supplier *entity.Supplier) SupplierDetailResponse {
	form := SupplierDetailResponse{
		ID:           supplier.ID,
		NamaSupplier: supplier.NamaSupplier,
		Alamat:       supplier.Alamat,
		Telepon:      supplier.Telepon,
	}
	return form
}

// QueryParamsSupplier defines all attributes for input query params
type QueryParamsSupplier struct {
	Limit  string `form:"limit"`
	Offset string `form:"offset"`
	SortBy string `form:"sort_by"`
	Order  string `form:"order"`
	Status string `form:"status"`
}

// MetaSupplier define attributes needed for Meta
type MetaSupplier struct {
	Limit  int   `json:"limit"`
	Offset int   `json:"offset"`
	Total  int64 `json:"total"`
}

// NewMetaSupplier creates an instance of Meta response.
func NewMetaSupplier(limit, offset int, total int64) *MetaSupplier {
	return &MetaSupplier{
		Limit:  limit,
		Offset: offset,
		Total:  total,
	}
}

// SupplierHandler handles HTTP request related to user flow.
type SupplierHandler struct {
	service service.SupplierUseCase
}

// NewSupplierHandler creates an instance of SupplierHandler.
func NewSupplierHandler(service service.SupplierUseCase) *SupplierHandler {
	return &SupplierHandler{
		service: service,
	}
}

// CreateSupplier handles supplier creation.
// It will reject the request if the request doesn't have required data.
func (handler *SupplierHandler) CreateSupplier(echoCtx echo.Context) error {
	var form CreateSupplierBodyRequest
	if err := echoCtx.Bind(&form); err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	supplierEntity := entity.NewSupplier(
		uuid.Nil,
		form.NamaSupplier,
		form.Telepon,
		form.Alamat,
	)

	if err := handler.service.Create(echoCtx.Request().Context(), supplierEntity); err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInternalServerError)
		return echoCtx.JSON(nethttp.StatusInternalServerError, errorResponse)
	}

	var res = entity.NewResponse(nethttp.StatusCreated, "Request processed successfully.", supplierEntity)
	return echoCtx.JSON(res.Status, res)
}

// GetListSupplier returns a paginated list of suppliers.
func (handler *SupplierHandler) GetListSupplier(echoCtx echo.Context) error {
	var form QueryParamsSupplier
	if err := echoCtx.Bind(&form); err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	supplier, err := handler.service.GetListSupplier(echoCtx.Request().Context(), form.Limit, form.Offset)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInternalServerError)
		return echoCtx.JSON(nethttp.StatusInternalServerError, errorResponse)
	}

	var res = entity.NewResponse(nethttp.StatusOK, "Request processed successfully.", supplier)
	return echoCtx.JSON(res.Status, res)
}

// GetDetailSupplier returns one supplier by its path id.
func (handler *SupplierHandler) GetDetailSupplier(echoCtx echo.Context) error {
	idParam := echoCtx.Param("id")
	if len(idParam) == 0 {
		errorResponse := buildErrorResponse(nil, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	id, err := uuid.Parse(idParam)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	supplier, err := handler.service.GetDetailSupplier(echoCtx.Request().Context(), id)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInternalServerError)
		// NOTE(review): service failures answer 400 here but 500 in the
		// create/list handlers — confirm whether 400 is intentional.
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	var res = entity.NewResponse(nethttp.StatusOK, "Request processed successfully.", supplier)
	return echoCtx.JSON(res.Status, res)
}

// UpdateSupplier replaces the stored supplier identified by the path id.
func (handler *SupplierHandler) UpdateSupplier(echoCtx echo.Context) error {
	var form CreateSupplierBodyRequest
	if err := echoCtx.Bind(&form); err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	idParam := echoCtx.Param("id")
	if len(idParam) == 0 {
		errorResponse := buildErrorResponse(nil, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	id, err := uuid.Parse(idParam)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	// Ensure the supplier exists before attempting the update.
	_, err = handler.service.GetDetailSupplier(echoCtx.Request().Context(), id)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInternalServerError)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	// CONSISTENCY FIX: use the entity constructor (as CreateSupplier does)
	// instead of a vet-unfriendly unkeyed struct literal.
	supplierEntity := entity.NewSupplier(
		id,
		form.NamaSupplier,
		form.Telepon,
		form.Alamat,
	)

	if err := handler.service.UpdateSupplier(echoCtx.Request().Context(), supplierEntity); err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInternalServerError)
		return echoCtx.JSON(nethttp.StatusInternalServerError, errorResponse)
	}

	var res = entity.NewResponse(nethttp.StatusOK, "Request processed successfully.", nil)
	return echoCtx.JSON(res.Status, res)
}

// DeleteSupplier removes the supplier identified by the path id.
func (handler *SupplierHandler) DeleteSupplier(echoCtx echo.Context) error {
	idParam := echoCtx.Param("id")
	if len(idParam) == 0 {
		errorResponse := buildErrorResponse(nil, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	id, err := uuid.Parse(idParam)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInvalidInput)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	err = handler.service.DeleteSupplier(echoCtx.Request().Context(), id)
	if err != nil {
		errorResponse := buildErrorResponse(err, entity.ErrInternalServerError)
		return echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)
	}

	var res = entity.NewResponse(nethttp.StatusOK, "Request processed successfully.", nil)
	return echoCtx.JSON(res.Status, res)
}
/* * OFAC API * * OFAC (Office of Foreign Assets Control) API is designed to facilitate the enforcement of US government economic sanctions programs required by federal law. This project implements a modern REST HTTP API for companies and organizations to obey federal law and use OFAC data in their applications. * * API version: v1 * Generated by: OpenAPI Generator (https://openapi-generator.tech) */ package openapi // Entity List (EL) - Bureau of Industry and Security type El struct { // The name of the entity Name string `json:"name,omitempty"` // Addresses associated with the entity Addresses []string `json:"addresses,omitempty"` // Known aliases associated with the entity AlternateNames []string `json:"alternateNames,omitempty"` // Date when the restriction came into effect StartDate string `json:"startDate,omitempty"` // Specifies the license requirement imposed on the named entity LicenseRequirement string `json:"licenseRequirement,omitempty"` // Identifies the policy BIS uses to review the licenseRequirements LicensePolicy string `json:"licensePolicy,omitempty"` // Identifies the corresponding Notice in the Federal Register FrNotice string `json:"frNotice,omitempty"` // The link to the official SSI list SourceListURL string `json:"sourceListURL,omitempty"` // The link for information regarding the source SourceInfoURL string `json:"sourceInfoURL,omitempty"` }
package generic

import (
	"strconv"

	"github.com/iotaledger/hive.go/kvstore"
	"github.com/iotaledger/hive.go/objectstorage"
	"github.com/iotaledger/hive.go/stringify"
)

// region CachedObject //////////////////////////////////////////////////////////////////////////////////////////

// CachedObject is a wrapper around a value that is stored in the object storage.
// It provides necessary function that object storage needs to correctly handle the object.
// The type parameter T gives callers a typed view over the untyped
// objectstorage.CachedObject it wraps.
type CachedObject[T StorableObject] struct {
	cachedObject objectstorage.CachedObject
}

// newCachedObject wraps an untyped objectstorage.CachedObject in the typed wrapper.
func newCachedObject[T StorableObject](cachedObject objectstorage.CachedObject) *CachedObject[T] {
	return &CachedObject[T]{
		cachedObject: cachedObject,
	}
}

// NewEmptyCachedObject creates an "empty" CachedObject, that is not part of any ObjectStorage.
//
// Sometimes, we want to be able to offer a "filtered view" on the ObjectStorage and therefore be able to return an
// "empty" value on load operations even if the underlying object exists (i.e. the value tangle on top of the normal
// tangle only returns value transactions in its load operations).
func NewEmptyCachedObject[T StorableObject](key []byte) (result *CachedObject[T]) {
	return &CachedObject[T]{
		cachedObject: objectstorage.NewEmptyCachedObject(key),
	}
}

// Key returns the object storage key that is used to address the object.
func (c *CachedObject[T]) Key() []byte {
	return c.cachedObject.Key()
}

// Exists returns true if the StorableObject in this container does exist (could be found in the database and was not
// marked as deleted).
func (c *CachedObject[T]) Exists() bool {
	return c.cachedObject.Exists()
}

// Get retrieves the StorableObject, that is cached in this container.
// Note: the type assertion panics if the stored object is not a T; use Unwrap
// for the existence-checked variant.
func (c *CachedObject[T]) Get() (result T) {
	return c.cachedObject.Get().(T)
}

// Unwrap returns the underlying object with correct type.
func (c *CachedObject[T]) Unwrap() (result T, exists bool) { if !c.Exists() { return } r := c.Get() result = r exists = true return } // Consume directly consumes the StorableObject. This method automatically Release()s the object when the callback is done. // Returns true if the callback was called. func (c *CachedObject[T]) Consume(consumer func(T), forceRelease ...bool) bool { return c.cachedObject.Consume(func(object objectstorage.StorableObject) { consumer(object.(T)) }) } // Retain registers a new consumer for this cached object. func (c *CachedObject[T]) Retain() *CachedObject[T] { return &CachedObject[T]{ cachedObject: c.cachedObject.Retain(), } } // Release the object, to be picked up by the persistence layer (as soon as all consumers are done). func (c *CachedObject[T]) Release(force ...bool) { c.cachedObject.Release(force...) } // Transaction is a synchronization primitive that executes the callback atomically which means that if multiple // Transactions are being started from different goroutines, then only one of them can run at the same time. // // The identifiers allow to define the scope of the Transaction. Transactions with different scopes can run at the same // time and act as if they are secured by different mutexes. // // It is also possible to provide multiple identifiers and the callback waits until all of them can be acquired at the // same time. In contrast to normal mutexes where acquiring multiple locks can lead to deadlocks, this method is // deadlock safe. // // Note: It is the equivalent of a mutex.Lock/Unlock. func (c *CachedObject[T]) Transaction(callback func(object T), identifiers ...interface{}) *CachedObject[T] { return &CachedObject[T]{ cachedObject: c.cachedObject.Transaction(func(object objectstorage.StorableObject) { callback(object.(T)) }), } } // RTransaction is a synchronization primitive that executes the callback together with other RTransactions but never // together with a normal Transaction. 
// // The identifiers allow to define the scope of the RTransaction. RTransactions with different scopes can run at the // same time independently of other RTransactions and act as if they are secured by different mutexes. // // It is also possible to provide multiple identifiers and the callback waits until all of them can be acquired at the // same time. In contrast to normal mutexes where acquiring multiple locks can lead to deadlocks, this method is // deadlock safe. // // Note: It is the equivalent of a mutex.RLock/RUnlock. func (c *CachedObject[T]) RTransaction(callback func(object T), identifiers ...interface{}) *CachedObject[T] { return &CachedObject[T]{ cachedObject: c.cachedObject.RTransaction(func(object objectstorage.StorableObject) { callback(object.(T)) }), } } // BatchWrite checks if the cachedObject should be persisted. // If all checks pass, the cachedObject is marshaled and added to the BatchedMutations. // Do not call this method for objects that should not be persisted. func (c *CachedObject[T]) BatchWrite(batchedMuts kvstore.BatchedMutations) { c.cachedObject.BatchWrite(batchedMuts) } // BatchWriteDone is called after the cachedObject was persisted. // It releases the cachedObject from the cache if no consumers are left and it was not modified in the meantime. func (c *CachedObject[T]) BatchWriteDone() { c.cachedObject.BatchWriteDone() } // BatchWriteScheduled returns true if the cachedObject is already scheduled for a BatchWrite operation. func (c *CachedObject[T]) BatchWriteScheduled() bool { return c.cachedObject.BatchWriteScheduled() } // ResetBatchWriteScheduled resets the flag that the cachedObject is scheduled for a BatchWrite operation. 
func (c *CachedObject[T]) ResetBatchWriteScheduled() { c.cachedObject.ResetBatchWriteScheduled() } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// // region CachedObjects ////////////////////////////////////////////////////////////////////////////////////////// // CachedObjects represents a collection of CachedObject objects. type CachedObjects[T StorableObject] []*CachedObject[T] // Unwrap is the type-casted equivalent of Get. It returns a slice of unwrapped objects and optionally skips any objects // that do not exist or are deleted, sets default type value for missing elements. func (c CachedObjects[T]) Unwrap(skip ...bool) (unwrappedChildBranches []T) { skipMissing := false if len(skip) > 0 && skip[0] { skipMissing = true } unwrappedChildBranches = make([]T, 0, len(c)) for _, cachedChildBranch := range c { val, exists := cachedChildBranch.Unwrap() if exists || !skipMissing { unwrappedChildBranches = append(unwrappedChildBranches, val) } } return } // Exists returns a slice of boolean values to indicate whether element at a given index exists. func (c CachedObjects[T]) Exists() (exists []bool) { exists = make([]bool, len(c)) for i, cachedChildBranch := range c { exists[i] = cachedChildBranch.Exists() } return } // Consume iterates over the CachedObjects, unwraps them and passes a type-casted version to the consumer (if the object // is not empty - it exists). It automatically releases the object when the consumer finishes. It returns true, if at // least one object was consumed. func (c CachedObjects[T]) Consume(consumer func(T), forceRelease ...bool) (consumed bool) { for _, cachedObject := range c { consumed = cachedObject.Consume(consumer, forceRelease...) || consumed } return } // Release is a utility function that allows us to release all CachedObjects in the collection. func (c CachedObjects[T]) Release(force ...bool) { for _, cachedObject := range c { cachedObject.Release(force...) 
} } // String returns a human-readable version of the CachedObjects. func (c CachedObjects[T]) String() string { structBuilder := stringify.NewStructBuilder("CachedObjects") for i, cachedObject := range c { structBuilder.AddField(stringify.NewStructField(strconv.Itoa(i), cachedObject)) } return structBuilder.String() } // endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
package app import ( "context" "crypto/tls" "fmt" "knife-panel/internal/app/routers/api/ctl" "knife-panel/internal/app/ws" "net/http" "time" "github.com/gin-gonic/gin" "go.uber.org/dig" "knife-panel/internal/app/config" "knife-panel/internal/app/middleware" "knife-panel/internal/app/routers/api" "knife-panel/pkg/logger" ) // InitWeb 初始化web引擎 func InitWeb(container *dig.Container) *gin.Engine { cfg := config.Global() gin.SetMode(cfg.RunMode) app := gin.New() app.NoMethod(middleware.NoMethodHandler()) app.NoRoute(middleware.NoRouteHandler()) apiPrefixes := []string{"/api/"} // 跟踪ID app.Use(middleware.TraceMiddleware(middleware.AllowPathPrefixNoSkipper(apiPrefixes...))) // 访问日志 app.Use(middleware.LoggerMiddleware(middleware.AllowPathPrefixNoSkipper(apiPrefixes...))) // 崩溃恢复 app.Use(middleware.RecoveryMiddleware()) // 跨域请求 if cfg.CORS.Enable { app.Use(middleware.CORSMiddleware()) } err := ctl.Inject(container) handleError(err) // 注册/api路由 err = api.RegisterRouter(app, container) //注册/ws路由 err = ws.RegisterRouter(app, container) handleError(err) // swagger文档 if dir := cfg.Swagger; dir != "" { app.Static("/swagger", dir) } // 静态站点 if dir := cfg.WWW; dir != "" { app.Use(middleware.WWWMiddleware(dir)) } return app } // InitHTTPServer 初始化http服务 func InitHTTPServer(ctx context.Context, container *dig.Container) func() { cfg := config.Global().HTTP addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port) srv := &http.Server{ Addr: addr, Handler: InitWeb(container), ReadTimeout: 5 * time.Second, WriteTimeout: 10 * time.Second, IdleTimeout: 15 * time.Second, } go func() { logger.Printf(ctx, "HTTP服务开始启动,地址监听在:[%s]", addr) var err error if cfg.CertFile != "" && cfg.KeyFile != "" { srv.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} err = srv.ListenAndServeTLS(cfg.CertFile, cfg.KeyFile) } else { err = srv.ListenAndServe() } if err != nil && err != http.ErrServerClosed { logger.Errorf(ctx, err.Error()) } }() return func() { ctx, cancel := context.WithTimeout(ctx, 
time.Second*time.Duration(cfg.ShutdownTimeout)) defer cancel() srv.SetKeepAlivesEnabled(false) if err := srv.Shutdown(ctx); err != nil { logger.Errorf(ctx, err.Error()) } } }
package models import ( "encoding/json" "errors" "fmt" "log" "github.com/astaxie/beego" "github.com/gomodule/redigo/redis" ) // EventList. var ( EventList map[string]*Event ) const ( // DYMFEVENTSREPORT represents fatal errors DYMFEVENTSREPORT = "dymf_events_report" ) // addEventToRedis func. func addEventToRedis(e *Event) bool { client, err := redis.DialURL("redis://" + beego.AppConfig.String("redishost") + ":" + beego.AppConfig.String("redisport")) if err != nil { log.Fatalln(err) } defer client.Close() rel, _ := json.Marshal(&e) fmt.Println(string(rel)) v, err := client.Do("lpush", DYMFEVENTSREPORT, string(rel)) if err != nil { fmt.Println(err) } fmt.Println(v) return true } // getEventFromRedis func. func getEventFromRedis(id string) (e *Event, err error) { client, err := redis.DialURL("redis://" + beego.AppConfig.String("redishost") + ":" + beego.AppConfig.String("redisport")) if err != nil { log.Fatalln(err) } defer client.Close() values, err := redis.Values(client.Do("lrange", DYMFEVENTSREPORT, "0", "1000")) if err != nil { return e, err } var str string for _, v := range values { str = string(v.([]byte)) if id == str { json.Unmarshal(v.([]byte), &e) break } fmt.Println(string(v.([]byte))) } return e, err } // GetAllEventsFromRedis func. 
func GetAllEventsFromRedis() map[string]*Event { client, err := redis.DialURL("redis://" + beego.AppConfig.String("redishost") + ":" + beego.AppConfig.String("redisport")) if err != nil { log.Fatalln(err) } defer client.Close() values, err := redis.Values(client.Do("lrange", DYMFEVENTSREPORT, "0", "1000")) if err != nil { return EventList } var e Event for _, v := range values { fmt.Println(string(v.([]byte))) err = json.Unmarshal(v.([]byte), &e) if err != nil { fmt.Println("json Unmarshal error:", err) } EventList[e.PlatformID] = &e } return EventList } func init() { EventList = make(map[string]*Event) // event := Event{"dymf", "iostest", 1587109493067, "1.2.23", "768", "dbc218edc", "data list"} // EventList["dbc218edc"] = &event // addEventToRedis(&event) } // Event struct. type Event struct { Game string Platform string Time int64 Version string UID string PlatformID string Data string } // AddEvent func. func AddEvent(e Event, id string) string { e.PlatformID = id EventList[id] = &e addEventToRedis(&e) return id } // GetEvent func. func GetEvent(id string) (e *Event, err error) { if e, ok := EventList[id]; ok { return e, nil } e, err = getEventFromRedis(id) if err != nil { return e, nil } return nil, errors.New("Event not exists") } // GetAllEvents func. func GetAllEvents() map[string]*Event { if len(EventList) > 0 { fmt.Println("EventList data from memory") return EventList } return GetAllEventsFromRedis() } // UpdateEvent func. func UpdateEvent(id string, ee *Event) (e *Event, err error) { if e, ok := EventList[id]; ok { if ee.Game != "" { e.Game = ee.Game } if ee.Platform != "" { e.Platform = ee.Platform } if ee.Time != 0 { e.Time = ee.Time } if ee.Version != "" { e.Version = ee.Version } if ee.UID != "" { e.UID = ee.UID } if ee.PlatformID != "" { e.PlatformID = ee.PlatformID } if ee.Data != "" { e.Data = ee.Data } return e, nil } return nil, errors.New("Event Not Exist") } // DeleteEvent func. func DeleteEvent(id string) { delete(EventList, id) }
package client

import (
	"fmt"
)

// BaseError is an error type that all other error types embed.
type BaseError struct {
	// DefaultErrString is the fallback message; embedding types set it in
	// their Error() implementations before calling choseErrString.
	DefaultErrString string
	// Info, when non-empty, takes precedence over DefaultErrString.
	Info string
}

// Error implements the error interface with a generic request-failure
// message. The assignment only mutates the value-receiver copy.
func (e BaseError) Error() string {
	e.DefaultErrString = "An error occurred while executing a Gophercloud request."
	return e.choseErrString()
}

// choseErrString (sic — "chose") returns Info when set, otherwise the
// default message.
func (e BaseError) choseErrString() string {
	if e.Info != "" {
		return e.Info
	}
	return e.DefaultErrString
}

// ErrMissingInput is the error when input is required in a particular
// situation but not provided by the user
type ErrMissingInput struct {
	BaseError
	// Argument is the name of the missing argument.
	Argument string
}

func (e ErrMissingInput) Error() string {
	e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument)
	return e.choseErrString()
}

// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors.
type ErrInvalidInput struct {
	ErrMissingInput
	// Value is the offending value that was supplied for Argument.
	Value interface{}
}

func (e ErrInvalidInput) Error() string {
	e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value)
	return e.choseErrString()
}

// ErrUnexpectedResponseCode is returned by the Request method when a response code other than
// those listed in OkCodes is encountered.
type ErrUnexpectedResponseCode struct {
	BaseError
	URL      string // request URL
	Method   string // HTTP method
	Expected []int  // status codes the caller accepted
	Actual   int    // status code actually received
	Body     []byte // raw response body, included in the message
}

func (e ErrUnexpectedResponseCode) Error() string {
	e.DefaultErrString = fmt.Sprintf(
		"Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s",
		e.Expected, e.Method, e.URL, e.Actual, e.Body,
	)
	return e.choseErrString()
}

// ErrDefault400 is the default error type returned on a 400 HTTP response code.
type ErrDefault400 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault401 is the default error type returned on a 401 HTTP response code.
type ErrDefault401 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault403 is the default error type returned on a 403 HTTP response code.
type ErrDefault403 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault404 is the default error type returned on a 404 HTTP response code.
type ErrDefault404 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault405 is the default error type returned on a 405 HTTP response code.
type ErrDefault405 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault408 is the default error type returned on a 408 HTTP response code.
type ErrDefault408 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault429 is the default error type returned on a 429 HTTP response code.
type ErrDefault429 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault500 is the default error type returned on a 500 HTTP response code.
type ErrDefault500 struct {
	ErrUnexpectedResponseCode
}

// ErrDefault503 is the default error type returned on a 503 HTTP response code.
type ErrDefault503 struct {
	ErrUnexpectedResponseCode
}

// The Error methods below return fixed messages for most status codes; only
// 403 includes request details (method, URL and response body).

func (e ErrDefault400) Error() string {
	return "Invalid request due to incorrect syntax or missing required parameters."
}
func (e ErrDefault401) Error() string {
	return "Authentication failed"
}
func (e ErrDefault403) Error() string {
	e.DefaultErrString = fmt.Sprintf(
		"Request forbidden: [%s %s], error message: %s",
		e.Method, e.URL, e.Body,
	)
	return e.choseErrString()
}
func (e ErrDefault404) Error() string {
	return "Resource not found"
}
func (e ErrDefault405) Error() string {
	return "Method not allowed"
}
func (e ErrDefault408) Error() string {
	return "The server timed out waiting for the request"
}
func (e ErrDefault429) Error() string {
	return "Too many requests have been sent in a given amount of time. Pause" +
		" requests, wait up to one minute, and try again."
}
func (e ErrDefault500) Error() string {
	return "Internal Server Error"
}
func (e ErrDefault503) Error() string {
	return "The service is currently unable to handle the request due to a temporary" +
		" overloading or maintenance. This is a temporary condition. Try again later."
}

// Err400er is the interface resource error types implement to override the error message
// from a 400 error.
type Err400er interface {
	Error400(ErrUnexpectedResponseCode) error
}

// Err401er is the interface resource error types implement to override the error message
// from a 401 error.
type Err401er interface {
	Error401(ErrUnexpectedResponseCode) error
}

// Err403er is the interface resource error types implement to override the error message
// from a 403 error.
type Err403er interface {
	Error403(ErrUnexpectedResponseCode) error
}

// Err404er is the interface resource error types implement to override the error message
// from a 404 error.
type Err404er interface {
	Error404(ErrUnexpectedResponseCode) error
}

// Err405er is the interface resource error types implement to override the error message
// from a 405 error.
type Err405er interface {
	Error405(ErrUnexpectedResponseCode) error
}

// Err408er is the interface resource error types implement to override the error message
// from a 408 error.
type Err408er interface {
	Error408(ErrUnexpectedResponseCode) error
}

// Err429er is the interface resource error types implement to override the error message
// from a 429 error.
type Err429er interface {
	Error429(ErrUnexpectedResponseCode) error
}

// Err500er is the interface resource error types implement to override the error message
// from a 500 error.
type Err500er interface {
	Error500(ErrUnexpectedResponseCode) error
}

// Err503er is the interface resource error types implement to override the error message
// from a 503 error.
type Err503er interface {
	Error503(ErrUnexpectedResponseCode) error
}
package main

import (
	"bufio"
	"fmt"
	"math"
	"os"
	"strconv"
	"strings"
)

// timeTaken returns the total time needed to type input on the keyboard
// described by word_dict (char code -> [row, col]): the sum of Manhattan
// distances between consecutive keys, starting from the first character.
// It returns -1 if any character of input is not on the keyboard, and 0 for
// an empty input.
func timeTaken(word_dict map[int][2]int, input string) int {
	var total float64
	first := true
	var curRow, curCol float64

	for _, ch := range input {
		rowcol, ok := word_dict[int(ch)]
		if !ok {
			return -1
		}
		row, col := float64(rowcol[0]), float64(rowcol[1])
		if !first {
			total += math.Abs(curRow-row) + math.Abs(curCol-col)
		}
		// FIX: advance the current position. The old code measured every
		// distance from the FIRST character instead of between consecutive
		// characters, because curRow/curCol were never updated in the loop.
		curRow, curCol = row, col
		first = false
	}
	return int(total)
}

func main() {
	word_dict := make(map[int][2]int)
	scanner := bufio.NewScanner(os.Stdin)

	// First line: the number of keyboard rows (any further values on the
	// line are ignored, as before).
	scanner.Scan()
	row_col := scanner.Text()
	var n int
	for index, value := range strings.Split(row_col, " ") {
		int_value, _ := strconv.Atoi(value)
		if index == 0 {
			n = int_value
		}
	}

	// Next n lines: the keys of each row; record each char's (row, col).
	for i := 1; i <= n; i++ {
		scanner.Scan()
		for pos, char := range scanner.Text() {
			word_dict[int(char)] = [2]int{i, pos}
		}
	}

	// Final line: the word to type.
	scanner.Scan()
	fmt.Printf("%v\n", timeTaken(word_dict, scanner.Text()))
}
package mgdb import ( "bytes" "context" "editorApi/config" "editorApi/init/qmlog" "log" "time" "github.com/mongodb/mongo-go-driver/mongo" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" ) var MongoClient *mongo.Client var EditorClient *mongo.Client var OnlineClient *mongo.Client var TestClient *mongo.Client type EnvConfig string var ( EnvTest EnvConfig = "test" EnvOnline EnvConfig = "online" EnvEditor EnvConfig = "editor" DbEditor string = "editor" DbDict string = "dict" DbKuyu string = "kuyu" DbContent string = "courseContent" ) func init() { MongoClient = GetClient("") EditorClient = GetClient("") OnlineClient = GetClient("to") TestClient = GetClient("test") } func getContext() (ctx context.Context) { ctx, _ = context.WithTimeout(context.Background(), 10*time.Second) return } //通过连接字符串,连接到MongoDB func GetClient(dbConfig string) *mongo.Client { connString := bytes.NewBufferString("mongodb://") if dbConfig == "to" { mgdb := config.GinVueAdminconfig.Tomongodb if mgdb.User != "" { connString.WriteString(mgdb.User + ":" + mgdb.Passwd + "@") } connString.WriteString(mgdb.Hosts) if mgdb.ReplicaSet != "" { connString.WriteString("?replicaSet=" + mgdb.ReplicaSet) } } else if dbConfig == "test" { mgdb := config.GinVueAdminconfig.Testmongodb if mgdb.User != "" { connString.WriteString(mgdb.User + ":" + mgdb.Passwd + "@") } connString.WriteString(mgdb.Hosts) if mgdb.ReplicaSet != "" { connString.WriteString("?replicaSet=" + mgdb.ReplicaSet) } } else { mgdb := config.GinVueAdminconfig.Mongodb if mgdb.User != "" { connString.WriteString(mgdb.User + ":" + mgdb.Passwd + "@") } connString.WriteString(mgdb.Hosts) if mgdb.ReplicaSet != "" { connString.WriteString("?replicaSet=" + mgdb.ReplicaSet) } } readconcern.Majority() opt := options.Client().ApplyURI(connString.String()) opt.SetLocalThreshold(3 * time.Second) //只使用与mongo操作耗时小于3秒的 opt.SetMaxConnIdleTime(5 
* time.Second) //指定连接可以保持空闲的最大毫秒数 opt.SetMaxPoolSize(500) //使用最大的连接数 client, err := mongo.Connect(getContext(), opt) if err != nil { qmlog.QMLog.Error("Mongodb链接不可用12", connString.String(), err) return nil } if err := client.Ping(getContext(), readpref.Primary()); err != nil { qmlog.QMLog.Error("Mongodb链接不可用22", connString.String(), err) return nil } qmlog.QMLog.Info("链接到MongoDB数据库:" + connString.String()) return client } func Find( env EnvConfig, dbName, colName string, filter bson.M, sort interface{}, project interface{}, skip, limit int64, rst interface{}, ) { client := EditorClient if env == "online" { client = OnlineClient } ctx := getContext() opts := options.Find() if sort != nil { opts = opts.SetSort(sort) } if project != nil { opts = opts.SetProjection(project) } opts = opts.SetSkip(skip) if limit == 0 { limit = 10000 } opts = opts.SetLimit(limit) cusor, err := client.Database(dbName).Collection(colName).Find( ctx, filter, opts, ) defer cusor.Close(ctx) if err != nil { log.Println(err) return } cusor.All(ctx, rst) } func FindOne( env EnvConfig, dbName, colName string, filter interface{}, project interface{}, rst interface{}, ) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.FindOne() if project != nil { opts = opts.SetProjection(project) } single := client.Database(dbName).Collection(colName).FindOne(ctx, filter, opts) single.Decode(rst) } func FindOneAndUpdate( env EnvConfig, dbName, colName string, filter interface{}, sets interface{}, project interface{}, rst interface{}, ) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.FindOneAndUpdate() if project != nil { opts = opts.SetProjection(project) } single := client.Database(dbName).Collection(colName).FindOneAndUpdate(ctx, filter, sets, opts) single.Decode(rst) } func UpdateOne( env EnvConfig, dbName, 
colName string, filter interface{}, sets interface{}, upsert bool, ) (*mongo.UpdateResult, error) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.Update() if upsert { opts = opts.SetUpsert(true) } return client.Database(dbName).Collection(colName).UpdateOne( ctx, filter, sets, opts, ) } func UpdateMany( env EnvConfig, dbName, colName string, filter interface{}, sets interface{}, upsert bool, ) (*mongo.UpdateResult, error) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.Update() if upsert { opts = opts.SetUpsert(upsert) } return client.Database(dbName).Collection(colName).UpdateMany( ctx, filter, sets, opts, ) } func DeleteOne(env EnvConfig, dbName, colName string, filter interface{}, ) (*mongo.DeleteResult, error) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.Delete() return client.Database(dbName).Collection(colName).DeleteOne( ctx, filter, opts, ) } func DeleteMany( env EnvConfig, dbName, colName string, filter interface{}, ) (*mongo.DeleteResult, error) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.Delete() return client.Database(dbName).Collection(colName).DeleteMany( ctx, filter, opts, ) } func Count( env EnvConfig, dbName, colName string, filter interface{}, ) (int64, error) { client := EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() opts := options.Count() return client.Database(dbName).Collection(colName).CountDocuments( ctx, filter, opts, ) } func Aggregate( env EnvConfig, dbName, colName string, pipeline interface{}, ) (cursor *mongo.Cursor, err error) { client := 
EditorClient if env == "online" { client = OnlineClient } else if env == "test" { client = TestClient } ctx := getContext() collection := client.Database(dbName).Collection(colName) cursor, err = collection.Aggregate(ctx, pipeline) return }
package main import ( "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" ) type PresignedURL struct { URL string `json:"url"` Timeout int `json:"timeout"` } func GetS3Presigned(bucket, key string, timeout int) PresignedURL { svc := s3.New(nil) req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), }) url, err := req.Presign(time.Duration(timeout) * time.Second) if err != nil { panic(err) } p := PresignedURL{URL: url, Timeout: timeout} return p }
package netctl import ( "encoding/json" "fmt" "io/ioutil" "net/http" "os" "github.com/codegangsta/cli" ) var client = &http.Client{} func handleBasicError(ctx *cli.Context, err error) { if err != nil { errExit(ctx, exitRequest, err.Error(), false) } } func baseURL(ctx *cli.Context) string { return ctx.GlobalString("netmaster") } func versionURL(ctx *cli.Context) string { return fmt.Sprintf("%s/version", baseURL(ctx)) } func writeBody(resp *http.Response, ctx *cli.Context) { content, err := ioutil.ReadAll(resp.Body) if err != nil { errExit(ctx, exitIO, err.Error(), false) } os.Stderr.Write(content) } func getObject(ctx *cli.Context, url string, jdata interface{}) error { resp, err := client.Get(url) handleBasicError(ctx, err) respCheck(resp, ctx) content, err := ioutil.ReadAll(resp.Body) handleBasicError(ctx, err) handleBasicError(ctx, json.Unmarshal(content, jdata)) return nil }
package main

// scan_rawstring is a scanner/parser test fixture: it exercises raw
// (backquoted) string literals, including one whose contents look like escape
// sequences, which must NOT be interpreted inside a raw string.
// NOTE(review): r is only ever assigned, never read, so the standard gc
// toolchain would reject this ("declared and not used") — presumably this
// file is input for a custom scanner rather than compiled normally; the
// literals are therefore kept byte-identical.
func scan_rawstring() {
	var r string
	r = ``
	r = `compilers!`
	r = `\a \b \f \n \r \t \v \\ \"`
}
package main import ( "flag" "fmt" "os" "strings" "Gaia/plugin" _ "Gaia/plugin/ftp" _ "Gaia/plugin/smb" _ "Gaia/plugin/ssh" "Gaia/util" ) var version = "0.1" var banner = ` _________ _____ __ ____/_____ ___(_)_____ _ _ / __ _ __ ` + "`" + `/_ /_ __ ` + "`" + `/ / /_/ / / /_/ /_ / / /_/ / \____/ \__,_/ /_/ \__,_/ ` var ( ips string userListStr string passListStr string onOptions string threadNum int ) func init() { flag.StringVar(&ips, "h", "", "set `host` to blast") flag.StringVar(&userListStr, "u", "", "set `username` list, such as: -u admin,root OR -u [file]:username.txt") flag.StringVar(&passListStr, "p", "", "set `password` list, such as: -p admin,toor OR -u [file]:password.txt") flag.StringVar(&onOptions, "s", "ftp", "select the `service` to blast, options: ftp,smb,ssh") flag.IntVar(&threadNum, "t", 10, "set `thread` num to blast") flag.Usage = usage } func main() { flag.Parse() if len(os.Args) == 1 { flag.Usage() return } if len(ips) == 0 || len(onOptions) == 0 { flag.Usage() return } pluginConfig := initConfig() pluginConfig.Start() } func initConfig() (pluginConfig plugin.Config) { var onOptionList []string var userList []string var passList []string for _, v := range strings.Split(onOptions, ",") { onOptionList = append(onOptionList, strings.TrimSpace(v)) } if strings.HasPrefix(userListStr, "[file]:") { userList, _ = util.GetConfigStrList(userListStr) } else { for _, v := range strings.Split(userListStr, ",") { userList = append(userList, strings.TrimSpace(v)) } } if strings.HasPrefix(passListStr, "[file]:") { passList, _ = util.GetConfigStrList(passListStr) } else { for _, v := range strings.Split(passListStr, ",") { passList = append(passList, strings.TrimSpace(v)) } } pluginConfig = plugin.Config{ IPStr: ips, Options: onOptionList, ThreadNum: threadNum, UserList: userList, PassList: passList, } return } func usage() { fmt.Fprintf(os.Stderr, `Gaia version: Gaia/%s %s Options: `, version, banner) flag.PrintDefaults() }
package database import "time" func Migrate() { db := ConnectToDatabase() db.AutoMigrate(&Product{}, &Option{}, &Image{}, &Description{}, &Variant{}, &User{}, &Order{}, &OrderDetail{}, &Bill{}) } type Description struct { ID int `json:"id"` IDProduct int `json:"id_product" gorm:"default:null"` Content string `json:"content" gorm:"type:mediumtext"` } type Image struct { ID int `json:"id"` IDProduct int `json:"id_product" gorm:"default:null"` Link string `json:"link" gorm:"type:mediumtext"` } type Option struct { ID int `json:"id"` IDProduct int `json:"id_product" gorm:"default:null"` Size string `json:"size" gorm:"type:tinytext"` Price int `json:"price" gorm:"type:int"` SalePrice int `json:"sale_price" gorm:"type:int"` Quantity int `json:"quantity" gorm:"type:mediumint"` Variant Variant `gorm:"foreignKey:IDOption;ASSOCIATION_FOREIGNKEY:ID"` } type Product struct { ID int `json:"id"` Name string `json:"name" gorm:"type:tinytext"` LinkDetail string `json:"link_detail" gorm:"type:mediumtext"` Technology string `json:"technology" gorm:"type:tinytext"` Resolution string `json:"resolution" gorm:"type:tinytext"` Type string `json:"type" gorm:"type:tinytext"` ES bool `json:"es"` ListDescriptions []Description `gorm:"foreignKey:IDProduct;ASSOCIATION_FOREIGNKEY:ID"` ListImages []Image `gorm:"foreignKey:IDProduct;ASSOCIATION_FOREIGNKEY:ID"` ListOptions []Option `gorm:"foreignKey:IDProduct;ASSOCIATION_FOREIGNKEY:ID"` } type Variant struct { ID int `json:"id"` IDProduct int `json:"id_product" gorm:"default:null"` IDOption int `json:"id_option" gorm:"default:null"` ProductName string `json:"product_name" gorm:"type:tinytext"` Size string `json:"size" gorm:"type:tinytext"` Price int `json:"price" gorm:"type:int"` SalePrice int `json:"sale_price" gorm:"type:int"` ListOrderDetails []OrderDetail `gorm:"foreignKey:IDVariant;ASSOCIATION_FOREIGNKEY:ID"` } type OrderDetail struct { ID int `json:"id"` IDVariant int `json:"id_variant" gorm:"default:null"` IDOrder int `json:"id_order"` 
Quantity int `json:"quantity" gorm:"type:mediumint"` ProductName string `json:"product_name" gorm:"type:tinytext"` TotalPrice int `json:"total_price" gorm:"type:int"` } type Order struct { ID int `json:"id"` IDUser int `json:"id_user" gorm:"default:null"` TotalPrice int `json:"total_price" gorm:"type:int"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` DeleteAt time.Time `json:"delete_at"` Username string `json:"username" gorm:"type:tinytext"` PhoneUser string `json:"phone_user" gorm:"type:tinytext"` AddressUser string `json:"address_user" gorm:"type:tinytext"` State string `json:"state" gorm:"type:tinytext"` Bill Bill `gorm:" foreignkey: IDOrder "` ListOrderDetails []OrderDetail `gorm:"foreignKey:IDOrder;ASSOCIATION_FOREIGNKEY:ID"` } type User struct { ID int `json:"id"` Name string `json:"name" gorm:"type:tinytext"` Username string `json:"username" gorm:"type:tinytext"` Password string `json:"password" gorm:"type:tinytext"` Email string `json:"email" gorm:"type:tinytext"` Address string `json:"address" gorm:"type:mediumtext"` Phone string `json:"phone" gorm:"type:tinytext"` IsAdmin bool `json:"isadmin"` ListOrders []Order `gorm:"foreignKey:IDUser;ASSOCIATION_FOREIGNKEY:ID"` ListBills []Bill `gorm:"foreignKey:IDUser;ASSOCIATION_FOREIGNKEY:ID"` } type Bill struct { ID int `json:"id"` IDOrder int `json:"id_order"` IDUser int `json:"id_user"` IdAdmin int `json:"id_admin"` Username string `json:"username" gorm:"type:tinytext"` AdminName string `json:"admin_name" gorm:"type:tinytext"` PhoneUser string `json:"phone_user" gorm:"type:tinytext"` TotalPrice int `json:"total_price" gorm:"type:int"` AddressUser string `json:"address_user" gorm:"type:tinytext"` State string `json:"state" gorm:"type:tinytext"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` }
package requests import ( "encoding/json" "fmt" "io/ioutil" "net/url" "strings" "github.com/atomicjolt/canvasapi" "github.com/atomicjolt/canvasapi/models" ) // ReLockModuleProgressions Resets module progressions to their default locked state and // recalculates them based on the current requirements. // // Adding progression requirements to an active course will not lock students // out of modules they have already unlocked unless this action is called. // https://canvas.instructure.com/doc/api/modules.html // // Path Parameters: // # Path.CourseID (Required) ID // # Path.ID (Required) ID // type ReLockModuleProgressions struct { Path struct { CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required) ID string `json:"id" url:"id,omitempty"` // (Required) } `json:"path"` } func (t *ReLockModuleProgressions) GetMethod() string { return "PUT" } func (t *ReLockModuleProgressions) GetURLPath() string { path := "courses/{course_id}/modules/{id}/relock" path = strings.ReplaceAll(path, "{course_id}", fmt.Sprintf("%v", t.Path.CourseID)) path = strings.ReplaceAll(path, "{id}", fmt.Sprintf("%v", t.Path.ID)) return path } func (t *ReLockModuleProgressions) GetQuery() (string, error) { return "", nil } func (t *ReLockModuleProgressions) GetBody() (url.Values, error) { return nil, nil } func (t *ReLockModuleProgressions) GetJSON() ([]byte, error) { return nil, nil } func (t *ReLockModuleProgressions) HasErrors() error { errs := []string{} if t.Path.CourseID == "" { errs = append(errs, "'Path.CourseID' is required") } if t.Path.ID == "" { errs = append(errs, "'Path.ID' is required") } if len(errs) > 0 { return fmt.Errorf(strings.Join(errs, ", ")) } return nil } func (t *ReLockModuleProgressions) Do(c *canvasapi.Canvas) (*models.Module, error) { response, err := c.SendRequest(t) if err != nil { return nil, err } body, err := ioutil.ReadAll(response.Body) response.Body.Close() if err != nil { return nil, err } ret := models.Module{} err = json.Unmarshal(body, &ret) 
if err != nil { return nil, err } return &ret, nil }
package k8s import ( "errors" "fmt" "strings" dolittleK8s "github.com/dolittle/platform-api/pkg/dolittle/k8s" platformK8s "github.com/dolittle/platform-api/pkg/platform/k8s" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func CreateApplicationResource(config CreateResourceConfig, customerID string, application dolittleK8s.ShortInfo) *batchv1.Job { namespace := config.Namespace gitRemote := config.GitRemote gitUserName := config.GitUserName gitUserEmail := config.GitUserEmail apiSecrets := config.ApiSecrets branch := config.GitBranch serviceAccountName := config.ServiceAccountName platformImage := config.PlatformImage platformEnvironment := config.PlatformEnvironment isProduction := config.IsProduction terraformCustomerName := strings.ToLower( fmt.Sprintf("customer_%s", customerID), ) applicationID := application.ID applicationName := application.Name terrformFileName := fmt.Sprintf("customer_%s_%s", customerID, applicationID) // Unique identifier of the job name := fmt.Sprintf("create-application-%s", applicationID) if len(name) >= 64 { panic("Not allowed due to kuberentes restriction") } annotations := platformK8s.GetAnnotationsForApplication(customerID, applicationID) terraformBaseContainer := terraformBase(platformImage, apiSecrets) return &batchv1.Job{ TypeMeta: metav1.TypeMeta{ Kind: "Job", APIVersion: "batch/v1", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Annotations: annotations, }, Spec: batchv1.JobSpec{ Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: annotations, }, Spec: corev1.PodSpec{ ServiceAccountName: serviceAccountName, RestartPolicy: "Never", Volumes: []corev1.Volume{ { Name: "shared-data", VolumeSource: corev1.VolumeSource{}, }, { Name: "secrets", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: apiSecrets, Items: []corev1.KeyToPath{ { Key: "SSH_KEY_PUBLIC", Path: "operations.pub", }, { Key: 
"SSH_KEY_PRIVATE", Path: "operations", }, }, }, }, }, }, InitContainers: []corev1.Container{ sshSetup(), // We could write the env variables required? gitSetup(platformImage, gitRemote, branch, gitUserEmail, gitUserName), // Create terraform // TODO We don't really need envfrom here createTerraformWithCommand(terraformBaseContainer, []string{ "sh", "-c", fmt.Sprintf(` /app/bin/app tools terraform template application \ --application-name="%s" \ --application-id="%s" \ --customer="%s" > /pod-data/git/Source/V3/Azure/%s.tf; `, applicationName, applicationID, terraformCustomerName, terrformFileName, ), }), gitUpdateTerraform(platformImage, terrformFileName, branch), // Update git with the changes // Terraform init new module terraformInit(terraformBaseContainer), // Terraform apply new module terraformApply(terraformBaseContainer, terrformFileName), // Terraform create azure.json terraformOutputJSON(terraformBaseContainer), toolsStudioUpsertTerraform(platformImage, platformEnvironment, customerID), gitUpdateStudioTerraformInfo(platformImage, platformEnvironment, customerID, branch), buildApplicationInCluster(platformImage, platformEnvironment, customerID, applicationID, isProduction), gitUpdate(platformImage, "post-application-created", []string{ "sh", "-c", fmt.Sprintf(` cd /pod-data/git; git add ./Source/V3/platform-api/%s/%s; git status; git commit -m "Application created %s"; git log -1; GIT_SSH_COMMAND="ssh -i /pod-data/.ssh/operations -o IdentitiesOnly=yes -o StrictHostKeyChecking=no" git push origin %s; `, platformEnvironment, customerID, customerID, branch, ), }), terraformRemoveOutputJSON(platformImage), // TODO add logged in user to newly created application }, Containers: []corev1.Container{ { Name: "summary", Image: "busybox", Command: []string{ "sh", "-c", `echo "jobs done"`, }, }, }, }, }, }, } } func DeleteApplicationResource() error { // TODO return errors.New("TODO: currently we lock this in azure") } // buildApplicationInCluster // We rely on next 
steps to write to git func buildApplicationInCluster(platformImage string, platformEnvironment string, customerID string, applicationID string, isProduction bool) corev1.Container { envVars := []corev1.EnvVar{ { Name: "KUBECONFIG", Value: "incluster", }, } envVars = append(envVars, envVarGitNotInUse()...) return corev1.Container{ Name: "create-application", ImagePullPolicy: "Always", Image: platformImage, Env: envVars, Command: []string{ "sh", "-c", fmt.Sprintf( ` /app/bin/app tools automate create-application \ --platform-environment="%s" \ --with-environments \ --with-welcome-microservice \ --customer-id="%s" \ --application-id="%s" \ --is-production="%t" `, platformEnvironment, customerID, applicationID, isProduction, ), }, VolumeMounts: []corev1.VolumeMount{ { Name: "shared-data", MountPath: "/pod-data", }, }, } }
package importer import ( "context" "encoding/json" "net/http" "time" "github.com/pkg/errors" "github.com/ddouglas/ledger" "github.com/ddouglas/ledger/internal/account" "github.com/ddouglas/ledger/internal/gateway" "github.com/ddouglas/ledger/internal/item" "github.com/ddouglas/ledger/internal/transaction" "github.com/go-redis/redis/v8" "github.com/newrelic/go-agent/v3/newrelic" "github.com/sirupsen/logrus" "github.com/ulule/deepcopier" ) type Service interface { Run(ctx context.Context) VerifyWebhookMessage(ctx context.Context, header http.Header, message []byte) error PublishWebhookMessage(ctx context.Context, webhook *ledger.WebhookMessage) error PublishCustomWebhookMessage(ctx context.Context, webhook *ledger.WebhookMessage) error } type service struct { account account.Service item item.Service transaction transaction.Service redis *redis.Client gateway gateway.Service logger *logrus.Logger newrelic *newrelic.Application ledger.WebhookRepository } func New( newrelic *newrelic.Application, logger *logrus.Logger, client *redis.Client, gateway gateway.Service, account account.Service, item item.Service, transaction transaction.Service, webhook ledger.WebhookRepository, ) Service { return &service{ WebhookRepository: webhook, newrelic: newrelic, logger: logger, redis: client, gateway: gateway, account: account, item: item, transaction: transaction, } } func (s *service) Run(ctx context.Context) { entry := s.logger.WithFields(logrus.Fields{ "service": "Importer", "channel": gateway.PubSubPlaidWebhook, }) entry.Info("Monitoring Redis Queue for Messages") for { txn := s.newrelic.StartTransaction("check-plaid-message-queue") ctx := newrelic.NewContext(ctx, txn) entry := s.logger.WithContext(ctx) entry.Debug("checking message queue") data, err := s.redis.LPop(ctx, gateway.PubSubPlaidWebhook).Result() if err != nil && !errors.Is(err, redis.Nil) { entry.WithError(err).Error("failed to fetch messages from queue") txn.NoticeError(err) sleep() continue } if err != nil && 
errors.Is(err, redis.Nil) { entry.Debug("received nil, going to sleep") txn.Ignore() sleep() continue } entry.WithField("message", data).Info("webhook received, dispatching processor") var message = new(ledger.WebhookMessage) err = json.Unmarshal([]byte(data), message) if err != nil { entry.WithError(err).Error("failed to decode message") txn.NoticeError(err) continue } s.processMessage(ctx, message) s.logger.Info("message processed successfully") txn.End() sleep() } } func sleep() { time.Sleep(time.Second * 1) } func (s *service) processMessage(ctx context.Context, message *ledger.WebhookMessage) { switch message.WebhookType { case "TRANSACTIONS": s.processTransactionUpdate(ctx, message) default: s.logger.WithContext(ctx).WithField("message", message).Error("recieved message with unhandled webhook type") } } func (s *service) processTransactionUpdate(ctx context.Context, message *ledger.WebhookMessage) { txn := newrelic.FromContext(ctx) entry := s.logger.WithContext(ctx) seg := txn.StartSegment("checking for existing item") existingItem, err := s.item.Item(ctx, message.ItemID) if err != nil { entry.WithError(err).Error("failed to fetch item with itemID provided by message") return } seg.End() seg = txn.StartSegment("fetching updated item from plaid") item, err := s.gateway.Item(ctx, existingItem.AccessToken) if err != nil { entry.WithError(err).Error("failed to fetch plaid item with accessToken") return } seg.End() err = deepcopier.Copy(item).To(existingItem) if err != nil { entry.WithError(err).Error("failed to copy plaid item to ledger item") return } seg = txn.StartSegment("updating item") _, err = s.item.UpdateItem(ctx, existingItem.ItemID, existingItem) if err != nil { entry.WithError(err).Error("failed to update item") return } seg.End() seg = txn.StartSegment("updating accounts") accounts, err := s.gateway.Accounts(ctx, existingItem.AccessToken) if err != nil { entry.WithError(err).Error("failed to update item") return } for _, account := range accounts { 
account.ItemID = existingItem.ItemID _, err = s.account.UpdateAccount(ctx, existingItem.ItemID, account.AccountID, account) if err != nil { entry.WithError(err).WithField("account_id", account.AccountID).Error("failed to update account") return } } seg.End() seg = txn.StartSegment("evaluating webhook code") var start, end time.Time var accountIDs []string switch message.WebhookCode { case "INITIAL_UPDATE": start = time.Now().AddDate(0, 0, -30) end = time.Now() case "HISTORICAL_UPDATE": start = time.Now().AddDate(-2, 0, 0) end = time.Now().AddDate(0, 0, -30) case "DEFAULT_UPDATE": start = time.Now().AddDate(0, 0, 0) end = time.Now() case "CUSTOM_UPDATE": start = message.StartDate end = message.EndDate if message.Options != nil && len(message.Options.AccountIDs) > 0 { accountIDs = message.Options.AccountIDs } case "TRANSACTIONS_REMOVED": // How to handle this, thinking about calling a seperate func // and then returning here instead of allowing the func to continue processing default: // unhandled webhook code received } seg.AddAttribute("startDate", start.Format("2006-01-02")) seg.AddAttribute("endDate", end.Format("2006-01-02")) seg.End() seg = txn.StartSegment("fetching transactions from plaid") transactions, err := s.gateway.Transactions(ctx, item.AccessToken, start, end, accountIDs) if err != nil { entry.WithError(err).Error("failed to fetch transactions") return } seg.AddAttribute("transactionCount", len(transactions)) seg.End() seg = txn.StartSegment("processing transactions") err = s.transaction.ProcessTransactions(ctx, item, transactions) if err != nil { entry.WithError(err).Error("failed to process transactions") return } seg.End() if existingItem.IsRefreshing { entry.Info("Item is in refreshing state, updating to false") existingItem.IsRefreshing = false _, err = s.item.UpdateItem(ctx, existingItem.ItemID, existingItem) if err != nil { entry.WithError(err).Error("failed to toggle isRefreshing flag on item") return } } entry.Info("transactions processed 
successfully") }
package ascii import ( "fmt" "testing" "github.com/Snaxai/IS105/ICA02/Oppg1/ascii" ) const PasserTestC = `"Hello :-)"` func TestASCIIoppgC(t *testing.T) { for i := 0; i < len(PasserTestC); i++ { if PasserTestC[i] >= 126 { //fra og med 126 fordi det er utenfor normale ascii t.Fail() fmt.Println("Value not a part of ascii in loop number", i, PasserTestC[i]) } fmt.Println(PasserTestC[i]) } } func TestGreetingASCII(t *testing.T) { ascii.GreetingASCII() }
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package util import ( "context" "fmt" "math" "os/exec" "strconv" "strings" "sync" "chromiumos/tast/common/perf" "chromiumos/tast/testing" ) // fioResult is a serializable structure representing fio results output. type fioResult struct { Jobs []struct { Jobname string `json:"jobname"` Read map[string]interface{} `json:"read"` Write map[string]interface{} `json:"write"` Trim map[string]interface{} `json:"trim"` Sync map[string]interface{} `json:"sync"` } DiskUtil []struct { Name string } `json:"disk_util"` } // fioResultReport is a result report for a single io run for a group. type fioResultReport struct { group string result *fioResult } // fioDiskUsageReport is a report of disk lifetime usage. type fioDiskUsageReport struct { name string percentageUsed int64 bytesWritten int64 } // FioResultWriter is a serial processor of fio results. type FioResultWriter struct { resultLock sync.Mutex results []fioResultReport diskUsages []fioDiskUsageReport } // Save processes and saves reported results. func (f *FioResultWriter) Save(ctx context.Context, path string, writeKeyVal bool) { f.resultLock.Lock() defer f.resultLock.Unlock() perfValues := perf.NewValues() for _, report := range f.results { reportResults(ctx, report.result, report.group, perfValues) } for _, disk := range f.diskUsages { reportDiskPercentageUsed(ctx, disk, perfValues) } perfValues.Save(path) if writeKeyVal { for _, report := range f.results { values := saveToKeyVal(ctx, report.result, report.group) if err := WriteKeyVals(path, values); err != nil { testing.ContextLog(ctx, "Error writing results to keyval file: ", err) } } } f.results = nil } // Report posts a single fio result to the writer. 
func (f *FioResultWriter) Report(group string, result *fioResult) { f.resultLock.Lock() defer f.resultLock.Unlock() f.results = append(f.results, fioResultReport{group, result}) } // ReportDiskUsage records the disk usage percents to report it at save time. func (f *FioResultWriter) ReportDiskUsage(diskName string, percentageUsed, totalBytesWritten int64) { if len(diskName) == 0 || percentageUsed == -1 { return // No reporting of empty or wrong disk usage. } f.resultLock.Lock() defer f.resultLock.Unlock() f.diskUsages = append(f.diskUsages, fioDiskUsageReport{diskName, percentageUsed, totalBytesWritten}) } func reportDiskPercentageUsed(ctx context.Context, diskUsage fioDiskUsageReport, perfValues *perf.Values) { perfValues.Set(perf.Metric{ Name: "disk_percentage_used", Variant: diskUsage.name, Unit: "percent", Direction: perf.SmallerIsBetter, Multiple: false, }, float64(diskUsage.percentageUsed)) perfValues.Append(perf.Metric{ Name: "total_bytes_written", Variant: diskUsage.name, Unit: "byte", Direction: perf.SmallerIsBetter, Multiple: true, }, float64(diskUsage.bytesWritten)) } // reportJobRWResult appends metrics for latency and bandwidth from the current test results // to the given perf values. func reportJobRWResult(ctx context.Context, testRes map[string]interface{}, prefix, dev string, perfValues *perf.Values) { flatResult, err := flattenNestedResults("", testRes) if err != nil { testing.ContextLog(ctx, "Error flattening results json: ", err) return } for k, v := range flatResult { if strings.Contains(k, "percentile") { perfValues.Append(perf.Metric{ Name: "_" + prefix + k, Variant: dev, Unit: "ns", Direction: perf.SmallerIsBetter, Multiple: true, }, v) } else if k == "_bw" { perfValues.Append(perf.Metric{ Name: "_" + prefix + k, Variant: dev, Unit: "KB_per_sec", Direction: perf.BiggerIsBetter, Multiple: true, }, v) } } } // flattenNestedResults flattens nested structures to the root level. // Names are prefixed with the nested names, i.e. 
{foo: { bar: {}}} -> {foo<prefix>bar: {}} // TODO(abergman): can we avoid creating map for each nest level? func flattenNestedResults(prefix string, nested interface{}) (flat map[string]float64, err error) { flat = make(map[string]float64) merge := func(to, from map[string]float64) { for kt, vt := range from { to[kt] = float64(vt) } } switch nested := nested.(type) { case map[string]interface{}: for k, v := range nested { fm1, fe := flattenNestedResults(prefix+"_"+k, v) if fe != nil { err = fe return } merge(flat, fm1) } case []interface{}: for i, v := range nested { fm1, fe := flattenNestedResults(prefix+"_"+strconv.Itoa(i), v) if fe != nil { err = fe return } merge(flat, fm1) } default: flat[prefix] = nested.(float64) } return } // diskSizePretty returns a size string (i.e. "128G") of a storage device. // TODO(abergman): Could we use gopsutil? func diskSizePretty(dev string) (sizeGB string, err error) { cmd := fmt.Sprintf("cat /proc/partitions | egrep '%v$' | awk '{print $3}'", dev) out, err := exec.Command("bash", "-c", cmd).CombinedOutput() if err != nil { return "", err } blocks, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) if err != nil { return "", err } // Covert number of blocks to bytes (*1024), then to a string of whole GB, // i.e. 
125034840 -> "128G" return strconv.Itoa(int(1024*blocks/math.Pow(10, 9.0)+0.5)) + "G", nil } func reportResults(ctx context.Context, res *fioResult, group string, perfValues *perf.Values) { for _, job := range res.Jobs { reportJobRWResult(ctx, job.Read, job.Jobname+"_read", group, perfValues) reportJobRWResult(ctx, job.Write, job.Jobname+"_write", group, perfValues) } } func extractResultValues(ctx context.Context, testRes map[string]interface{}, prefix, dev string, values map[string]float64) { flatResult, err := flattenNestedResults("", testRes) if err != nil { testing.ContextLog(ctx, "Error flattening results json: ", err) return } for k, v := range flatResult { name := "_" + prefix + k + "{perf}" values[name] = v } } func saveToKeyVal(ctx context.Context, res *fioResult, group string) (values map[string]float64) { values = make(map[string]float64) for _, job := range res.Jobs { extractResultValues(ctx, job.Read, job.Jobname+"_read", group, values) extractResultValues(ctx, job.Write, job.Jobname+"_write", group, values) extractResultValues(ctx, job.Trim, job.Jobname+"_trim", group, values) extractResultValues(ctx, job.Sync, job.Jobname+"_sync", group, values) } return values }
package diag

import (
	"fmt"
	"strings"

	"github.com/gosuri/uitable"
	"github.com/spf13/cobra"

	"github.com/kapitanov/natandb/pkg/model"
	"github.com/kapitanov/natandb/pkg/storage"
)

// init registers the "snapshot" subcommand, which reads the snapshot file
// from the data directory and prints every node (key, last-change version,
// values) as a table, followed by the overall snapshot version.
func init() {
	cmd := &cobra.Command{
		Use:   "snapshot",
		Short: "Inspect snapshot file",
	}
	Command.AddCommand(cmd)

	// --data/-d selects the data directory holding the snapshot file.
	dataDir := cmd.Flags().StringP("data", "d", "./data", "path to data directory")

	cmd.Run = func(c *cobra.Command, args []string) {
		// NOTE(review): "log" is not declared in this view — presumably a
		// package-level logger defined in a sibling file; confirm.
		driver, err := storage.NewDriver(storage.DirectoryOption(*dataDir))
		if err != nil {
			log.Printf("unable to init storage driver: %s", err)
			panic(err)
		}

		f, err := driver.SnapshotFile().Read()
		if err != nil {
			log.Printf("unable to read snapshot file: %s", err)
			panic(err)
		}
		// Close the snapshot file on exit; a close failure is only logged.
		defer func() {
			err = f.Close()
			if err != nil {
				log.Printf("unable to close snapshot file: %s", err)
			}
		}()

		root, err := model.ReadSnapshot(f)
		if err != nil {
			log.Printf("unable to read snapshot: %s", err)
			panic(err)
		}

		table := uitable.New()
		table.MaxColWidth = 80
		table.Wrap = true
		table.AddRow("KEY", "VERSION", "VALUE")
		for _, node := range root.NodesMap {
			// Render each node's values as a quoted, comma-separated list.
			values := make([]string, len(node.Values))
			for i, v := range node.Values {
				values[i] = fmt.Sprintf("\"%s\"", string(v))
			}
			valueStr := fmt.Sprintf("[ %s ]", strings.Join(values, ", "))
			table.AddRow(node.Key, fmt.Sprintf("%d", node.LastChangeID), valueStr)
		}
		fmt.Println(table)
		fmt.Printf("Version: %d\n", root.LastChangeID)
	}
}
package lfsapi

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"sync/atomic"
	"testing"

	"github.com/git-lfs/git-lfs/errors"
	"github.com/stretchr/testify/assert"
)

// TestAuthErrWithBody verifies a 401 with a JSON body becomes an auth error
// carrying the server-supplied message.
func TestAuthErrWithBody(t *testing.T) {
	var called uint32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() != "/test" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		atomic.AddUint32(&called, 1)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(401)
		w.Write([]byte(`{"message":"custom auth error"}`))
	}))
	defer srv.Close()

	req, err := http.NewRequest("GET", srv.URL+"/test", nil)
	assert.Nil(t, err)

	c := &Client{}
	_, err = c.Do(req)
	assert.NotNil(t, err)
	assert.True(t, errors.IsAuthError(err))
	assert.Equal(t, "Authentication required: custom auth error", err.Error())
	assert.EqualValues(t, 1, called)
}

// TestFatalWithBody verifies a 500 with a JSON body becomes a fatal error
// carrying the server-supplied message.
func TestFatalWithBody(t *testing.T) {
	var called uint32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() != "/test" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		atomic.AddUint32(&called, 1)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(500)
		w.Write([]byte(`{"message":"custom fatal error"}`))
	}))
	defer srv.Close()

	req, err := http.NewRequest("GET", srv.URL+"/test", nil)
	assert.Nil(t, err)

	c := &Client{}
	_, err = c.Do(req)
	assert.NotNil(t, err)
	assert.True(t, errors.IsFatalError(err))
	assert.Equal(t, "Fatal error: custom fatal error", err.Error())
	assert.EqualValues(t, 1, called)
}

// TestWithNonFatal500WithBody verifies 501/507/509 responses with bodies are
// surfaced as non-fatal errors using the server-supplied message verbatim.
func TestWithNonFatal500WithBody(t *testing.T) {
	c := &Client{}

	var called uint32

	nonFatalCodes := map[int]string{
		501: "custom 501 error",
		507: "custom 507 error",
		509: "custom 509 error",
	}

	for nonFatalCode, expectedErr := range nonFatalCodes {
		// A fresh server per code; srv.Close() runs at loop end (not defer,
		// which would pile up until function exit).
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.String() != "/test" {
				w.WriteHeader(http.StatusNotFound)
				return
			}
			atomic.AddUint32(&called, 1)
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(nonFatalCode)
			w.Write([]byte(`{"message":"` + expectedErr + `"}`))
		}))

		req, err := http.NewRequest("GET", srv.URL+"/test", nil)
		assert.Nil(t, err)

		_, err = c.Do(req)
		t.Logf("non fatal code %d", nonFatalCode)
		assert.NotNil(t, err)
		assert.Equal(t, expectedErr, err.Error())
		srv.Close()
	}

	assert.EqualValues(t, 3, called)
}

// TestAuthErrWithoutBody verifies a bodiless 401 becomes an auth error with
// the client's default "Authorization error" message.
func TestAuthErrWithoutBody(t *testing.T) {
	var called uint32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() != "/test" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		atomic.AddUint32(&called, 1)
		w.WriteHeader(401)
	}))
	defer srv.Close()

	req, err := http.NewRequest("GET", srv.URL+"/test", nil)
	assert.Nil(t, err)

	c := &Client{}
	_, err = c.Do(req)
	assert.NotNil(t, err)
	assert.True(t, errors.IsAuthError(err))
	assert.True(t, strings.HasPrefix(err.Error(), "Authentication required: Authorization error:"), err.Error())
	assert.EqualValues(t, 1, called)
}

// TestFatalWithoutBody verifies a bodiless 500 becomes a fatal error with the
// client's default "Server error" message.
func TestFatalWithoutBody(t *testing.T) {
	var called uint32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() != "/test" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		atomic.AddUint32(&called, 1)
		w.WriteHeader(500)
	}))
	defer srv.Close()

	req, err := http.NewRequest("GET", srv.URL+"/test", nil)
	assert.Nil(t, err)

	c := &Client{}
	_, err = c.Do(req)
	assert.NotNil(t, err)
	assert.True(t, errors.IsFatalError(err))
	assert.True(t, strings.HasPrefix(err.Error(), "Fatal error: Server error:"), err.Error())
	assert.EqualValues(t, 1, called)
}

// TestWithNonFatal500WithoutBody verifies bodiless 501/507/509 responses get
// the client's per-status default error prefixes.
func TestWithNonFatal500WithoutBody(t *testing.T) {
	c := &Client{}

	var called uint32

	nonFatalCodes := map[int]string{
		501: "Not Implemented:",
		507: "Insufficient server storage:",
		509: "Bandwidth limit exceeded:",
	}

	for nonFatalCode, errPrefix := range nonFatalCodes {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.String() != "/test" {
				w.WriteHeader(http.StatusNotFound)
				return
			}
			atomic.AddUint32(&called, 1)
			w.WriteHeader(nonFatalCode)
		}))

		req, err := http.NewRequest("GET", srv.URL+"/test", nil)
		assert.Nil(t, err)

		_, err = c.Do(req)
		t.Logf("non fatal code %d", nonFatalCode)
		assert.NotNil(t, err)
		assert.True(t, strings.HasPrefix(err.Error(), errPrefix))
		srv.Close()
	}

	assert.EqualValues(t, 3, called)
}
package logfiles import ( "fmt" "log" "net" "regexp" "sync/atomic" "time" "github.com/ActiveState/tail" ) var counter uint64 // TailFile tails a specific file func TailFile(search Search, interval time.Duration, server string, hostname string) { seek := &tail.SeekInfo{0, 2} regex := regexp.MustCompile(search.Regex) if regex == nil { log.Fatal("Unable to compile regex, please ensure this is a valid regular expression") } if conn, addr, err := connectToGraphite(server); err == nil { label := fmt.Sprintf("%v.%v", hostname, search.Label) go sendAtInterval(interval, label, conn, addr) } else { log.Panicf("Error connecting to Graphite server: %v", err) } for { if t, err := tail.TailFile(search.Filename, tail.Config{Follow: true, Location: seek}); err == nil { for line := range t.Lines { if r := regex.FindAllStringSubmatch(line.Text, -1); len(r) >= 1 { atomic.AddUint64(&counter, 1) } } } else { log.Fatal("Error tailing file: ", err) } } } func sendAtInterval(interval time.Duration, label string, conn *net.UDPConn, addr *net.UDPAddr) { for { time.Sleep(interval) count := float64(atomic.LoadUint64(&counter)) log.Printf("Logging %v %v", label, count) writeData(label, count, conn, addr) atomic.StoreUint64(&counter, 0) } }
// Showcase the usage of the 3rd party package `validator`
// https://github.com/go-playground/validator for validation of struct objects.
//
// Code taken from: https://github.com/go-playground/validator/blob/master/_examples/simple/main.go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// User contains user information
type User struct {
	FirstName      string     `validate:"required"`
	LastName       string     `validate:"required"`
	Age            uint8      `validate:"gte=0,lte=130"`
	Email          string     `validate:"required,email"`
	FavouriteColor string     `validate:"iscolor"`                // alias for 'hexcolor|rgb|rgba|hsl|hsla'
	Addresses      []*Address `validate:"required,dive"`          // a person can have a home and cottage...
}

// Address houses a users address information
type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
	Planet string `validate:"required"`
	Phone  string `validate:"required"`
}

// use a single instance of Validate, it caches struct info
var validate *validator.Validate

// validateStruct builds a deliberately invalid User (missing City, Age above
// the lte bound, malformed colour) and prints the detailed field errors that
// validate.Struct reports for it.
func validateStruct() {
	address := &Address{
		Street: "Eavesdown Docks",
		Planet: "Persphone",
		Phone:  "none",
	}

	user := &User{
		FirstName:      "Badger",
		LastName:       "Smith",
		Age:            135,
		Email:          "Badger.Smith@gmail.com",
		FavouriteColor: "#000-",
		Addresses:      []*Address{address},
	}

	// returns nil or ValidationErrors ( []FieldError )
	err := validate.Struct(user)
	if err != nil {
		// this check is only needed when your code could produce
		// an invalid value for validation such as interface with nil values
		// most including myself do not usually have code like this.
		if _, ok := err.(*validator.InvalidValidationError); !ok {
			fmt.Printf("Error: %v:\n", err)
			// return
		}

		// loop through all validation errors and print some info
		for _, err := range err.(validator.ValidationErrors) {
			fmt.Printf("Validation Error: %v\n", err)
			fmt.Printf("  Namespace: %s\n", err.Namespace())
			fmt.Printf("  StructNamespace: %s\n", err.StructNamespace())
			fmt.Printf("  Field: %s\n", err.Field())
			fmt.Printf("  StructField: %s\n", err.StructField())
			fmt.Printf("  Tag: %s\n", err.Tag())
			fmt.Printf("  ActualTag: %s\n", err.ActualTag())
			fmt.Printf("  Kind: %s\n", err.Kind())
			fmt.Printf("  Type: %s\n", err.Type())
			fmt.Printf("  Value: %s\n", err.Value())
			fmt.Printf("  Param: %s\n", err.Param())
			fmt.Println()
		}

		// here you can have your own checks and whatnot
		return
	}

	// from here all is good
}

// validateVariable validates a single value (an intentionally malformed email
// address) against a tag expression, without any struct.
func validateVariable() {
	myEmail := "joeybloggs.gmail.com"

	errs := validate.Var(myEmail, "required,email")
	if errs != nil {
		fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag
		return
	}

	// email ok, move on
}

// main initializes the shared validator and runs both demonstrations.
func main() {
	validate = validator.New()

	fmt.Println("\n--- 1 structs ---")
	validateStruct()

	fmt.Println("\n--- 2 variables ---")
	validateVariable()
}
package main

import "fmt"

// main demonstrates Go variable declaration forms:
// a full declaration reads `var variableName type`.
func main() {
	/* Examples:
	var x, y, z int
	var cadena string
	var bandera bool
	var cadenas []string */

	// Short declaration-and-initialization with :=, then reassignment.
	name := "Coco"
	name = "cocooooo"
	fmt.Println(name)
}
package main import ( "net/http" "github.com/johnamadeo/server" log "github.com/sirupsen/logrus" ) func LogAndWriteErr(w http.ResponseWriter, err error, status int, function string) { log.WithFields(log.Fields{ "logger": "logrus", "status": status, "function": function, }).Error(err) w.WriteHeader(status) w.Write(server.ErrToBytes(err)) } func LogAndWrite(w http.ResponseWriter, bytes []byte, status int, function string) { log.WithFields(log.Fields{ "logger": "logrus", "function": function, }).Debug(status) w.Write(bytes) } func LogAndWriteStatusBadRequest(w http.ResponseWriter, err error, function string) { LogAndWriteErr(w, err, http.StatusBadRequest, function) } func LogAndWriteStatusInternalServerError(w http.ResponseWriter, err error, function string) { LogAndWriteErr(w, err, http.StatusInternalServerError, function) }
package sources import ( "fmt" "math" "time" MQTT "github.com/eclipse/paho.mqtt.golang" ) // BeatWave generates a sine wave with beats and sends it over MQTT func BeatWave(cxn MQTT.Client) { for i := 0.0; ; i = i + 0.01 { cxn.Publish("rocket_view/data/beat", 0, false, fmt.Sprintf("%.3f", math.Sin(i) + math.Sin(5 * i))) time.Sleep(25 * time.Millisecond) } }
package main

import "fmt"

// Rnum is a rational number kept as z + top2/bottom, with top1 the raw
// (unsplit) numerator and gcd the last reduction factor found by norm.
type Rnum struct {
	bottom, top1, top2, z, gcd int64
}

// gcd returns the greatest common divisor of |a| and |b| (Euclid).
func gcd(a, b int64) int64 {
	if a < 0 {
		a = -a
	}
	if b < 0 {
		b = -b
	}
	var t int64
	for b != 0 {
		t = b
		b = a % b
		a = t
	}
	return a
}

// norm reduces r by gcd(top1, bottom) in place and splits top1 into the
// whole part z and the remaining proper numerator top2.
func norm(r *Rnum) {
	r.gcd = gcd(r.top1, r.bottom)
	r.top1 /= r.gcd
	r.bottom /= r.gcd
	r.z = r.top1 / r.bottom
	r.top2 = r.top1 % r.bottom
}

// print_rational prints r as "z top2/bottom", parenthesizing negatives and
// collapsing zero parts. It normalizes signs of top2/bottom in place.
func print_rational(r *Rnum) {
	if r.z != 0 {
		if r.z < 0 {
			fmt.Printf("(%d", r.z)
		} else {
			fmt.Printf("%d", r.z)
		}
		if r.top2 < 0 {
			r.top2 = -r.top2
		}
		if r.bottom < 0 {
			r.bottom = -r.bottom
		}
		if r.top2 != 0 {
			fmt.Printf(" %d/%d", r.top2, r.bottom)
		}
		if r.z < 0 {
			fmt.Printf(")")
		}
	} else {
		if (r.top2 < 0 && r.bottom < 0) || (r.top2 > 0 && r.bottom < 0) {
			r.top2 = -r.top2
			r.bottom = -r.bottom
		}
		if r.top2 < 0 {
			fmt.Printf("(")
		}
		if r.top2 != 0 {
			fmt.Printf("%d/%d", r.top2, r.bottom)
		} else {
			fmt.Printf("0")
		}
		if r.top2 < 0 {
			fmt.Printf(")")
		}
	}
}

// calc prints "a op b = result", storing the normalized result in c.
// Division by zero prints "Inf"; unknown operators print a message.
func calc(a, b, c *Rnum, op byte) {
	print_rational(a)
	fmt.Printf(" %c ", op)
	print_rational(b)

	// Fix: reset the result denominator on every call. norm() reduces
	// c.bottom in place, so reusing the value left over from a previous
	// operation (as the original did, relying on a one-time init in main)
	// produced wrong results from the second operation onward
	// (e.g. 1/2 - 1/4 evaluated to 1/2).
	c.bottom = a.bottom * b.bottom

	switch op {
	case '+':
		c.top1 = a.top1*b.bottom + b.top1*a.bottom
	case '-':
		c.top1 = a.top1*b.bottom - b.top1*a.bottom
	case '*':
		c.top1 = a.top1 * b.top1
	case '/':
		if b.top1 == 0 {
			fmt.Printf(" = Inf\n")
			return
		}
		c.top1 = a.top1 * b.bottom
		c.bottom = a.bottom * b.top1
	default:
		fmt.Printf("Unknown op!")
		return
	}
	norm(c)
	fmt.Printf(" = ")
	print_rational(c)
	fmt.Printf("\n")
}

// main reads two rationals as "a/b c/d" and prints their sum, difference,
// product and quotient.
func main() {
	var a, b, c Rnum
	fmt.Scanf("%d/%d %d/%d", &a.top1, &a.bottom, &b.top1, &b.bottom)
	norm(&a)
	norm(&b)
	c.bottom = a.bottom * b.bottom
	calc(&a, &b, &c, '+')
	calc(&a, &b, &c, '-')
	calc(&a, &b, &c, '*')
	calc(&a, &b, &c, '/')
}
package keeper

import (
	"encoding/binary"
	"encoding/json"
	"io/ioutil"
	"os"
	"testing"
	"time"

	"github.com/cosmwasm/wasmd/x/wasm/internal/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/x/auth"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

// TestNewKeeper checks that a keeper can be constructed against a fresh
// temporary wasm directory.
func TestNewKeeper(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	_, _, keeper := CreateTestInput(t, false, tempDir)
	require.NotNil(t, keeper)
}

// TestCreate uploads raw wasm byte code and verifies it is stored under the
// first code ID and can be read back unchanged.
func TestCreate(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	ctx, accKeeper, keeper := CreateTestInput(t, false, tempDir)

	deposit := sdk.NewCoins(sdk.NewInt64Coin("denom", 100000))
	creator := createFakeFundedAccount(ctx, accKeeper, deposit)

	wasmCode, err := ioutil.ReadFile("./testdata/contract.wasm")
	require.NoError(t, err)

	contractID, err := keeper.Create(ctx, creator, wasmCode, "https://github.com/cosmwasm/wasmd/blob/master/x/wasm/testdata/escrow.wasm", "cosmwasm-opt:0.5.2")
	require.NoError(t, err)
	// first upload gets code ID 1
	require.Equal(t, uint64(1), contractID)

	// and verify content
	storedCode, err := keeper.GetByteCode(ctx, contractID)
	require.NoError(t, err)
	require.Equal(t, wasmCode, storedCode)
}

// TestCreateWithGzippedPayload uploads gzip-compressed wasm and verifies the
// keeper stores (and returns) the uncompressed byte code.
func TestCreateWithGzippedPayload(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	ctx, accKeeper, keeper := CreateTestInput(t, false, tempDir)

	deposit := sdk.NewCoins(sdk.NewInt64Coin("denom", 100000))
	creator := createFakeFundedAccount(ctx, accKeeper, deposit)

	wasmCode, err := ioutil.ReadFile("./testdata/contract.wasm.gzip")
	require.NoError(t, err)

	contractID, err := keeper.Create(ctx, creator, wasmCode, "https://github.com/cosmwasm/wasmd/blob/master/x/wasm/testdata/escrow.wasm", "")
	require.NoError(t, err)
	require.Equal(t, uint64(1), contractID)

	// and verify content
	storedCode, err := keeper.GetByteCode(ctx, contractID)
	require.NoError(t, err)
	// stored code must equal the raw (uncompressed) contract
	rawCode, err := ioutil.ReadFile("./testdata/contract.wasm")
	require.NoError(t, err)
	require.Equal(t, rawCode, storedCode)
}

// TestInstantiate creates a contract instance from uploaded code and pins the
// resulting deterministic address and gas consumption.
func TestInstantiate(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	ctx, accKeeper, keeper := CreateTestInput(t, false, tempDir)

	deposit := sdk.NewCoins(sdk.NewInt64Coin("denom", 100000))
	creator := createFakeFundedAccount(ctx, accKeeper, deposit)

	wasmCode, err := ioutil.ReadFile("./testdata/contract.wasm")
	require.NoError(t, err)

	contractID, err := keeper.Create(ctx, creator, wasmCode, "https://github.com/cosmwasm/wasmd/blob/master/x/wasm/testdata/escrow.wasm", "")
	require.NoError(t, err)

	_, _, bob := keyPubAddr()
	_, _, fred := keyPubAddr()
	initMsg := InitMsg{
		Verifier:    fred,
		Beneficiary: bob,
	}
	initMsgBz, err := json.Marshal(initMsg)
	require.NoError(t, err)

	gasBefore := ctx.GasMeter().GasConsumed()

	// create with no balance is also legal
	addr, err := keeper.Instantiate(ctx, contractID, creator, initMsgBz, nil)
	require.NoError(t, err)
	// address derivation is deterministic because keyPubAddr is deterministic
	require.Equal(t, "cosmos18vd8fpwxzck93qlwghaj6arh4p7c5n89uzcee5", addr.String())

	gasAfter := ctx.GasMeter().GasConsumed()
	// pinned gas cost — expected to change when contract/SDK versions change
	require.Equal(t, uint64(36923), gasAfter-gasBefore)
}

// TestInstantiateWithNonExistingCodeID verifies instantiation of an unknown
// code ID fails with ErrNotFound and returns no address.
func TestInstantiateWithNonExistingCodeID(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	ctx, accKeeper, keeper := CreateTestInput(t, false, tempDir)

	deposit := sdk.NewCoins(sdk.NewInt64Coin("denom", 100000))
	creator := createFakeFundedAccount(ctx, accKeeper, deposit)
	require.NoError(t, err)

	initMsg := InitMsg{}
	initMsgBz, err := json.Marshal(initMsg)
	require.NoError(t, err)

	const nonExistingCodeID = 9999
	addr, err := keeper.Instantiate(ctx, nonExistingCodeID, creator, initMsgBz, nil)
	require.True(t, types.ErrNotFound.Is(err), err)
	require.Nil(t, addr)
}

// TestExecute runs the escrow contract end to end: instantiate with a
// deposit, reject an unauthorized caller, then let the verifier release the
// funds to the beneficiary. Gas figures are pinned values.
func TestExecute(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	ctx, accKeeper, keeper := CreateTestInput(t, false, tempDir)

	deposit := sdk.NewCoins(sdk.NewInt64Coin("denom", 100000))
	topUp := sdk.NewCoins(sdk.NewInt64Coin("denom", 5000))
	creator := createFakeFundedAccount(ctx, accKeeper, deposit.Add(deposit))
	fred := createFakeFundedAccount(ctx, accKeeper, topUp)

	wasmCode, err := ioutil.ReadFile("./testdata/contract.wasm")
	require.NoError(t, err)

	contractID, err := keeper.Create(ctx, creator, wasmCode, "", "")
	require.NoError(t, err)

	_, _, bob := keyPubAddr()
	initMsg := InitMsg{
		Verifier:    fred,
		Beneficiary: bob,
	}
	initMsgBz, err := json.Marshal(initMsg)
	require.NoError(t, err)

	addr, err := keeper.Instantiate(ctx, contractID, creator, initMsgBz, deposit)
	require.NoError(t, err)
	require.Equal(t, "cosmos18vd8fpwxzck93qlwghaj6arh4p7c5n89uzcee5", addr.String())

	// ensure bob doesn't exist
	bobAcct := accKeeper.GetAccount(ctx, bob)
	require.Nil(t, bobAcct)

	// ensure funder has reduced balance
	creatorAcct := accKeeper.GetAccount(ctx, creator)
	require.NotNil(t, creatorAcct)
	// we started at 2*deposit, should have spent one above
	assert.Equal(t, deposit, creatorAcct.GetCoins())

	// ensure contract has updated balance
	contractAcct := accKeeper.GetAccount(ctx, addr)
	require.NotNil(t, contractAcct)
	assert.Equal(t, deposit, contractAcct.GetCoins())

	// unauthorized - trialCtx so we don't change state
	trialCtx := ctx.WithMultiStore(ctx.MultiStore().CacheWrap().(sdk.MultiStore))
	res, err := keeper.Execute(trialCtx, addr, creator, []byte(`{}`), nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "Unauthorized")

	// verifier can execute, and get proper gas amount
	start := time.Now()
	gasBefore := ctx.GasMeter().GasConsumed()

	res, err = keeper.Execute(ctx, addr, fred, []byte(`{}`), topUp)
	diff := time.Now().Sub(start)
	require.NoError(t, err)
	require.NotNil(t, res)
	// pinned gas value reported by the contract execution
	assert.Equal(t, uint64(119513), res.GasUsed)

	// make sure gas is properly deducted from ctx
	gasAfter := ctx.GasMeter().GasConsumed()
	require.Equal(t, uint64(31723), gasAfter-gasBefore)

	// ensure bob now exists and got both payments released
	bobAcct = accKeeper.GetAccount(ctx, bob)
	require.NotNil(t, bobAcct)
	balance := bobAcct.GetCoins()
	assert.Equal(t, deposit.Add(topUp), balance)

	// ensure contract has updated balance
	contractAcct = accKeeper.GetAccount(ctx, addr)
	require.NotNil(t, contractAcct)
	assert.Equal(t, sdk.Coins(nil), contractAcct.GetCoins())

	t.Logf("Duration: %v (81488 gas)\n", diff)
}

// TestExecuteWithNonExistingAddress verifies executing against an unknown
// contract address fails with ErrNotFound.
func TestExecuteWithNonExistingAddress(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "wasm")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)
	ctx, accKeeper, keeper := CreateTestInput(t, false, tempDir)

	deposit := sdk.NewCoins(sdk.NewInt64Coin("denom", 100000))
	creator := createFakeFundedAccount(ctx, accKeeper, deposit.Add(deposit))

	// unauthorized - trialCtx so we don't change state
	nonExistingAddress := addrFromUint64(9999)
	_, err = keeper.Execute(ctx, nonExistingAddress, creator, []byte(`{}`), nil)
	require.True(t, types.ErrNotFound.Is(err), err)
}

// InitMsg mirrors the escrow contract's instantiate message.
type InitMsg struct {
	Verifier    sdk.AccAddress `json:"verifier"`
	Beneficiary sdk.AccAddress `json:"beneficiary"`
}

// createFakeFundedAccount creates a fresh account with the given coin balance
// and registers it with the account keeper.
func createFakeFundedAccount(ctx sdk.Context, am auth.AccountKeeper, coins sdk.Coins) sdk.AccAddress {
	_, _, addr := keyPubAddr()
	baseAcct := auth.NewBaseAccountWithAddress(addr)
	_ = baseAcct.SetCoins(coins) // test helper; error intentionally ignored
	am.SetAccount(ctx, &baseAcct)

	return addr
}

// keyCounter seeds key generation so repeated runs yield the same keys.
var keyCounter uint64 = 0

// we need to make this deterministic (same every test run), as encoded address size and thus gas cost,
// depends on the actual bytes (due to ugly CanonicalAddress encoding)
func keyPubAddr() (crypto.PrivKey, crypto.PubKey, sdk.AccAddress) {
	keyCounter++
	seed := make([]byte, 8)
	binary.BigEndian.PutUint64(seed, keyCounter)

	key := ed25519.GenPrivKeyFromSecret(seed)
	pub := key.PubKey()
	addr := sdk.AccAddress(pub.Address())
	return key, pub, addr
}
package usecase import ( "marketplace/accounts/domain" "marketplace/accounts/internal/infrastructure/ads" "github.com/go-pg/pg/v10" "github.com/gin-gonic/gin" ) type GetMeCmd func (db *pg.DB, c *gin.Context, user *domain.Account) (*domain.Account, error) func GetMe(adsFetcher ads.Fetcher) GetMeCmd { return func (db *pg.DB, c *gin.Context, user *domain.Account) (*domain.Account, error) { var adss []domain.Ads adss, err := adsFetcher.GetMyAds(c) if err != nil { return nil, err } user.Ads = adss return user, nil } }
package tempodb

import (
	"errors"
	"fmt"
	"time"

	cortex_cache "github.com/cortexproject/cortex/pkg/chunk/cache"
	"github.com/grafana/tempo/tempodb/backend/azure"
	"github.com/grafana/tempo/tempodb/backend/cache/memcached"
	"github.com/grafana/tempo/tempodb/backend/cache/redis"
	"github.com/grafana/tempo/tempodb/backend/gcs"
	"github.com/grafana/tempo/tempodb/backend/local"
	"github.com/grafana/tempo/tempodb/backend/s3"
	"github.com/grafana/tempo/tempodb/encoding"
	"github.com/grafana/tempo/tempodb/pool"
	"github.com/grafana/tempo/tempodb/wal"
)

// Defaults for blocklist polling and retention worker concurrency.
const DefaultBlocklistPollConcurrency = uint(50)
const DefaultRetentionConcurrency = uint(10)

// Config holds the entirety of tempodb configuration
type Config struct {
	Pool  *pool.Config          `yaml:"pool,omitempty"`
	WAL   *wal.Config           `yaml:"wal"`
	Block *encoding.BlockConfig `yaml:"block"`

	// How often the backend blocklist is polled, and with how many workers.
	BlocklistPoll            time.Duration `yaml:"blocklist_poll"`
	BlocklistPollConcurrency uint          `yaml:"blocklist_poll_concurrency"`

	// backends
	// Backend selects which of the backend configs below is used
	// (presumably one of "local", "gcs", "s3", "azure" — confirm against the
	// backend factory).
	Backend string        `yaml:"backend"`
	Local   *local.Config `yaml:"local"`
	GCS     *gcs.Config   `yaml:"gcs"`
	S3      *s3.Config    `yaml:"s3"`
	Azure   *azure.Config `yaml:"azure"`

	// caches
	// Cache selects the cache implementation; BackgroundCache configures
	// async write-behind behavior for it.
	Cache           string                         `yaml:"cache"`
	BackgroundCache *cortex_cache.BackgroundConfig `yaml:"background_cache"`
	Memcached       *memcached.Config              `yaml:"memcached"`
	Redis           *redis.Config                  `yaml:"redis"`
}

// CompactorConfig contains compaction configuration options
type CompactorConfig struct {
	ChunkSizeBytes          uint32        `yaml:"chunk_size_bytes"` // todo: do we need this?
	FlushSizeBytes          uint32        `yaml:"flush_size_bytes"`
	MaxCompactionRange      time.Duration `yaml:"compaction_window"`
	MaxCompactionObjects    int           `yaml:"max_compaction_objects"`
	MaxBlockBytes           uint64        `yaml:"max_block_bytes"`
	BlockRetention          time.Duration `yaml:"block_retention"`
	CompactedBlockRetention time.Duration `yaml:"compacted_block_retention"`
	RetentionConcurrency    uint          `yaml:"retention_concurrency"`
	IteratorBufferSize      int           `yaml:"iterator_buffer_size"`
}

// validateConfig checks that the required WAL and Block sub-configs are
// present and that the block config passes encoding validation. It returns
// the first problem found, or nil if the config is usable.
func validateConfig(cfg *Config) error {
	if cfg.WAL == nil {
		return errors.New("wal config should be non-nil")
	}

	if cfg.Block == nil {
		return errors.New("block config should be non-nil")
	}

	err := encoding.ValidateConfig(cfg.Block)
	if err != nil {
		return fmt.Errorf("block config validation failed: %w", err)
	}

	return nil
}
package main

import "fmt"

// sendPing places msg on the ping channel.
// (Renamed from sendping: Go naming uses MixedCaps.)
func sendPing(pings chan<- string, msg string) {
	pings <- msg
}

// receivePong takes the message from the ping channel and forwards it to the
// pong channel. (Renamed from recievepong: fixes the spelling and uses
// MixedCaps; both functions are package-internal so no callers break.)
func receivePong(pings <-chan string, pongs chan<- string) {
	msg := <-pings
	pongs <- msg
}

// main demonstrates passing a message through two buffered channels.
func main() {
	pingchannel := make(chan string, 1) // For sending
	pongchannel := make(chan string, 1) // For receiving

	sendPing(pingchannel, "passed message")
	receivePong(pingchannel, pongchannel)
	fmt.Println(<-pongchannel)
}
package tracing //go:generate mockgen -package mock -destination mock/tracing_mock.go github.com/caos/zitadel/internal/tracing Tracer