text
stringlengths
11
4.05M
// SPDX-License-Identifier: MIT

// Package asttest provides a valid, fully-populated ast.APIDoc object for use in tests.
package asttest

import (
	"net/http"
	"path/filepath"

	"github.com/issue9/assert/v3"
	"github.com/issue9/source"

	"github.com/caixw/apidoc/v7/core"
	"github.com/caixw/apidoc/v7/internal/ast"
	"github.com/caixw/apidoc/v7/internal/xmlenc"
)

// Filename is the file name of the test document.
const Filename = "index.xml"

// Get returns the doc.APIDoc object.
//
// The index.xml file in the current directory has the same content as the
// returned object, so tests can compare the two.
func Get() *ast.APIDoc {
	return &ast.APIDoc{
		APIDoc:  &ast.APIDocVersionAttribute{Value: xmlenc.String{Value: ast.Version}},
		Version: &ast.VersionAttribute{Value: xmlenc.String{Value: "1.0.1"}},
		Title:   &ast.Element{Content: ast.Content{Value: "test"}},
		Description: &ast.Richtext{
			Text: &ast.CData{Value: xmlenc.String{Value: "<p>desc</p>"}},
			Type: &ast.Attribute{Value: xmlenc.String{Value: ast.RichtextTypeHTML}},
		},
		// Two named servers; the APIs below reference them by name in their
		// Servers lists.
		Servers: []*ast.Server{
			{
				URL:     &ast.Attribute{Value: xmlenc.String{Value: "https://example.com/admin"}},
				Name:    &ast.Attribute{Value: xmlenc.String{Value: "admin"}},
				Summary: &ast.Attribute{Value: xmlenc.String{Value: "admin"}},
			},
			{
				URL:     &ast.Attribute{Value: xmlenc.String{Value: "https://example.com"}},
				Name:    &ast.Attribute{Value: xmlenc.String{Value: "client"}},
				Summary: &ast.Attribute{Value: xmlenc.String{Value: "client"}},
			},
		},
		Tags: []*ast.Tag{
			{
				Name:  &ast.Attribute{Value: xmlenc.String{Value: "t1"}},
				Title: &ast.Attribute{Value: xmlenc.String{Value: "t1"}},
			},
			{
				Name:  &ast.Attribute{Value: xmlenc.String{Value: "t2"}},
				Title: &ast.Attribute{Value: xmlenc.String{Value: "t2"}},
			},
			{
				Name:  &ast.Attribute{Value: xmlenc.String{Value: "tag1"}},
				Title: &ast.Attribute{Value: xmlenc.String{Value: "tag1"}},
			},
		},
		Mimetypes: []*ast.Element{
			{Content: ast.Content{Value: "application/json"}},
			{Content: ast.Content{Value: "application/xml"}},
		},
		APIs: []*ast.API{
			// GET /users: available only on the "admin" server.
			{
				Method: &ast.MethodAttribute{Value: xmlenc.String{Value: http.MethodGet}},
				Tags: []*ast.TagValue{
					{Content: ast.Content{Value: "t1"}},
					{Content: ast.Content{Value: "t2"}},
				},
				Path: &ast.Path{Path: &ast.Attribute{Value: xmlenc.String{Value: "/users"}}},
				Servers: []*ast.ServerValue{
					{Content: ast.Content{Value: "admin"}},
				},
				Requests: []*ast.Request{
					{
						Summary: &ast.Attribute{Value: xmlenc.String{Value: "request"}},
						Headers: []*ast.Param{
							{
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeString}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "authorization"}},
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "authorization"}},
							},
						},
						Examples: []*ast.Example{
							{
								Mimetype: &ast.Attribute{Value: xmlenc.String{Value: "application/json"}},
								Content:  &ast.ExampleValue{Value: xmlenc.String{Value: "xxx"}},
							},
						},
					},
				},
				Responses: []*ast.Request{
					{
						Description: &ast.Richtext{
							Type: &ast.Attribute{Value: xmlenc.String{Value: "html"}},
							Text: &ast.CData{Value: xmlenc.String{Value: "<p>desc</p>"}},
						},
						Type:   &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeObject}},
						Status: &ast.StatusAttribute{Value: ast.Number{Int: http.StatusOK}},
						Headers: []*ast.Param{
							{
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeString}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "authorization"}},
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "authorization"}},
							},
						},
						Examples: []*ast.Example{
							{
								Mimetype: &ast.Attribute{Value: xmlenc.String{Value: "application/json"}},
								Content:  &ast.ExampleValue{Value: xmlenc.String{Value: "xxx"}},
							},
						},
						Items: []*ast.Param{
							{
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeNumber}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "id"}},
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "ID"}},
							},
							{
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "summary"}},
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeString}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "name"}},
							},
						},
					},
				},
			},
			// POST /users: marked deprecated since 1.0.1, available on both servers.
			{
				Method: &ast.MethodAttribute{Value: xmlenc.String{Value: http.MethodPost}},
				Tags: []*ast.TagValue{
					{Content: ast.Content{Value: "t1"}},
					{Content: ast.Content{Value: "tag1"}},
				},
				Path:       &ast.Path{Path: &ast.Attribute{Value: xmlenc.String{Value: "/users"}}},
				Deprecated: &ast.VersionAttribute{Value: xmlenc.String{Value: "1.0.1"}},
				Summary:    &ast.Attribute{Value: xmlenc.String{Value: "summary"}},
				Servers: []*ast.ServerValue{
					{Content: ast.Content{Value: "admin"}},
					{Content: ast.Content{Value: "client"}},
				},
				Requests: []*ast.Request{
					{
						Name:    &ast.Attribute{Value: xmlenc.String{Value: "root"}},
						Summary: &ast.Attribute{Value: xmlenc.String{Value: "request"}},
						Headers: []*ast.Param{
							{
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeString}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "authorization"}},
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "authorization"}},
							},
						},
						Examples: []*ast.Example{
							{
								Mimetype: &ast.Attribute{Value: xmlenc.String{Value: "application/json"}},
								Content:  &ast.ExampleValue{Value: xmlenc.String{Value: "xxx"}},
							},
						},
						Type: &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeObject}},
						Items: []*ast.Param{
							{
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeNumber}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "id"}},
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "ID"}},
							},
							{
								Type:    &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeString}},
								Name:    &ast.Attribute{Value: xmlenc.String{Value: "name"}},
								Summary: &ast.Attribute{Value: xmlenc.String{Value: "name summary"}},
							},
						},
					},
				},
				Responses: []*ast.Request{
					{
						Status: &ast.StatusAttribute{Value: ast.Number{Int: http.StatusCreated}},
						Description: &ast.Richtext{
							Type: &ast.Attribute{Value: xmlenc.String{Value: "html"}},
							Text: &ast.CData{Value: xmlenc.String{Value: "<p>desc</p>"}},
						},
						Type: &ast.TypeAttribute{Value: xmlenc.String{Value: ast.TypeNone}},
					},
				},
			},
		},
	}
}

// XML returns the XML encoding of the object returned by Get.
func XML(a *assert.Assertion) []byte {
	data, err := xmlenc.Encode("", Get(), core.XMLNamespace, "apidoc")
	a.NotError(err).NotNil(data)
	return data
}

// URI returns the URI-based representation of the test file.
func URI(a *assert.Assertion) core.URI {
	p := core.FileURI(pp(a, Filename))
	a.NotEmpty(p)
	return p
}

// Path returns the absolute path of the test file.
//
// NOTE: the file's content is identical to the object returned by Get().
func Path(a *assert.Assertion) string {
	return pp(a, Filename)
}

// Dir returns the directory that contains the test file.
func Dir(a *assert.Assertion) string {
	return pp(a, "")
}

// pp resolves p relative to this source file's location (via source.CurrentPath)
// and returns its absolute path, asserting that resolution succeeded.
func pp(a *assert.Assertion, p string) string {
	p = source.CurrentPath(p)
	p, err := filepath.Abs(p)
	a.NotError(err).NotEmpty(p)
	return p
}
package util

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// RestHandleResponse writes a REST API response to the gin context: on
// success (err == nil) it serializes body as JSON with status 200 OK,
// otherwise it writes the error text as a plain string with status 400
// Bad Request.
func RestHandleResponse(context *gin.Context, body interface{}, err error) {
	if err != nil {
		// Named status constants instead of the magic numbers 200/400.
		context.String(http.StatusBadRequest, err.Error())
		return
	}
	context.JSON(http.StatusOK, body)
}
package cassandradatacenter

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	cassandraoperatorv1alpha1 "github.com/instaclustr/cassandra-operator/pkg/apis/cassandraoperator/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// pvcDeletionFinalizer is the finalizer attached to a CassandraDataCenter so
// that its PersistentVolumeClaims can be cleaned up before the resource is
// removed from the API server.
const pvcDeletionFinalizer = "finalizer.pvcs.cassandraoperator.instaclustr.com"

// deletePersistenceVolumeClaim submits deletion of a single PVC and logs the
// outcome; the underlying client error (if any) is returned unchanged.
func (r *ReconcileCassandraDataCenter) deletePersistenceVolumeClaim(reqLogger logr.Logger, pvc corev1.PersistentVolumeClaim) error {
	if err := r.client.Delete(context.TODO(), &pvc); err != nil {
		reqLogger.Info(fmt.Sprintf("Unable to delete pvc %s.", pvc.Name))
		return err
	} else {
		reqLogger.Info(fmt.Sprintf("Successfully submitted deletion of pvc %s.", pvc.Name))
	}
	return nil
}

// pvcFilterFunc reports whether a PVC should be kept by getPVCs.
type pvcFilterFunc func(corev1.PersistentVolumeClaim) bool

// getPVCs lists the PVCs labelled with this instance's datacenter/cluster
// labels, optionally filtered by filterFn (pass nil to return all of them).
func (r *ReconcileCassandraDataCenter) getPVCs(
	instance *cassandraoperatorv1alpha1.CassandraDataCenter,
	filterFn *pvcFilterFunc,
) ([]corev1.PersistentVolumeClaim, error) {
	pvcList := &corev1.PersistentVolumeClaimList{}
	listOpts := []client.ListOption{
		client.InNamespace(instance.Namespace),
		client.MatchingLabels{
			"cassandra-operator.instaclustr.com/datacenter": instance.DataCenter,
			"cassandra-operator.instaclustr.com/cluster":    instance.Cluster,
		},
	}
	if err := r.client.List(context.TODO(), pvcList, listOpts...); err != nil {
		return nil, err
	} else {
		if filterFn == nil {
			return pvcList.Items, nil
		}
		var filterPVCs []corev1.PersistentVolumeClaim
		for _, pvc := range pvcList.Items {
			if (*filterFn)(pvc) {
				filterPVCs = append(filterPVCs, pvc)
			}
		}
		return filterPVCs, nil
	}
}

// finalizePVCs deletes every PVC belonging to the data center, but only when
// the spec requests PVC deletion (Spec.DeletePVCs). It stops at the first
// deletion error.
func (r *ReconcileCassandraDataCenter) finalizePVCs(reqLogger logr.Logger, instance *cassandraoperatorv1alpha1.CassandraDataCenter) error {
	pvcList := corev1.PersistentVolumeClaimList{}
	listOpts := []client.ListOption{
		client.InNamespace(instance.Namespace),
		client.MatchingLabels{
			"cassandra-operator.instaclustr.com/datacenter": instance.DataCenter,
			"cassandra-operator.instaclustr.com/cluster":    instance.Cluster,
		},
	}
	// NOTE(review): the list is fetched even when DeletePVCs is false, in
	// which case the work is discarded by the early return below.
	if err := r.client.List(context.TODO(), &pvcList, listOpts...); err != nil {
		return err
	}
	if !instance.Spec.DeletePVCs {
		return nil
	}
	for _, pvc := range pvcList.Items {
		if err := r.deletePersistenceVolumeClaim(reqLogger, pvc); err != nil {
			return err
		}
	}
	return nil
}

// addFinalizer attaches pvcDeletionFinalizer to the instance (and persists the
// change) when PVC deletion is requested and the finalizer is not yet present.
// `contains` is a helper defined elsewhere in this package.
func (r *ReconcileCassandraDataCenter) addFinalizer(reqLogger logr.Logger, instance *cassandraoperatorv1alpha1.CassandraDataCenter) error {
	if !contains(instance.GetFinalizers(), pvcDeletionFinalizer) && instance.Spec.DeletePVCs {
		reqLogger.Info("Adding Finalizer for the CassandraDataCenter")
		instance.SetFinalizers(append(instance.GetFinalizers(), pvcDeletionFinalizer))
		err := r.client.Update(context.TODO(), instance)
		if err != nil {
			reqLogger.Error(err, "Failed to update CassandraDataCenter with finalizer "+pvcDeletionFinalizer)
			return err
		}
	}
	return nil
}

// finalizeIfNecessary runs PVC finalization when the instance is being
// deleted and carries our finalizer. It returns true when finalization was
// performed (and the finalizer removed), false otherwise.
// `remove` is a helper defined elsewhere in this package.
func (r *ReconcileCassandraDataCenter) finalizeIfNecessary(reqLogger logr.Logger, instance *cassandraoperatorv1alpha1.CassandraDataCenter) (bool, error) {
	// A nil deletion timestamp means the object is not being deleted.
	if instance.GetDeletionTimestamp() == nil {
		return false, nil
	}
	if contains(instance.GetFinalizers(), pvcDeletionFinalizer) {
		if err := r.finalizePVCs(reqLogger, instance); err != nil {
			return false, err
		} else {
			r.recorder.Event(
				instance,
				corev1.EventTypeNormal,
				"SuccessEvent",
				fmt.Sprintf("%s was finalized.", instance.Name))
		}
		// Remove our finalizer so Kubernetes can complete the deletion.
		instance.SetFinalizers(remove(instance.GetFinalizers(), pvcDeletionFinalizer))
		if err := r.client.Update(context.TODO(), instance); err != nil {
			return false, err
		}
		return true, nil
	}
	return false, nil
}

// finalizeDeletedPods records an event for every decommissioned pod and, when
// Spec.DeletePVCs is set, deletes the PVCs those pods were using.
// `AllDeletedPods` is defined elsewhere in this package.
func (r *ReconcileCassandraDataCenter) finalizeDeletedPods(reqLogger logr.Logger, instance *cassandraoperatorv1alpha1.CassandraDataCenter) error {
	if deletedPods, err := AllDeletedPods(r.client, instance); err != nil {
		return err
	} else {
		if len(deletedPods) == 0 {
			return nil
		}
		for _, pod := range deletedPods {
			r.recorder.Event(
				instance,
				corev1.EventTypeNormal,
				"SuccessEvent",
				fmt.Sprintf("Decommissioning of %s was successful.", pod.Name))
		}
		if !instance.Spec.DeletePVCs {
			return nil
		}
		if existingPVCs, err := r.getPVCs(instance, nil); err != nil {
			return err
		} else {
			// Match each deleted pod's PVC volume claims against the PVCs
			// that still exist, and delete the ones that were in use.
			for _, pod := range deletedPods {
				for _, volume := range pod.Spec.Volumes {
					podsPVC := volume.VolumeSource.PersistentVolumeClaim
					if podsPVC != nil {
						for _, c := range existingPVCs {
							if c.Name == podsPVC.ClaimName {
								if err := r.deletePersistenceVolumeClaim(reqLogger, c); err != nil {
									r.recorder.Event(
										instance,
										corev1.EventTypeWarning,
										"FailureEvent",
										fmt.Sprintf("Deletion of PVC %s failed: %v", c.Name, err))
									return err
								}
								r.recorder.Event(
									instance,
									corev1.EventTypeNormal,
									"SuccessEvent",
									fmt.Sprintf("Deletion of PVC %s was successful.", c.Name))
								break
							}
						}
					}
				}
			}
		}
	}
	return nil
}
package stat

import (
	"errors"
	"io"
	"os"

	"github.com/loov/goda/internal/memory"
)

// Source contains basic analysis of arbitrary source code.
type Source struct {
	// Files count in this stat.
	Files int
	// Binary file count.
	Binary int
	// Size in bytes of all files.
	Size memory.Bytes
	// Count of non-empty lines.
	Lines int
	// Count of empty lines.
	Blank int
}

// Add accumulates s into c, field by field.
func (c *Source) Add(s Source) {
	c.Files += s.Files
	c.Binary += s.Binary
	c.Size += s.Size
	c.Blank += s.Blank
	c.Lines += s.Lines
}

// lineCounter accumulates line statistics across one or more chunks of data.
// It replaces the byte-scanning loop that was previously duplicated verbatim
// in SourceFromBytes and SourceFromPath.
type lineCounter struct {
	lines     int  // non-empty lines seen so far
	blank     int  // whitespace-only lines seen so far
	emptyline bool // true while the current line contains only whitespace
}

// scan consumes one chunk of data. It reports true when a NUL byte is found,
// meaning the content is binary and the accumulated counts are meaningless.
func (lc *lineCounter) scan(data []byte) (binary bool) {
	for _, c := range data {
		switch c {
		case 0x0:
			return true
		case '\n':
			if lc.emptyline {
				lc.blank++
			} else {
				lc.lines++
			}
			lc.emptyline = true
		case '\r', ' ', '\t':
			// whitespace does not make a line non-empty
		default:
			lc.emptyline = false
		}
	}
	return false
}

// finish accounts for a trailing line that is not newline-terminated.
func (lc *lineCounter) finish() {
	if !lc.emptyline {
		lc.lines++
	}
}

// SourceFromBytes analyzes in-memory file content.
func SourceFromBytes(data []byte) Source {
	count := Source{Files: 1}
	if len(data) == 0 {
		return count
	}
	count.Size += memory.Bytes(len(data))

	lc := lineCounter{emptyline: true}
	if lc.scan(data) {
		// NUL byte found: report a binary file, keeping only the size.
		return Source{Binary: 1, Size: count.Size}
	}
	lc.finish()
	count.Lines = lc.lines
	count.Blank = lc.blank
	return count
}

// ErrEmptyFile is returned by SourceFromPath for zero-length files.
var ErrEmptyFile = errors.New("empty file")

// SourceFromPath analyzes the file at path, streaming it in fixed-size chunks
// so arbitrarily large files use constant memory.
func SourceFromPath(path string) (Source, error) {
	count := Source{
		Files: 1,
	}

	file, err := os.Open(path)
	if err != nil {
		return count, err
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		return count, err
	}
	if stat.Size() <= 0 {
		return count, ErrEmptyFile
	}
	count.Size += memory.Bytes(stat.Size())

	// 8 KiB read buffer, allocated once and reused for every read.
	// (The original used 8196, an apparent typo for 8192.)
	buf := make([]byte, 8192)
	lc := lineCounter{emptyline: true}
	for {
		n, err := file.Read(buf)
		if err != nil && err != io.EOF {
			return count, err
		}
		if lc.scan(buf[:n]) {
			// NUL byte found: report a binary file, keeping only the size.
			return Source{Binary: 1, Size: count.Size}, nil
		}
		if err == io.EOF {
			break
		}
	}
	lc.finish()
	count.Lines = lc.lines
	count.Blank = lc.blank
	return count, nil
}
package metrics import ( "github.com/prometheus/client_golang/prometheus" ) const namespace = "caddy" var ( requestCount *prometheus.CounterVec requestDuration *prometheus.HistogramVec responseSize *prometheus.HistogramVec responseStatus *prometheus.CounterVec responseLatency *prometheus.HistogramVec ) func (m Metrics) define(subsystem string) { if subsystem == "" { subsystem = "http" } if m.latencyBuckets == nil { m.latencyBuckets = append(prometheus.DefBuckets, 15, 20, 30, 60, 120, 180, 240, 480, 960) } if m.sizeBuckets == nil { m.sizeBuckets = []float64{0, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000, 1e5, 5e5, 1e6, 2e6, 3e6, 4e6, 5e6, 10e6} } extraLabels := m.extraLabelNames() requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "request_count_total", Help: "Counter of HTTP(S) requests made.", }, append([]string{"host", "family", "proto"}, extraLabels...)) requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: "request_duration_seconds", Help: "Histogram of the time (in seconds) each request took.", Buckets: m.latencyBuckets, }, append([]string{"host", "family", "proto"}, extraLabels...)) responseSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: "response_size_bytes", Help: "Size of the returns response in bytes.", Buckets: m.sizeBuckets, }, append([]string{"host", "family", "proto", "status"}, extraLabels...)) responseStatus = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "response_status_count_total", Help: "Counter of response status codes.", }, append([]string{"host", "family", "proto", "status"}, extraLabels...)) responseLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: "response_latency_seconds", Help: "Histogram of the time (in seconds) 
until the first write for each request.", Buckets: m.latencyBuckets, }, append([]string{"host", "family", "proto", "status"}, extraLabels...)) }
package utils

import (
	"crypto/rand"
	"fmt"
	"io"
	"net/http"
	"runtime"
	"time"

	"google.golang.org/grpc"

	"o2clock/constants/appconstant"
)

// ValidateAndPrintMemUsage dumps the current runtime memory statistics to
// stdout. The srv parameter is unused but kept for signature compatibility
// with existing callers.
func ValidateAndPrintMemUsage(srv *grpc.Server) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Println(time.Now())
	fmt.Println(appconstant.MEM_ALLOC, bToMb(m.Alloc))
	fmt.Println(appconstant.MEM_TOTAL_ALLOC, bToMb(m.TotalAlloc))
	fmt.Println(appconstant.MEM_SYS, bToMb(m.Sys))
	fmt.Println(appconstant.NUM_GC, m.NumGC)
	// NOTE(review): Lookups and Mallocs are event counts, not byte sizes, so
	// scaling them with bToMb looks wrong — preserved as-is to keep output
	// stable; confirm intent with the original author.
	fmt.Println(appconstant.LOOKUPS, bToMb(m.Lookups))
	fmt.Println(appconstant.MALLOCS, bToMb(m.Mallocs))
	fmt.Println()
}

// bToMb converts a byte count to whole mebibytes.
func bToMb(b uint64) uint64 {
	return b / 1024 / 1024
}

// CreateJobMemUsage runs forever, printing memory usage every time the job
// timer (NewJobMemUsage, defined elsewhere) fires.
func CreateJobMemUsage(srv *grpc.Server) {
	jt := NewJobMemUsage()
	for {
		<-jt.t.C
		ValidateAndPrintMemUsage(srv)
		jt.updateJobMemCheck()
	}
}

// CurrentMemStatus returns a human-readable snapshot of the current memory
// statistics (alloc, total alloc and sys), one per line.
func CurrentMemStatus() string {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return appconstant.MEM_ALLOC + fmt.Sprint(bToMb(m.Alloc)) + "\n" +
		appconstant.MEM_TOTAL_ALLOC + fmt.Sprint(bToMb(m.TotalAlloc)) + "\n" +
		appconstant.MEM_SYS + fmt.Sprint(bToMb(m.Sys))
}

// GenerateRandomString returns a string of max random decimal digits drawn
// from crypto/rand. It panics if the entropy source fails — random material
// must never silently degrade.
func GenerateRandomString(max int) string {
	var table = [...]byte{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0'}
	b := make([]byte, max)
	// io.ReadAtLeast returns a non-nil error exactly when it read fewer than
	// max bytes, so checking err directly is clearer than comparing n != max.
	if _, err := io.ReadAtLeast(rand.Reader, b, max); err != nil {
		panic(err)
	}
	for i := 0; i < len(b); i++ {
		b[i] = table[int(b[i])%len(table)]
	}
	return string(b)
}

// RandomStringGenerateWithType returns a random string of the given size
// drawn from the dictionary selected by randType (alphanumeric, alphabetic or
// numeric). An unknown randType yields "". It panics if the entropy source
// fails.
func RandomStringGenerateWithType(size int, randType string) string {
	// Go switch cases do not fall through, so the explicit breaks the
	// original carried were redundant and have been dropped.
	var dictionary string
	switch randType {
	case appconstant.ALPHA_NUM:
		dictionary = appconstant.DIC_ALPHA_NUM
	case appconstant.ALPHA:
		dictionary = appconstant.DIC_ALPHA
	case appconstant.NUM:
		dictionary = appconstant.DIC_NUM
	default:
		return ""
	}

	var bytes = make([]byte, size)
	// The original discarded this error; a failed crypto/rand read would
	// have produced a predictable string built from an all-zero buffer.
	if _, err := rand.Read(bytes); err != nil {
		panic(err)
	}
	for k, v := range bytes {
		bytes[k] = dictionary[v%byte(len(dictionary))]
	}
	return string(bytes)
}

// ParseRequest returns the value of key from the request's form body.
func ParseRequest(r *http.Request, key string) string {
	return r.PostFormValue(key)
}
package middleware

import (
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"

	"enroll/interfaces"
	"enroll/providers"
)

// abortUnauthorized aborts the request with a 401 JSON error payload. It
// factors out the response shape that was previously repeated four times.
func abortUnauthorized(c *gin.Context, message string) {
	c.AbortWithStatusJSON(http.StatusUnauthorized, interfaces.ErrorResponse{
		Data: interfaces.ErrorMessage{
			Message: message,
			// The original used a bare 401 literal in one branch and
			// http.StatusUnauthorized in the others; unified here.
			Status: http.StatusUnauthorized,
		},
	})
}

// AdminAuth returns a middleware that validates the Bearer token in the
// Authorization header against the LOGIN_ADMIN token kind, aborting with 401
// when the token is missing, malformed, invalid or not an admin token.
func AdminAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		authHeader := c.GetHeader("Authorization")
		if len(authHeader) == 0 {
			abortUnauthorized(c, "Token not provided")
			return
		}

		// Expect exactly "Bearer <token>".
		bearerAndToken := strings.Fields(authHeader)
		if len(bearerAndToken) != 2 {
			// Fixed typo in the original message ("Token bad formated").
			abortUnauthorized(c, "Token badly formatted")
			return
		}

		authControl := providers.AuthControl{}
		respValidToken, err := authControl.ValidToken(providers.ValidTokenInput{
			Token:     bearerAndToken[1],
			TokenKind: "LOGIN_ADMIN",
		})
		// err is a message struct, not a Go error; a non-empty Message
		// signals a validation failure.
		if err.Message != "" {
			abortUnauthorized(c, err.Message)
			return
		}
		if !respValidToken {
			abortUnauthorized(c, "Access not allowed to this endpoint")
			return
		}

		c.Next()
	}
}
package podres

import (
	"fmt"
	"log"
	"time"

	podresources "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
	podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
)

const (
	// Values obtained from node e2e tests:
	// https://github.com/kubernetes/kubernetes/blob/82baa26905c94398a0d19e1b1ecf54eb8acb6029/test/e2e_node/util.go#L70
	defaultPodResourcesTimeout = 10 * time.Second
	defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb
)

// GetPodResClient connects to the kubelet pod-resources gRPC endpoint at
// socketPath and returns a lister client for it.
func GetPodResClient(socketPath string) (podresourcesapi.PodResourcesListerClient, error) {
	// The original declared `var err error` before the := below, which was
	// redundant; the short declaration introduces err itself.
	podResourceClient, _, err := podresources.GetClient(socketPath, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
	if err != nil {
		// Lowercase error text per Go convention, and wrap with %w so
		// callers can unwrap the underlying error.
		return nil, fmt.Errorf("can't create client: %w", err)
	}
	log.Printf("connected to '%v'!", socketPath)
	return podResourceClient, nil
}
package Authentication

//TODO: better way for import
import (
	"errors"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/mitchellh/mapstructure"

	"proxy/Logger"
)

// Authentication describes one upstream authentication service entry as read
// from the configuration file.
type Authentication struct {
	Name        string
	Auth_scheme string
	Auth_addr   string
	Auth_type   string
	Url_path    string
	Req_headers []string
}

// lauth is the package logger, initialised by ReadAuthFromFile.
var lauth *Logger.Logger

// AuthMiddlewares maps an authentication entry name to the gin route group
// guarded by its middleware.
var AuthMiddlewares = map[string]*gin.RouterGroup{}

// Validate checks that all required fields are set and that the scheme and
// type are among the supported values ("http" and "epp" respectively).
func (auth *Authentication) Validate() error {
	var rv string = ""
	if auth.Name == "" {
		rv += "name, "
	}
	if auth.Auth_addr == "" {
		rv += "auth_name, "
	}
	if auth.Auth_scheme == "" {
		rv += "auth_scheme, "
	} else if auth.Auth_scheme != "http" {
		return errors.New("Invalid auth_scheme provided: " + auth.Auth_scheme + " . Currently supported only http")
	}
	if auth.Auth_type == "" {
		rv += "auth_type, "
	} else if auth.Auth_type != "epp" {
		return errors.New("Supported auth type is only 'epp'")
	}
	if auth.Url_path == "" {
		rv += "url_path, "
	}
	if rv != "" {
		rv = "Missing required fields: " + rv
		return errors.New(rv)
	}
	return nil
}

//TODO: investigate method, it's comparably slow

// ReadAuthFromFile decodes the raw "Auth" configuration value into validated
// Authentication entries. It panics on malformed configuration, matching the
// original fail-fast startup behavior.
func ReadAuthFromFile(auth interface{}) []Authentication {
	lauth = Logger.New("Authentication", 0, nil)
	var rv []Authentication
	auth2, ok := auth.([]interface{})
	if !ok { // idiomatic form of the original `ok == false`
		panic("Can't cast Authentication interface{} to map[string]interface{}")
	}
	for _, v := range auth2 {
		var tmp Authentication
		if err := mapstructure.Decode(v, &tmp); err != nil {
			panic("Can't decode one of authentication from json to structure. Error: " + err.Error())
		}
		if err := tmp.Validate(); err != nil {
			panic(err.Error())
		}
		rv = append(rv, tmp)
	}
	return rv
}

// RegisterAuth builds a guarded gin route group for every configured auth
// entry and registers it in AuthMiddlewares under the entry's name.
func RegisterAuth(cl *gin.Engine, file map[string]interface{}) {
	auths := ReadAuthFromFile(file["Auth"])
	for _, a := range auths {
		middle := RegisterMiddleware(a)
		tmp := cl.Group("/")
		tmp.Use(middle)
		AuthMiddlewares[a.Name] = tmp
	}
}

// DefaultAuthMiddleware returns a middleware that forwards the configured
// headers to the external auth service and only lets the request through
// when the service answers with a status below 300.
func DefaultAuthMiddleware(auth Authentication) gin.HandlerFunc {
	return func(c *gin.Context) {
		// init client and request
		cl := http.Client{}
		req, err := http.NewRequest("GET", auth.Auth_scheme+"://"+auth.Auth_addr+auth.Url_path, nil)
		if err != nil {
			lauth.Error(map[string]string{"Error": err.Error()}, "Error while creating new 'Request'")
			c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}

		// fill request with required auth headers
		for _, h := range auth.Req_headers {
			if v := c.Request.Header.Get(h); len(v) != 0 {
				req.Header.Add(h, v)
			} else {
				lauth.Error(
					map[string]string{"Missing header": h, "Required headers": strings.Join(auth.Req_headers, ",")},
					"Missing required header")
				c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"message": "Missing required header: " + h})
				return
			}
		}

		// send request to auth server
		resp, err := cl.Do(req)
		if err != nil {
			lauth.Error(map[string]string{"Error": err.Error()}, "Error when trying to send auth request")
			code := http.StatusInternalServerError
			if resp != nil { // defensive: resp is normally nil when err != nil
				code = resp.StatusCode
			}
			c.AbortWithStatusJSON(code, gin.H{"error": err.Error()})
			return
		}
		// The body must always be closed or the underlying connection can
		// never be reused; the original leaked it on every request.
		defer resp.Body.Close()

		if resp.StatusCode >= 300 { //need to check status not only for 200
			bodyB, _ := ioutil.ReadAll(resp.Body)
			lauth.Error(map[string]string{"Status": resp.Status, "Response": string(bodyB)}, "Unsuccessful code return form auth service")
			// Return the body text; the original passed resp.Body (an
			// io.ReadCloser), which does not serialize meaningfully.
			c.AbortWithStatusJSON(resp.StatusCode, gin.H{"Status": resp.Status, "body": string(bodyB)})
			return
		}

		//process if authorized
		c.Next()
	}
}

// RegisterMiddleware returns the middleware implementation for the given auth
// entry. Currently only the 'epp' (endpoint per permission) type is
// supported; anything else panics.
func RegisterMiddleware(auth Authentication) gin.HandlerFunc {
	if auth.Auth_type == "epp" {
		return DefaultAuthMiddleware(auth)
	}
	panic("Unsupported auth_type is used: " + auth.Auth_type)
}
package main

import (
	"context"
	"fmt"
	"os"
	"runtime"

	"github.com/tanaton/covid-19-chart/app"
)

// main delegates to _main and converts any panic into a message on stderr
// plus a non-zero exit code.
func main() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Fprintf(os.Stderr, "Error:\n%s", err)
			os.Exit(1)
		}
	}()
	os.Exit(_main())
}

// _main configures the runtime, builds the application and runs it until the
// context is cancelled, returning the process exit code.
func _main() int {
	// Honor an explicit GOMAXPROCS from the environment; otherwise use every
	// available CPU.
	if os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if err := app.New().Run(ctx); err != nil {
		fmt.Fprintf(os.Stderr, "Error:%s\n", err)
		return 1
	}
	return 0
}
package stream

import (
	"io"

	containers "github.com/ernoaapa/eliot/pkg/api/services/containers/v1"
	"github.com/pkg/errors"
)

// PipeStdout reads stdout from grpc stream and writes it to stdout/stderr,
// routing each chunk to stderr when the message is flagged as such. It
// returns nil when the remote end closes the stream.
func PipeStdout(stream StdoutStreamClient, stdout, stderr io.Writer) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// Remote closed the stream; close our send side too.
			return stream.CloseSend()
		}
		if err != nil {
			// errors.Wrap, not Wrapf: there are no format arguments.
			return errors.Wrap(err, "Received error while reading attach stream")
		}

		target := stdout
		if resp.Stderr {
			target = stderr
		}
		// Write the chunk directly; wrapping each message in a
		// bytes.NewReader just to io.Copy it allocated a reader per message.
		if _, err := target.Write(resp.Output); err != nil {
			return errors.Wrap(err, "Error while copying data")
		}
	}
}

// PipeStdin reads input from Stdin and writes it to the grpc stream until
// stdin reaches EOF.
func PipeStdin(stream StdinStreamClient, stdin io.Reader) error {
	// Allocate the read buffer once; the original allocated a fresh 1 KiB
	// slice on every loop iteration. gRPC's Send marshals the message before
	// returning, so reusing the buffer across iterations is safe.
	buf := make([]byte, 1024)
	for {
		n, err := stdin.Read(buf)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return errors.Wrap(err, "Error while reading stdin to buffer")
		}

		if err := stream.Send(&containers.StdinStreamRequest{Input: buf[:n]}); err != nil {
			return errors.Wrap(err, "Sending to stream returned error")
		}
	}
}
package engine import ( "errors" "fmt" "runtime/debug" "strings" "time" "oh-my-posh/platform" "oh-my-posh/properties" "oh-my-posh/segments" "oh-my-posh/shell" "oh-my-posh/template" ) // Segment represent a single segment and it's configuration type Segment struct { Type SegmentType `json:"type,omitempty"` Tips []string `json:"tips,omitempty"` Style SegmentStyle `json:"style,omitempty"` PowerlineSymbol string `json:"powerline_symbol,omitempty"` InvertPowerline bool `json:"invert_powerline,omitempty"` Foreground string `json:"foreground,omitempty"` ForegroundTemplates template.List `json:"foreground_templates,omitempty"` Background string `json:"background,omitempty"` BackgroundTemplates template.List `json:"background_templates,omitempty"` LeadingDiamond string `json:"leading_diamond,omitempty"` TrailingDiamond string `json:"trailing_diamond,omitempty"` Template string `json:"template,omitempty"` Templates template.List `json:"templates,omitempty"` TemplatesLogic template.Logic `json:"templates_logic,omitempty"` Properties properties.Map `json:"properties,omitempty"` Interactive bool `json:"interactive,omitempty"` Alias string `json:"alias,omitempty"` writer SegmentWriter Enabled bool `json:"-"` text string env platform.Environment backgroundCache string foregroundCache string } // SegmentTiming holds the timing context for a segment type SegmentTiming struct { name string nameLength int active bool text string duration time.Duration } // SegmentWriter is the interface used to define what and if to write to the prompt type SegmentWriter interface { Enabled() bool Template() string Init(props properties.Properties, env platform.Environment) } // SegmentStyle the style of segment, for more information, see the constants type SegmentStyle string // SegmentType the type of segment, for more information, see the constants type SegmentType string const ( // Plain writes it without ornaments Plain SegmentStyle = "plain" // Powerline writes it Powerline style Powerline 
SegmentStyle = "powerline" // Accordion writes it Powerline style but collapses the segment when disabled instead of hiding Accordion SegmentStyle = "accordion" // Diamond writes the prompt shaped with a leading and trailing symbol Diamond SegmentStyle = "diamond" // ANGULAR writes which angular cli version us currently active ANGULAR SegmentType = "angular" // AWS writes the active aws context AWS SegmentType = "aws" // AZ writes the Azure subscription info we're currently in AZ SegmentType = "az" // AZFUNC writes current AZ func version AZFUNC SegmentType = "azfunc" // BATTERY writes the battery percentage BATTERY SegmentType = "battery" // Brewfather segment BREWFATHER SegmentType = "brewfather" // cds (SAP CAP) version CDS SegmentType = "cds" // Cloud Foundry segment CF SegmentType = "cf" // Cloud Foundry logged in target CFTARGET SegmentType = "cftarget" // CMAKE writes the active cmake version CMAKE SegmentType = "cmake" // CMD writes the output of a shell command CMD SegmentType = "command" // CONNECTION writes a connection's information CONNECTION SegmentType = "connection" // CRYSTAL writes the active crystal version CRYSTAL SegmentType = "crystal" // DART writes the active dart version DART SegmentType = "dart" // DENO writes the active deno version DENO SegmentType = "deno" // DOTNET writes which dotnet version is currently active DOTNET SegmentType = "dotnet" // EXECUTIONTIME writes the execution time of the last run command EXECUTIONTIME SegmentType = "executiontime" // EXIT writes the last exit code EXIT SegmentType = "exit" // FLUTTER writes the flutter version FLUTTER SegmentType = "flutter" // FOSSIL writes the fossil status FOSSIL SegmentType = "fossil" // GCP writes the active GCP context GCP SegmentType = "gcp" // GIT represents the git status and information GIT SegmentType = "git" // GOLANG writes which go version is currently active GOLANG SegmentType = "go" // HASKELL segment HASKELL SegmentType = "haskell" // IPIFY segment IPIFY SegmentType 
= "ipify"
	// ITERM inserts the Shell Integration prompt mark on iTerm zsh/bash/fish
	ITERM SegmentType = "iterm"
	// JAVA writes the active java version
	JAVA SegmentType = "java"
	// JULIA writes which julia version is currently active
	JULIA SegmentType = "julia"
	// KOTLIN writes the active kotlin version
	KOTLIN SegmentType = "kotlin"
	// KUBECTL writes the Kubernetes context we're currently in
	KUBECTL SegmentType = "kubectl"
	// LUA writes the active lua version
	LUA SegmentType = "lua"
	// NBGV writes the nbgv version information
	NBGV SegmentType = "nbgv"
	// NIGHTSCOUT is an open source diabetes system
	NIGHTSCOUT SegmentType = "nightscout"
	// NODE writes which node version is currently active
	NODE SegmentType = "node"
	// NPM writes the npm version
	NPM SegmentType = "npm"
	// NX writes which Nx version is currently active
	NX SegmentType = "nx"
	// OS writes the OS-specific icon
	OS SegmentType = "os"
	// OWM writes the weather coming from openweatherdata
	OWM SegmentType = "owm"
	// PATH represents the current path segment
	PATH SegmentType = "path"
	// PERL writes which perl version is currently active
	PERL SegmentType = "perl"
	// PHP writes which php version is currently active
	PHP SegmentType = "php"
	// PLASTIC represents the plastic scm status and information
	PLASTIC SegmentType = "plastic"
	// PROJECT writes the project version
	PROJECT SegmentType = "project"
	// PYTHON writes the virtual env name
	PYTHON SegmentType = "python"
	// R writes the R version
	R SegmentType = "r"
	// ROOT writes root symbol
	ROOT SegmentType = "root"
	// RUBY writes which ruby version is currently active
	RUBY SegmentType = "ruby"
	// RUST writes the cargo version information if cargo.toml is present
	RUST SegmentType = "rust"
	// SESSION represents the user info segment
	SESSION SegmentType = "session"
	// SHELL writes which shell we're currently in
	SHELL SegmentType = "shell"
	// SPOTIFY writes the SPOTIFY status for Mac
	SPOTIFY SegmentType = "spotify"
	// STRAVA is a sports activity tracker
	STRAVA SegmentType = "strava"
	// SVN writes the Subversion segment
	SVN SegmentType = "svn"
	// SWIFT writes the active swift version
	SWIFT SegmentType = "swift"
	// SYSTEMINFO writes system information (memory, cpu, load)
	SYSTEMINFO SegmentType = "sysinfo"
	// TERRAFORM writes the terraform workspace we're currently in
	TERRAFORM SegmentType = "terraform"
	// TEXT writes a text
	TEXT SegmentType = "text"
	// TIME writes the current timestamp
	TIME SegmentType = "time"
	// UI5TOOLING writes the UI5 Tooling segment
	UI5TOOLING SegmentType = "ui5tooling"
	// WAKATIME writes tracked time spend in dev editors
	WAKATIME SegmentType = "wakatime"
	// WINREG queries the Windows registry.
	WINREG SegmentType = "winreg"
	// WITHINGS queries the Withings API.
	WITHINGS SegmentType = "withings"
	// XMAKE writes the xmake version if xmake.lua is present
	XMAKE SegmentType = "xmake"
	// YTM writes YouTube Music information and status
	YTM SegmentType = "ytm"
)

// shouldIncludeFolder reports whether the segment should be rendered in the
// current working directory, based on the include/exclude folder properties.
// With no environment attached, the segment is always included.
func (segment *Segment) shouldIncludeFolder() bool {
	if segment.env == nil {
		return true
	}
	cwdIncluded := segment.cwdIncluded()
	cwdExcluded := segment.cwdExcluded()
	return cwdIncluded && !cwdExcluded
}

// isPowerline reports whether the segment uses a powerline-family style
// (Powerline or Accordion).
func (segment *Segment) isPowerline() bool {
	return segment.Style == Powerline || segment.Style == Accordion
}

// cwdIncluded reports whether the current working directory matches the
// segment's IncludeFolders property. An absent or empty property includes
// every directory.
func (segment *Segment) cwdIncluded() bool {
	value, ok := segment.Properties[properties.IncludeFolders]
	if !ok {
		// IncludeFolders isn't specified, everything is included
		return true
	}

	list := properties.ParseStringArray(value)
	if len(list) == 0 {
		// IncludeFolders is an empty array, everything is included
		return true
	}

	return segment.env.DirMatchesOneOf(segment.env.Pwd(), list)
}

// cwdExcluded reports whether the current working directory matches the
// segment's ExcludeFolders property, falling back to the legacy
// IgnoreFolders property when ExcludeFolders is absent.
func (segment *Segment) cwdExcluded() bool {
	value, ok := segment.Properties[properties.ExcludeFolders]
	if !ok {
		value = segment.Properties[properties.IgnoreFolders]
	}

	list := properties.ParseStringArray(value)
	return segment.env.DirMatchesOneOf(segment.env.Pwd(), list)
}

// shouldInvokeWithTip reports whether tip is one of the segment's
// configured Tips.
func (segment *Segment) shouldInvokeWithTip(tip string) bool {
	for _, t := range segment.Tips {
		if t == tip {
			return true
		}
	}
	return false
}

// foreground resolves (and caches) the segment's foreground color by
// evaluating ForegroundTemplates with Foreground as the fallback.
func (segment *Segment) foreground() string {
	if len(segment.foregroundCache) == 0 {
		segment.foregroundCache = segment.ForegroundTemplates.FirstMatch(segment.writer, segment.env, segment.Foreground)
	}
	return segment.foregroundCache
}

// background resolves (and caches) the segment's background color by
// evaluating BackgroundTemplates with Background as the fallback.
func (segment *Segment) background() string {
	if len(segment.backgroundCache) == 0 {
		segment.backgroundCache = segment.BackgroundTemplates.FirstMatch(segment.writer, segment.env, segment.Background)
	}
	return segment.backgroundCache
}

// mapSegmentWithWriter attaches env to the segment and selects the concrete
// SegmentWriter implementation for segment.Type, initializing it with the
// segment's properties. It returns an error for an unknown segment type.
func (segment *Segment) mapSegmentWithWriter(env platform.Environment) error {
	segment.env = env
	// One writer implementation per declared SegmentType.
	functions := map[SegmentType]SegmentWriter{
		ANGULAR:       &segments.Angular{},
		AWS:           &segments.Aws{},
		AZ:            &segments.Az{},
		AZFUNC:        &segments.AzFunc{},
		BATTERY:       &segments.Battery{},
		BREWFATHER:    &segments.Brewfather{},
		CDS:           &segments.Cds{},
		CF:            &segments.Cf{},
		CFTARGET:      &segments.CfTarget{},
		CMD:           &segments.Cmd{},
		CONNECTION:    &segments.Connection{},
		CRYSTAL:       &segments.Crystal{},
		CMAKE:         &segments.Cmake{},
		DART:          &segments.Dart{},
		DENO:          &segments.Deno{},
		DOTNET:        &segments.Dotnet{},
		EXECUTIONTIME: &segments.Executiontime{},
		EXIT:          &segments.Exit{},
		FLUTTER:       &segments.Flutter{},
		FOSSIL:        &segments.Fossil{},
		GCP:           &segments.Gcp{},
		GIT:           &segments.Git{},
		GOLANG:        &segments.Golang{},
		HASKELL:       &segments.Haskell{},
		IPIFY:         &segments.IPify{},
		ITERM:         &segments.ITerm{},
		JAVA:          &segments.Java{},
		JULIA:         &segments.Julia{},
		KOTLIN:        &segments.Kotlin{},
		KUBECTL:       &segments.Kubectl{},
		LUA:           &segments.Lua{},
		NBGV:          &segments.Nbgv{},
		NIGHTSCOUT:    &segments.Nightscout{},
		NODE:          &segments.Node{},
		NPM:           &segments.Npm{},
		NX:            &segments.Nx{},
		OS:            &segments.Os{},
		OWM:           &segments.Owm{},
		PATH:          &segments.Path{},
		PERL:          &segments.Perl{},
		PHP:           &segments.Php{},
		PLASTIC:       &segments.Plastic{},
		PROJECT:       &segments.Project{},
		PYTHON:        &segments.Python{},
		R:             &segments.R{},
		ROOT:          &segments.Root{},
		RUBY:          &segments.Ruby{},
		RUST:          &segments.Rust{},
		SESSION:       &segments.Session{},
		SHELL:         &segments.Shell{},
		SPOTIFY:       &segments.Spotify{},
		STRAVA:        &segments.Strava{},
		SVN:           &segments.Svn{},
		SWIFT:         &segments.Swift{},
		SYSTEMINFO:    &segments.SystemInfo{},
		TERRAFORM:     &segments.Terraform{},
		TEXT:          &segments.Text{},
		TIME:          &segments.Time{},
		UI5TOOLING:    &segments.UI5Tooling{},
		WAKATIME:      &segments.Wakatime{},
		WINREG:        &segments.WindowsRegistry{},
		WITHINGS:      &segments.Withings{},
		XMAKE:         &segments.XMake{},
		YTM:           &segments.Ytm{},
	}
	if segment.Properties == nil {
		segment.Properties = make(properties.Map)
	}
	if writer, ok := functions[segment.Type]; ok {
		writer.Init(segment.Properties, env)
		segment.writer = writer
		return nil
	}
	return errors.New("unable to map writer")
}

// string renders the segment's text. Templates (plural) take precedence
// when Template is unset; otherwise the single Template (falling back to
// the writer's default) is rendered, with any Templates result made
// available to it. Render errors are returned as the segment text.
func (segment *Segment) string() string {
	var templatesResult string

	if !segment.Templates.Empty() {
		templatesResult = segment.Templates.Resolve(segment.writer, segment.env, "", segment.TemplatesLogic)
		if len(segment.Template) == 0 {
			return templatesResult
		}
	}

	if len(segment.Template) == 0 {
		segment.Template = segment.writer.Template()
	}

	tmpl := &template.Text{
		Template:        segment.Template,
		Context:         segment.writer,
		Env:             segment.env,
		TemplatesResult: templatesResult,
	}
	text, err := tmpl.Render()
	if err != nil {
		return err.Error()
	}
	return text
}

// SetEnabled maps the segment to its writer and marks the segment enabled
// when the writer reports it should be shown, registering the writer's data
// in the template cache under the segment's alias (or type name). A panic
// anywhere in this path is recovered, reported, and leaves the segment
// enabled so the failure is visible in the prompt.
func (segment *Segment) SetEnabled(env platform.Environment) {
	defer func() {
		err := recover()
		if err == nil {
			return
		}
		// display a message explaining omp failed(with the err)
		message := fmt.Sprintf("\noh-my-posh fatal error rendering %s segment:%s\n\n%s\n", segment.Type, err, debug.Stack())
		fmt.Println(message)
		segment.Enabled = true
	}()
	err := segment.mapSegmentWithWriter(env)
	if err != nil || !segment.shouldIncludeFolder() {
		return
	}
	if segment.writer.Enabled() {
		segment.Enabled = true
		name := segment.Alias
		if len(name) == 0 {
			name = string(segment.Type)
		}
		env.TemplateCache().AddSegmentData(name, segment.writer)
	}
}

// SetText renders the segment text, disables the segment when the rendered
// text is effectively empty (whitespace only), and — unless the segment is
// Interactive — escapes shell-sensitive characters for bash/zsh.
func (segment *Segment) SetText() {
	if !segment.Enabled {
		return
	}
	segment.text = segment.string()
	segment.Enabled = len(strings.ReplaceAll(segment.text, " ", "")) > 0

	if segment.Interactive {
		return
	}

	// we have to do this to prevent bash/zsh from misidentifying escape sequences
	switch segment.env.Shell() {
	case shell.BASH:
		segment.text = strings.NewReplacer("`", "\\`", `\`, `\\`).Replace(segment.text)
	case shell.ZSH:
		segment.text = strings.NewReplacer("`", "\\`", `%`, `%%`).Replace(segment.text)
	}
}
// Package main is a minimal demo program.
package main

import "fmt"

// main writes the author's name, followed by a newline, to stdout.
func main() {
	const author = "Muhammad Hajid Al Akhtar"
	fmt.Printf("%s\n", author)
}
/*
Copyright 2021 The KodeRover Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package models

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

// Proxy is the MongoDB model describing an outbound proxy configuration.
type Proxy struct {
	ID primitive.ObjectID `bson:"_id,omitempty" json:"id,omitempty"`
	// Proxy protocol, "http" or "socks5"; only http proxies are supported for now.
	Type         string `bson:"type" json:"type"`
	Address      string `bson:"address" json:"address"`
	Port         int    `bson:"port" json:"port"`
	NeedPassword bool   `bson:"need_password" json:"need_password"`
	Username     string `bson:"username" json:"username"`
	Password     string `bson:"password" json:"password"`
	// Proxy usage: "app" means application proxy, "repo" means code-repository
	// proxy. Reserved field — currently always "default"; may be used later.
	Usage                  string `bson:"usage" json:"usage"`
	EnableRepoProxy        bool   `bson:"enable_repo_proxy" json:"enable_repo_proxy"`
	EnableApplicationProxy bool   `bson:"enable_application_proxy" json:"enable_application_proxy"`
	CreateTime             int64  `bson:"create_time" json:"create_time"`
	UpdateTime             int64  `bson:"update_time" json:"update_time"`
	UpdateBy               string `bson:"update_by" json:"update_by"`
}

// TableName returns the MongoDB collection name backing this model.
func (Proxy) TableName() string {
	return "proxy"
}

// GetProxyUrl renders the proxy as a URL of the form
// type://[username:password@]address:port. Credentials are embedded only
// when NeedPassword is set. NOTE(review): the credentials are interpolated
// verbatim and not URL-escaped — confirm callers never store reserved
// characters in Username/Password.
func (p *Proxy) GetProxyUrl() string {
	var uri string
	if p.NeedPassword {
		uri = fmt.Sprintf("%s://%s:%s@%s:%d",
			p.Type,
			p.Username,
			p.Password,
			p.Address,
			p.Port,
		)
	} else {
		uri = fmt.Sprintf("%s://%s:%d",
			p.Type,
			p.Address,
			p.Port,
		)
	}
	return uri
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package apps

import (
	"context"
	"path/filepath"
	"strconv"
	"time"

	"chromiumos/tast/ctxutil"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/apps"
	"chromiumos/tast/local/bundles/cros/apps/fixture"
	"chromiumos/tast/local/bundles/cros/apps/helpapp"
	"chromiumos/tast/local/bundles/cros/apps/pre"
	"chromiumos/tast/local/chrome/ash"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/faillog"
	"chromiumos/tast/local/input"
	"chromiumos/tast/testing"
	"chromiumos/tast/testing/hwdep"
)

// init registers the test with the tast framework, with stable/unstable
// variants for both logged-in and guest sessions.
func init() {
	testing.AddTest(&testing.Test{
		Func:         LaunchHelpAppFromShortcut,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Help app can be launched using shortcut Ctrl+Shift+/",
		Contacts: []string{
			"showoff-eng@google.com",
		},
		SoftwareDeps: []string{"chrome", "chrome_internal"},
		Params: []testing.Param{
			{
				Name:              "stable",
				Fixture:           fixture.LoggedIn,
				ExtraHardwareDeps: hwdep.D(pre.AppsStableModels),
				ExtraAttr:         []string{"group:mainline"},
			}, {
				Name:    "unstable",
				Fixture: fixture.LoggedIn,
				// b:238260020 - disable aged (>1y) unpromoted informational tests
				// ExtraAttr:         []string{"group:mainline", "informational"},
				ExtraHardwareDeps: hwdep.D(pre.AppsUnstableModels),
			}, {
				Name:              "stable_guest",
				Fixture:           fixture.LoggedInGuest,
				ExtraHardwareDeps: hwdep.D(pre.AppsStableModels),
				ExtraAttr:         []string{"group:mainline"},
			}, {
				Name:    "unstable_guest",
				Fixture: fixture.LoggedInGuest,
				// b:238260020 - disable aged (>1y) unpromoted informational tests
				// ExtraAttr: []string{"group:mainline", "informational"},
				ExtraHardwareDeps: hwdep.D(pre.AppsUnstableModels),
			},
		},
	})
}

// LaunchHelpAppFromShortcut verifies launching Help app from Ctrl+Shift+/.
func LaunchHelpAppFromShortcut(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn

	// Reserve time for the deferred UI-tree dump on failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()

	defer faillog.DumpUITreeOnError(cleanupCtx, s.OutDir(), s.HasError, tconn)

	kw, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard handle: ", err)
	}
	defer kw.Close()

	// On some low-end devices and guest mode sometimes Chrome is still
	// initializing when the shortcut keys are emitted. Check that the
	// app is showing up as installed before emitting the shortcut keys.
	if err := ash.WaitForChromeAppInstalled(ctx, tconn, apps.Help.ID, 30*time.Second); err != nil {
		s.Fatal("Failed to wait for Explore to be installed: ", err)
	}

	helpCtx := helpapp.NewContext(cr, tconn)

	// Exercise both known shortcuts for the Help app.
	shortcuts := []string{"Ctrl+Shift+/", "Ctrl+/"}
	for index, shortcut := range shortcuts {
		// Use "shortcut_{index}" as the subtest name.
		testName := "shortcut_" + strconv.Itoa(index)
		s.Run(ctx, testName, func(ctx context.Context, s *testing.State) {
			defer func() {
				outDir := filepath.Join(s.OutDir(), testName)
				faillog.DumpUITreeWithScreenshotOnError(ctx, outDir, s.HasError, cr, "ui_tree_"+testName)
				// The app may legitimately not be open if the shortcut failed.
				if err := helpCtx.Close()(ctx); err != nil {
					s.Log("Failed to close the app, may not have been opened: ", err)
				}
			}()

			ui := uiauto.New(tconn).WithTimeout(time.Minute)
			// Retry: the shortcut can be dropped while the session is busy.
			if err := ui.Retry(5, func(ctx context.Context) error {
				if err := kw.Accel(ctx, shortcut); err != nil {
					return errors.Wrapf(err, "failed to press %q keys", shortcut)
				}
				return helpapp.NewContext(cr, tconn).WaitForApp()(ctx)
			})(ctx); err != nil {
				s.Fatalf("Failed to launch or render Help app by shortcut %q: %v", shortcut, err)
			}
		})
	}
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package internal contains internal constants.
package internal

const (
	// CrasStopTimeFile is the file that stores the previous CRAS stop time.
	CrasStopTimeFile = "/var/lib/cras/stop"
)
// trap solves the "Trapping Rain Water" problem: given bar heights h, it
// returns the total units of water retained between the bars.
//
// For each index the water level is min(tallest bar to its left, tallest
// bar to its right); the trapped amount there is that level minus the
// bar's own height. Prefix/suffix maxima are precomputed, giving O(n)
// time and O(n) space.
func trap(h []int) int {
	n := len(h)
	// Fewer than three bars cannot hold any water.
	if n < 3 {
		return 0
	}

	// leftMax[i] is the tallest bar in h[0..i].
	leftMax := make([]int, n)
	leftMax[0] = h[0]
	for i := 1; i < n; i++ {
		if h[i] > leftMax[i-1] {
			leftMax[i] = h[i]
		} else {
			leftMax[i] = leftMax[i-1]
		}
	}

	// rightMax[i] is the tallest bar in h[i..n-1].
	rightMax := make([]int, n)
	rightMax[n-1] = h[n-1]
	for i := n - 2; i >= 0; i-- {
		if h[i] > rightMax[i+1] {
			rightMax[i] = h[i]
		} else {
			rightMax[i] = rightMax[i+1]
		}
	}

	total := 0
	for i := 0; i < n; i++ {
		level := leftMax[i]
		if rightMax[i] < level {
			level = rightMax[i]
		}
		total += level - h[i]
	}
	return total
}
// Copyright 2020 Ye Zi Jie. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: FishGoddess
// Email: fishgoddess@qq.com
// Created at 2020/03/03 14:58:21

package files

import (
	"errors"
	"os"
	"sync"
	"time"
)

// DurationRollingFile is a time sensitive file.
//
//	file := NewDurationRollingFile("D:/", time.Second)
//	defer file.Close()
//	file.Write([]byte("Hello!"))
//
// You can use it like using os.File!
type DurationRollingFile struct {

	// file points the writer which will be used this moment.
	file *os.File

	// directory is the target storing all created files.
	directory string

	// lastTime is the created time of current file above.
	lastTime time.Time

	// duration is the core field of this struct.
	// Every times currentTime - lastTime >= duration, the file will
	// roll to an entire new file for writing. This field should be always
	// larger than minDuration for some safe considerations. See minDuration.
	duration time.Duration

	// nameGenerator is for generating the name of every created file.
	// You can customize your format of filename by implementing this function.
	// Default is DefaultNameGenerator().
	nameGenerator NameGenerator

	// mu is a lock for safe concurrency.
	mu *sync.Mutex
}

const (
	// minDuration prevents io system from creating file too fast.
	// Default is one second.
	minDuration = 1 * time.Second
)

// NewDurationRollingFile creates a new duration rolling file that writes
// into files inside directory, rolling to a freshly created file every
// duration. File names come from the configured NameGenerator (default:
// DefaultNameGenerator()). Notice that duration's min value is one second
// (see minDuration); a smaller duration panics.
func NewDurationRollingFile(directory string, duration time.Duration) *DurationRollingFile {

	// Guard against a duration so small that rolling files would thrash the IO system.
	if duration < minDuration {
		panic(errors.New("Duration is smaller than " + minDuration.String() + "\n"))
	}

	return &DurationRollingFile{
		directory:     directory,
		duration:      duration,
		nameGenerator: DefaultNameGenerator(),
		mu:            &sync.Mutex{},
	}
}

// rollingToNextFile will roll to next file for drf.
func (drf *DurationRollingFile) rollingToNextFile(now time.Time) {

	// If creating the new file fails, keep using the current file and
	// retry on the next interval.
	newFile, err := CreateFileOf(drf.nameGenerator.NextName(drf.directory, now))
	if err != nil {
		return
	}

	// Close the file currently in use and switch to the new one.
	drf.file.Close()
	drf.file = newFile
	drf.lastTime = now
}

// ensureFileIsCorrect ensures drf is writing to a correct file this moment.
func (drf *DurationRollingFile) ensureFileIsCorrect() {
	now := time.Now()
	if drf.file == nil || now.Sub(drf.lastTime) >= drf.duration {
		drf.rollingToNextFile(now)
	}
}

// Write writes len(p) bytes from p to the underlying data stream.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
func (drf *DurationRollingFile) Write(p []byte) (n int, err error) {
	drf.mu.Lock()
	defer drf.mu.Unlock()

	// Make sure the current file is correct for this point in time.
	drf.ensureFileIsCorrect()
	return drf.file.Write(p)
}

// Close releases any resources using just moment.
// It returns error when closing.
func (drf *DurationRollingFile) Close() error {
	drf.mu.Lock()
	defer drf.mu.Unlock()
	return drf.file.Close()
}

// SetNameGenerator replaces drf.nameGenerator to newNameGenerator.
func (drf *DurationRollingFile) SetNameGenerator(newNameGenerator NameGenerator) {
	drf.mu.Lock()
	defer drf.mu.Unlock()
	drf.nameGenerator = newNameGenerator
}
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fhir

import (
	"encoding/json"
	"fmt"
	"strings"
)

// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND

// GroupMeasure is documented here http://hl7.org/fhir/ValueSet/group-measure
type GroupMeasure int

const (
	GroupMeasureMean GroupMeasure = iota
	GroupMeasureMedian
	GroupMeasureMeanOfMean
	GroupMeasureMeanOfMedian
	GroupMeasureMedianOfMean
	GroupMeasureMedianOfMedian
)

// MarshalJSON encodes the enum as its FHIR code string.
func (code GroupMeasure) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}

// UnmarshalJSON decodes a (quoted) FHIR code string into the enum,
// returning an error for unknown codes.
func (code *GroupMeasure) UnmarshalJSON(json []byte) error {
	s := strings.Trim(string(json), "\"")
	switch s {
	case "mean":
		*code = GroupMeasureMean
	case "median":
		*code = GroupMeasureMedian
	case "mean-of-mean":
		*code = GroupMeasureMeanOfMean
	case "mean-of-median":
		*code = GroupMeasureMeanOfMedian
	case "median-of-mean":
		*code = GroupMeasureMedianOfMean
	case "median-of-median":
		*code = GroupMeasureMedianOfMedian
	default:
		return fmt.Errorf("unknown GroupMeasure code `%s`", s)
	}
	return nil
}

// String returns the FHIR code for the value (same as Code).
func (code GroupMeasure) String() string {
	return code.Code()
}

// Code returns the FHIR code string for the value, or "<unknown>".
func (code GroupMeasure) Code() string {
	switch code {
	case GroupMeasureMean:
		return "mean"
	case GroupMeasureMedian:
		return "median"
	case GroupMeasureMeanOfMean:
		return "mean-of-mean"
	case GroupMeasureMeanOfMedian:
		return "mean-of-median"
	case GroupMeasureMedianOfMean:
		return "median-of-mean"
	case GroupMeasureMedianOfMedian:
		return "median-of-median"
	}
	return "<unknown>"
}

// Display returns the human-readable display text for the value.
func (code GroupMeasure) Display() string {
	switch code {
	case GroupMeasureMean:
		return "Mean"
	case GroupMeasureMedian:
		return "Median"
	case GroupMeasureMeanOfMean:
		return "Mean of Study Means"
	case GroupMeasureMeanOfMedian:
		// NOTE(review): "Medins" appears to reproduce the display text of the
		// upstream FHIR group-measure ValueSet (typo in the spec). This file is
		// generated; do not "fix" the string by hand — confirm against the
		// published ValueSet before changing.
		return "Mean of Study Medins"
	case GroupMeasureMedianOfMean:
		return "Median of Study Means"
	case GroupMeasureMedianOfMedian:
		return "Median of Study Medians"
	}
	return "<unknown>"
}

// Definition returns the formal definition text for the value.
func (code GroupMeasure) Definition() string {
	switch code {
	case GroupMeasureMean:
		return "Aggregated using Mean of participant values."
	case GroupMeasureMedian:
		return "Aggregated using Median of participant values."
	case GroupMeasureMeanOfMean:
		return "Aggregated using Mean of study mean values."
	case GroupMeasureMeanOfMedian:
		return "Aggregated using Mean of study median values."
	case GroupMeasureMedianOfMean:
		return "Aggregated using Median of study mean values."
	case GroupMeasureMedianOfMedian:
		return "Aggregated using Median of study median values."
	}
	return "<unknown>"
}
// Package main demonstrates slice aliasing: slices built on the same
// backing array see each other's writes.
package main

import "fmt"

// main shows that because s1 has spare capacity (len 10, cap 20), the
// append creating s2 does not reallocate, so both slices share one
// backing array and a write through s1 is visible through s2.
func main() {
	// Proving that the slices reference the same stretch of memory.
	s1 := make([]int, 10, 20)
	s2 := append(s1, 1, 2, 3)
	fmt.Println(s1, s2)

	// Assigning a new value only via slice 1 changes the same index/value seen by slice 2.
	s1[0] = 7
	fmt.Println(s1, s2)
}
package kuma

import (
	"context"

	"github.com/layer5io/meshery-adapter-library/adapter"
	"github.com/layer5io/meshery-adapter-library/status"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// installSampleApp applies (or, when del is true, deletes) each sample-app
// manifest template in the given namespace. It returns a status string and
// an error wrapping the first manifest that failed.
//
// NOTE(review): on success this always returns status.Installed, even when
// del is true and the app was just removed — confirm whether a removal
// status should be returned in that case.
func (kuma *Kuma) installSampleApp(del bool, namespace string, templates []adapter.Template) (string, error) {
	st := status.Installing

	if del {
		st = status.Removing
	}

	for _, template := range templates {
		err := kuma.applyManifest(del, namespace, []byte(template.String()))
		if err != nil {
			return st, ErrSampleApp(err, st)
		}
	}
	return status.Installed, nil
}

// sidecarInjection enables Kuma sidecar injection for a namespace by setting
// the "kuma.io/sidecar-injection" annotation, or removes the annotation
// again when del is true.
func (kuma *Kuma) sidecarInjection(namespace string, del bool) error {
	kclient := kuma.KubeClient
	if kclient == nil {
		return ErrNilClient
	}

	// updating the label on the namespace
	ns, err := kclient.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// updating the annotations on the namespace
	if ns.ObjectMeta.Annotations == nil {
		ns.ObjectMeta.Annotations = map[string]string{}
	}
	ns.ObjectMeta.Annotations["kuma.io/sidecar-injection"] = "enabled"
	// On deletion, drop the annotation that was just set.
	if del {
		delete(ns.ObjectMeta.Annotations, "kuma.io/sidecar-injection")
	}

	_, err = kclient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
	if err != nil {
		return err
	}

	return nil
}
// Package main is a test backend for the retina websocket hub: it serves
// the "echo", "add" and "sleep" queues and records every received message
// to a file.
package main

import (
	"flag"
	"fmt"
	"github.com/coopernurse/retina/ws"
	"log"
	"os"
	"os/signal"
	"strconv"
	"strings"
	"syscall"
	"time"
)

// run registers a backend handler for the echo/add/sleep queues on the hub
// at url and blocks serving requests until done receives a value. Every
// request body is also forwarded to msgs for recording.
//
// Queue semantics (selected by the X-Hub-Queue header):
//   - echo:  returns the body unchanged
//   - add:   body is comma-separated ints; returns their sum
//   - sleep: body is "millis,payload"; sleeps then returns the body
func run(url string, workers int, done chan bool, msgs chan string) {
	handler := func(headers map[string][]string, body []byte) (map[string][]string, []byte) {
		msgs <- string(body)
		queue, ok := headers["X-Hub-Queue"]
		if !ok || len(queue) < 1 {
			log.Println("backend: message missing X-Hub-Queue header")
			return map[string][]string{"X-Hub-Status": []string{"500"}}, []byte("Missing X-Hub-Queue header")
		} else {
			switch queue[0] {
			case "echo":
				return nil, body
			case "add":
				// Sum the comma-separated integers; unparsable parts count as 0.
				parts := strings.Split(string(body), ",")
				sum := 0
				for _, part := range parts {
					x, _ := strconv.Atoi(part)
					sum += x
				}
				return nil, []byte(strconv.Itoa(sum))
			case "sleep":
				parts := strings.Split(string(body), ",")
				if len(parts) == 2 {
					sleepMillis, _ := strconv.Atoi(parts[0])
					time.Sleep(time.Duration(sleepMillis) * time.Millisecond)
				}
				return nil, body
			default:
				return map[string][]string{"X-Hub-Status": []string{"500"}}, []byte("Unknown queue: " + queue[0])
			}
		}
	}
	// Subscribe to all three queues on the hub endpoint.
	url = url + "echo,add,sleep"
	retinaws.BackendServer(url, workers, handler, done)
}

// initSignalHandlers forwards termination signals as a shutdown message on
// done.
//
// NOTE(review): SIGKILL cannot be caught or handled (see os/signal docs),
// so listing syscall.SIGKILL here has no effect — consider removing it.
func initSignalHandlers(done chan bool) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGKILL)
	go func() {
		for sig := range c {
			log.Printf("backend: sending shutdown message - got signal: %v\n", sig)
			done <- true
		}
	}()
}

// main parses flags, sets up logging and the message-recording file, then
// runs the backend until a shutdown signal arrives.
func main() {
	var wsUrl string
	var logFname string
	var msgFname string
	var workers int
	flag.StringVar(&wsUrl, "u", "ws://localhost:9391/", "Retina websocket endpoint URL")
	flag.StringVar(&logFname, "l", "", "Path to log file to write to")
	flag.StringVar(&msgFname, "m", "", "Path to msg file to write to")
	flag.IntVar(&workers, "w", 10, "Number of workers")
	flag.Parse()

	if msgFname == "" {
		log.Fatalln("-m flag not provided")
	}

	if logFname != "" {
		logFile, err := os.Create(logFname)
		if err != nil {
			log.Fatalln("Cannot write to log file:", logFile, err)
		}
		defer logFile.Close()
		log.SetOutput(logFile)
	}

	msgFile, err := os.Create(msgFname)
	if err != nil {
		log.Fatalln("Cannot write to msg file:", msgFname, err)
	}
	defer msgFile.Close()

	done := make(chan bool)
	initSignalHandlers(done)

	// Drain received messages into the msg file until msgs is closed.
	msgs := make(chan string, workers)
	go func() {
		for {
			msg, ok := <-msgs
			if !ok {
				return
			}
			fmt.Fprintln(msgFile, msg)
		}
	}()

	log.Println("backend: starting")
	run(wsUrl, workers, done, msgs)
	close(msgs)
	msgFile.Sync()
	log.Println("backend: exiting")
}
package BLC

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// ProofOfWork pairs a block with the difficulty target its hash must stay
// below.
type ProofOfWork struct {
	Block  *Block   // The block which will be calculate
	target *big.Int // a block hash should satisfy hash < target
}

// PoWFactory builds a ProofOfWork for block, deriving the target as
// 1 << (256 - targetBit).
func PoWFactory(block *Block) *ProofOfWork {
	target := big.NewInt(1)
	target = target.Lsh(target, 256-targetBit)
	return &ProofOfWork{block, target}
}

// Run mines the block: it increments a nonce until
// sha256(prepareData || nonce) interpreted as a big integer is below the
// target, then returns the winning hash and nonce.
func (pow *ProofOfWork) Run() ([]byte, int64) {
	nonce := 0
	var hashInt big.Int
	var hash [32]byte
	dataBytes := pow.prepareData()
	for {
		// NOTE: the inner dataBytes deliberately shadows the prepared data,
		// re-joining it with the current nonce each iteration.
		dataBytes := bytes.Join(
			[][]byte{ // slice of []byte
				dataBytes,
				IntToHex(int64(nonce)),
			},
			[]byte{},
		)
		hash = sha256.Sum256(dataBytes)
		hashInt.SetBytes(hash[:])
		if pow.target.Cmp(&hashInt) == 1 {
			fmt.Printf("\nhash: %x\n", hash)
			// hash: 00ea9e3743900b6086acbb86390457f72fb3a4908609bd900536064f8e89448d
			break
		}
		nonce = nonce + 1
	}
	return hash[:], int64(nonce)
}

// prepareData serializes the nonce-independent block fields (previous hash,
// transactions hash, timestamp, difficulty, height) into one byte slice.
func (pow *ProofOfWork) prepareData() []byte {
	data := bytes.Join([][]byte{
		pow.Block.PrevBlockHash,
		pow.Block.HashTransactions(),
		IntToHex(pow.Block.Timestamp),
		IntToHex(int64(targetBit)),
		IntToHex(int64(pow.Block.Height)),
	},
		[]byte{},
	)
	return data
}

// IsValid reports whether the block's stored hash satisfies the difficulty
// target (hash < target).
func (pow *ProofOfWork) IsValid() bool {
	var hashInt big.Int
	hashInt.SetBytes(pow.Block.BlockHash)
	if pow.target.Cmp(&hashInt) == 1 {
		return true
	}
	return false
}
package views

import (
	"PixelToolWindow/models"

	"github.com/asaskevich/EventBus"
	"github.com/therecipe/qt/widgets"
)

/*
--- Bus signal information ---
Tag :sideWin:settingInfo
*/

/*
SideWindow :Side window
*/
type SideWindow struct {
	// --- Enviroment Setting group ---
	gammaAdjuster       *SliderInput         // slider object for gamma correction
	lightSourceSelector *ComboBoxSelector    // light source selector
	patchSizeInput      *PixelSizeInputField // patch size setting

	// --- file save group ---
	stdPatchSave *SavePathField // standard Macbeth Patch save point
	devPatchSave *SavePathField // Simulated Macbeth Patch save point
	deltaESave   *SavePathField // deltaE save point

	// --- input file group ---
	deviceQEData   *InputField // device QE file input
	whitePixelData *InputField // white pixel file input
	linearMatData  *InputField // linear matrix elem file input

	// Apply button
	applybutton *widgets.QPushButton // apply button
	//defaultButton *widgets.QPushButton // defalut setting loading button

	// Cell
	Cell *widgets.QWidget
}

/*
NewSideWindow :initializer of SideWindow. It assembles the three setting
groups plus the Apply button, and wires the Apply click to collect, validate
and publish a models.SettingInfo on the "sideWin:settingInfo" bus topic.
Validation failures are reported on the "main:message" topic instead.
*/
func NewSideWindow(bus EventBus.Bus) *SideWindow {
	obj := new(SideWindow)

	// initialize widgets
	obj.Cell = widgets.NewQWidget(nil, 0)

	// initalize button
	obj.applybutton = widgets.NewQPushButton2("Apply", obj.Cell)
	//obj.defaultButton = widgets.NewQPushButton2("Default Setting", obj.Cell)

	// initialize each gourp
	envGroup := obj.setupEnvGroup()
	fileSaveGroup := obj.setFileSaveGroup()
	inputFileGroup := obj.setInputFileGroup()

	// layout
	layout := widgets.NewQVBoxLayout()
	layout.SetContentsMargins(8, 8, 8, 8)
	layout.AddWidget(envGroup, 0, 0)
	layout.AddWidget(fileSaveGroup, 0, 0)
	layout.AddWidget(inputFileGroup, 0, 0)
	layout.AddWidget(obj.applybutton, 0, 0)
	//layout.AddWidget(obj.defaultButton, 0, 0)

	// apply layout
	obj.Cell.SetLayout(layout)

	// action connection
	obj.applybutton.ConnectClicked(func(checked bool) {
		// Collect every field into one settings object.
		info := new(models.SettingInfo)
		// info.Gamma = obj.gammaAdjuster.Value
		info.LightSource = obj.lightSourceSelector.SelectedItem

		// patch size
		info.PatchSize.H = obj.patchSizeInput.HorizontalSize
		info.PatchSize.V = obj.patchSizeInput.VerticalSize

		// field
		info.StdPatchSavePath = obj.stdPatchSave.textLabelForPath.Text()
		info.DevPatchSavePath = obj.devPatchSave.textLabelForPath.Text()
		info.DeltaESavePath = obj.deltaESave.textLabelForPath.Text()
		info.DeiceQEDataPath = obj.deviceQEData.textField.Text()
		info.WhitePixelDataPath = obj.whitePixelData.textField.Text()
		info.LinearMatrixDataPath = obj.linearMatData.textField.Text()

		// validation: every path field must be non-empty; report each
		// category's failure separately, publish the info only if all pass.
		validationStatus := true
		if !(obj.validation(info.StdPatchSavePath) &&
			obj.validation(info.DevPatchSavePath) &&
			obj.validation(info.DeltaESavePath)) {
			errorMessage := "We found some empty fields in Save Path category"
			bus.Publish("main:message", errorMessage)
			validationStatus = false
		}
		if !obj.validation(info.DeiceQEDataPath) {
			errorMessage := "Device QE data is missing"
			bus.Publish("main:message", errorMessage)
			validationStatus = false
		}
		if !obj.validation(info.WhitePixelDataPath) {
			errorMessage := "White Pixel data is missing"
			bus.Publish("main:message", errorMessage)
			validationStatus = false
		}
		if !obj.validation(info.LinearMatrixDataPath) {
			errorMessage := "Linear matrix elements data is missing"
			bus.Publish("main:message", errorMessage)
			validationStatus = false
		}

		if validationStatus {
			bus.Publish("sideWin:settingInfo", info)
		}
	})

	return obj
}

// setupEnvGroup builds the "Simulation Enviroment Setting" group: gamma
// slider, light source selector and patch size input.
func (sw *SideWindow) setupEnvGroup() *widgets.QGroupBox {
	sw.gammaAdjuster = NewSliderInput("Gamma", 0.24)
	sw.lightSourceSelector = NewComboBoxSelector("Light Source", []string{"D65", "D50", "Ill-A"})
	sw.patchSizeInput = NewPixelSizeInputField("Patch Size", 100, 100)

	layout := widgets.NewQVBoxLayout()
	layout.AddWidget(sw.gammaAdjuster.Cell, 0, 0)
	layout.AddWidget(sw.lightSourceSelector.Cell, 0, 0)
	layout.AddWidget(sw.patchSizeInput.Cell, 0, 0)

	group := widgets.NewQGroupBox2("Simulation Enviroment Setting", nil)
	group.SetLayout(layout)

	return group
}

// setFileSaveGroup builds the "File Save Setting" group with the three
// save-path fields.
func (sw *SideWindow) setFileSaveGroup() *widgets.QGroupBox {
	sw.stdPatchSave = NewSavePathField("Std Patch Save")
	sw.devPatchSave = NewSavePathField("Dev Patch Save")
	sw.deltaESave = NewSavePathField("DeltaE Data Save")

	layout := widgets.NewQVBoxLayout()
	layout.AddWidget(sw.stdPatchSave.Cell, 0, 0)
	layout.AddWidget(sw.devPatchSave.Cell, 0, 0)
	layout.AddWidget(sw.deltaESave.Cell, 0, 0)

	group := widgets.NewQGroupBox2("File Save Setting", nil)
	group.SetLayout(layout)

	return group
}

// setInputFileGroup builds the "Input File information" group with the
// three input-file fields.
func (sw *SideWindow) setInputFileGroup() *widgets.QGroupBox {
	sw.deviceQEData = NewInputField("Device QE", "Device QE raw data")
	sw.whitePixelData = NewInputField("White Pixel", "White pixel raw data")
	sw.linearMatData = NewInputField("Linear Matrix", "Linear Matrix element data")

	layout := widgets.NewQVBoxLayout()
	layout.AddWidget(sw.deviceQEData.Cell, 0, 0)
	layout.AddWidget(sw.whitePixelData.Cell, 0, 0)
	layout.AddWidget(sw.linearMatData.Cell, 0, 0)

	group := widgets.NewQGroupBox2("Input File information", nil)
	group.SetLayout(layout)

	return group
}

// validation reports whether str is non-empty.
func (sw *SideWindow) validation(str string) bool {
	if str == "" {
		return false
	}
	return true
}
package chat

import (
	"github.com/gorilla/websocket"
	uuid "github.com/satori/go.uuid"
	"github.com/sirupsen/logrus"
)

// Community is the top-level registry of chat hubs, sharing one message
// service and logger across them.
type Community struct {
	hubs    map[*Hub]bool           // set of active hubs
	handler MessageServiceInterface // message-processing service shared by hubs
	logger  *logrus.Logger
}

// Hub is a single chat room: it tracks connected clients and fans
// broadcast payloads out to them.
type Hub struct {
	id         uuid.UUID        // unique identifier for this hub
	clients    map[*Client]bool // set of connected clients
	register   chan *Client     // clients joining the hub
	unregister chan *Client     // clients leaving the hub
	broadcast  chan []byte      // payloads to fan out to all clients
	handler    MessageServiceInterface
	logger     *logrus.Logger
}

// Client is one websocket participant attached to a Hub.
type Client struct {
	hub       *Hub            // hub this client belongs to
	ws        *websocket.Conn // underlying websocket connection
	broadcast chan []byte     // outbound payloads for this client
	logger    *logrus.Logger
}

// Message is the JSON wire format of a single chat message.
type Message struct {
	ID        string `json:"id"`
	Author    string `json:"author"`
	Content   string `json:"content"`
	CreatedAt int64  `json:"created_at"`
}

// MessageServiceInterface processes raw payloads for a hub and retrieves a
// hub's stored message history.
type MessageServiceInterface interface {
	Process(uuid.UUID, []byte) []byte
	GetMessageCollection(uuid.UUID) [][]byte
}

// MessageRepositoryInterface persists messages keyed by hub and returns a
// hub's stored messages.
type MessageRepositoryInterface interface {
	AddMessage(string, *Message) error
	GetMessageCollection(string) ([]Message, error)
}
package future // A thennable function type Promise interface { OnSuccess() OnFailure() }
// Package contradb contains functions for interacting with ContraDB.
package contradb

import (
	"errors"
	"fmt"
	"net"
	"net/url"
	"reflect"
	"runtime/debug"
	"time"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/stdlib"
	"github.com/jmoiron/sqlx"
	"go.uber.org/atomic"

	"github.com/ajruckman/ContraCore/internal/system"
)

var (
	xdb *sqlx.DB  // The sqlx connection to ContraDB.
	pdb *pgx.Conn // The PGX connection to ContraDB.

	failedOnce atomic.Bool // Whether the last connection failed.
)

// Package setup function. Parses and reports the configured database URL,
// performs the initial connect/ping, and starts the background monitor.
func Setup() {
	dbURL, err := url.Parse(system.ContraDBURL)
	if err != nil {
		system.Console.Error("invalid ContraCore database URL")
		panic(err)
	}

	system.Console.Info("ContraDB address: ", dbURL.Host)

	connect()
	ping()

	go monitor()
}

// Attempts to connect to ContraDB. On success it also acquires a raw PGX
// connection, flips the online flag, and loads config on first connect.
// Connection errors are logged only once per outage (failedOnce).
func connect() {
	var err error

	xdb, err = sqlx.Connect("pgx", system.ContraDBURL)
	if err != nil {
		if !failedOnce.Load() {
			system.Console.Error("failed to connect to PostgreSQL database server with error:")
			system.Console.Error(errors.Unwrap(err)) // Don't print username + password
			system.ContraDBOnline.Store(false)
			failedOnce.Store(true)
		}
	} else {
		pdb, err = stdlib.AcquireConn(xdb.DB)
		if err != nil {
			system.Console.Error("failed to acquire PGX connection with error:")
			system.Console.Error(err)
			system.ContraDBOnline.Store(false)
		} else {
			system.Console.Info("connected to ContraDB")
			system.ContraDBOnline.Store(true)
			failedOnce.Store(false)

			if !configLoaded.Load() {
				ReadConfig()
			}
		}
	}
}

// Re-pings ContraDB every 10 seconds to keep the online state current.
func monitor() {
	for range time.Tick(time.Second * 10) {
		ping()
	}
}

// ErrContraDBOffline is returned in place of the original error when an
// operation failed because ContraDB is unreachable.
type ErrContraDBOffline struct {
}

func (e *ErrContraDBOffline) Error() string {
	return "ContraDB is disconnected"
}

// Reports whether err looks like a connectivity failure (a net.OpError, or
// pgconn's connection-lock error, matched by type name).
func checkOfflineError(err error) bool {
	if err == nil {
		return false
	}

	_, isOpErr := errors.Unwrap(err).(*net.OpError)
	return isOpErr || reflect.TypeOf(err).String() == "*pgconn.connLockError"
}

// Maps connectivity failures to ErrContraDBOffline (also flipping the
// offline state); any other error is returned unchanged.
func errOfflineOrOriginal(err error) error {
	if checkOfflineError(err) {
		offline(err)
		return &ErrContraDBOffline{}
	} else {
		return err
	}
}

// Pings ContraDB to trigger online/offline code. The actual ping is
// currently disabled; only a reconnect is attempted when either handle is
// nil.
func ping() {
	//var err error

	if xdb == nil || pdb == nil {
		connect()
		return
	}

	//err = pdb.Ping(context.Background())
	//if err != nil {
	//	fmt.Println(err)
	//	offline(err)
	//} else {
	//	online()
	//}
}

// Marks ContraDB offline (once per outage) and logs why.
func offline(err error) {
	if system.ContraDBOnline.Load() {
		if !checkOfflineError(err) {
			system.Console.Error("failed to ping ContraDB with unanticipated error:")
			system.Console.Error(err.Error())
		} else {
			fmt.Println(string(debug.Stack()))
			system.Console.Error("failed to ping ContraDB because it is offline")
		}
		system.ContraDBOnline.Store(false)
	}
}

// Marks ContraDB back online after a successful health check.
func online() {
	if !system.ContraDBOnline.Load() {
		system.Console.Info("PostgreSQL health check succeeded")
		system.ContraDBOnline.Store(true)
	}
}
package main import ( "fmt" "github.com/gin-gonic/gin" "github.com/goweb/config" "os" "log" "io" "github.com/goweb/router" _ "github.com/goweb/db" "github.com/goweb/middleware" "github.com/swaggo/gin-swagger" "github.com/swaggo/gin-swagger/swaggerFiles" _ "github.com/goweb/docs" ) func main() { fmt.Printf("gin version %s:", gin.Version) if config.ServerConfig.Env != "dev" { gin.SetMode(gin.ReleaseMode) gin.DisableConsoleColor() logFile, err := os.OpenFile(config.ServerConfig.LogFile, os.O_WRONLY | os.O_APPEND | os.O_CREATE, 0666) if err != nil { log.Fatal("open log file failed") os.Exit(-1) } gin.DefaultWriter = io.MultiWriter(logFile) } app := gin.New() app.GET("/swagger/*any",ginSwagger.WrapHandler(swaggerFiles.Handler)) app.Use(gin.Logger()) app.Use(gin.Recovery()) app.Use(middleware.Auth) router.Route(app) app.Run(":8081") }
/* * Copyright 2020 zpxio (Jeff Sharpe) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package generator import "strings" // Token represents a single item which can be placed into the generated output of a Generator. type Token struct { Category string Content string Rarity float64 Properties map[string]string SetVars map[string]string } // Properties defines the structure used to store token properties. type Properties map[string]string // BuildToken builds a new Token based upon the supplied information. No SetVars are defined. If // you want to set State variables on render, use OnRenderSet to define them. func BuildToken(category string, content string, rarity float64, tags Properties) Token { t := Token{ Category: category, Content: content, Rarity: rarity, Properties: tags, SetVars: make(map[string]string), } return t } // OnRenderSet defines a State variable to set when this Token is rendered to Generator output. func (t *Token) OnRenderSet(variable string, value string) { t.SetVars[variable] = value } // Normalize updates the Token to ensure that it matches required behaviors. Categories must not start // or end with whitespace. Rarities must not be zero or negative. If the Rarity is invalid, it is set // to a default of 1.0 func (t *Token) Normalize() { t.Category = strings.TrimSpace(t.Category) if t.Rarity <= 0.0 { t.Rarity = 1.0 } } // IsValid checks to see if this Token is valid and usable for generation. 
Invalid Tokens may become valid // if Normalize is called on them, but this is not guaranteed. If you are reading or creating Tokens with // non-validated input, you should call Normalize before checking validity. func (t *Token) IsValid() bool { if t.Category == "" { return false } if t.Content == "" { return false } if t.Rarity <= 0.0 { return false } return true }
package fileUtil import ( "bytes" "compress/zlib" "io" ) // 字节压缩(zlib方式压缩) // 参数: // data:待压缩的数组 // level:等级 // 返回值: // 1.压缩后的数据 // 2.错误对象 func Zlib(data []byte, level int) ([]byte, error) { var buffer bytes.Buffer zlibWriter, err := zlib.NewWriterLevelDict(&buffer, level, nil) if err != nil { return nil, err } zlibWriter.Write(data) zlibWriter.Close() return buffer.Bytes(), nil } // 字节解压(zlib方式压缩) // data:待解压的数据 // 返回值: // 1.解压后的数据 // 2.错误对象 func UnZlib(data []byte) ([]byte, error) { dataReader := bytes.NewReader(data) zlibReader, err := zlib.NewReader(dataReader) if err != nil { return nil, err } defer zlibReader.Close() var buffer bytes.Buffer _, err = io.Copy(&buffer, zlibReader) if err != nil { return nil, err } return buffer.Bytes(), nil }
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package audio

import (
	"context"
	"encoding/json"
	"strconv"
	"time"

	"chromiumos/tast/common/perf"
	"chromiumos/tast/common/testexec"
	"chromiumos/tast/testing"
)

// schedPolicy selects the scheduling policy passed to cyclic_bench.py.
type schedPolicy int

const (
	// rrSched uses rr as the scheduler.
	rrSched schedPolicy = iota
	// otherSched uses other(normal) as the scheduler.
	otherSched
)

// affinity selects which set of processors the cyclictest threads run on.
type affinity int

const (
	// defaultAff will use all the processors in round-robin order.
	defaultAff affinity = iota
	// smallCore will run all the threads on a single small core.
	smallCore
	// bigCore will run all the threads on a single big core.
	bigCore
)

// schedConfig describes how a process is scheduled.
type schedConfig struct {
	Policy   schedPolicy // the schedule policy.
	Priority int         // Priority of the process. If `Policy` is real time, `Priority` is real time priority. If `Policy` is CFS, `Priority` specify the nice value.
}

// cyclicTestParameters contains all the data needed to run a single test iteration.
type cyclicTestParameters struct {
	Config       schedConfig   // The schedule config of the cyclictest.
	Threads      int           // Number of threads.
	Interval     time.Duration // Interval time.
	Loops        int           // Number of times.
	Affinity     affinity      // Run cyclictest threads on which sets of processors.
	P99Threshold time.Duration // P99 latency threshold.
	StressConfig *schedConfig  // The schedule config of the stress process. if `StressConfig` is nil, no stress process will be run.
}

const (
	// crasPriority indicates the rt-priority of cras.
	crasPriority = 12
	// crasClientPriority indicates the rt-priority of cras client.
	crasClientPriority = 10
	// defaultStressPriority indicates the default rt-priority of stress threads.
	defaultStressPriority = 20
	// defaultInterval is the default interval used in cyclictest.
	defaultInterval = 10000 * time.Microsecond
	// defaultLoops is the default number of loops tested in cyclictest.
	defaultLoops = 6000
	// defaultP99Threshold is the default p99 latency threshold allowed in cyclictest.
	defaultP99Threshold = 200 * time.Microsecond
	// defaultStressWorker is the number of workers spawned in the stress test per cpu thread.
	defaultStressWorker = 2
)

// init registers one CyclicBench variant per scheduling scenario: rr vs
// nice-based scheduling, 1 vs 4 threads, with and without background stress,
// and (on arm) pinned to a small or big core. Each variant's name encodes
// policy, priority, thread count, and interval.
func init() {
	testing.AddTest(&testing.Test{
		Func:         CyclicBench,
		Desc:         "Benchmarks for scheduling latency with cyclictest binary",
		Contacts:     []string{"eddyhsu@chromium.org", "paulhsia@chromium.org", "cychiang@chromium.org"},
		Attr:         []string{"group:crosbolt", "crosbolt_perbuild"},
		SoftwareDeps: []string{"cras"},
		Timeout:      3 * time.Minute,
		Params: []testing.Param{
			{
				Name: "rr12_1thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasPriority,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: defaultP99Threshold,
					StressConfig: nil,
				},
			},
			{
				Name: "rr10_1thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasClientPriority,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: defaultP99Threshold,
					StressConfig: nil,
				},
			},
			{
				Name: "rr12_4thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasPriority,
					},
					Threads:      4,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: defaultP99Threshold,
					StressConfig: nil,
				},
			},
			{
				Name: "rr10_4thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasClientPriority,
					},
					Threads:      4,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: defaultP99Threshold,
					StressConfig: nil,
				},
			},
			{
				Name: "rr12_1thread_10ms_stress_rr20_2workers_per_cpu",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasPriority,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: defaultP99Threshold,
					StressConfig: &schedConfig{
						Policy:   rrSched,
						Priority: defaultStressPriority,
					},
				},
			},
			{
				Name: "rr12_1thread_10ms_stress_nice_p0_2workers_per_cpu",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasPriority,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: defaultP99Threshold,
					StressConfig: &schedConfig{
						Policy:   otherSched,
						Priority: 0,
					},
				},
			},
			{
				Name: "nice_p0_1thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   otherSched,
						Priority: 0,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: 1000 * time.Microsecond,
					StressConfig: nil,
				},
			},
			{
				Name: "nice_n20_1thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   otherSched,
						Priority: -20,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: 500 * time.Microsecond,
					StressConfig: nil,
				},
			},
			{
				Name: "nice_p19_1thread_10ms",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   otherSched,
						Priority: 19,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: 5000 * time.Microsecond,
					StressConfig: nil,
				},
			},
			{
				Name: "nice_p0_1thread_10ms_stress_nice_p0_2workers_per_cpu",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   otherSched,
						Priority: 0,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     defaultAff,
					P99Threshold: 30000 * time.Microsecond,
					StressConfig: &schedConfig{
						Policy:   otherSched,
						Priority: 0,
					},
				},
			},
			{
				Name: "rr12_1thread_10ms_small_core",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasPriority,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     smallCore,
					P99Threshold: defaultP99Threshold,
					StressConfig: nil,
				},
				ExtraSoftwareDeps: []string{"arm"}, // arm has heterogeneous cores.
			},
			{
				Name: "rr12_1thread_10ms_big_core",
				Val: cyclicTestParameters{
					Config: schedConfig{
						Policy:   rrSched,
						Priority: crasPriority,
					},
					Threads:      1,
					Interval:     defaultInterval,
					Loops:        defaultLoops,
					Affinity:     bigCore,
					P99Threshold: defaultP99Threshold,
					StressConfig: nil,
				},
				ExtraSoftwareDeps: []string{"arm"}, // arm has heterogeneous cores.
			},
		},
	})
}

// String returns the policy name in the form cyclic_bench.py expects.
func (s schedPolicy) String() string {
	return []string{"rr", "other"}[s]
}

// String returns the affinity name in the form cyclic_bench.py expects.
func (a affinity) String() string {
	return []string{"default", "small_core", "big_core"}[a]
}

// CyclicBench runs cyclic_bench.py with the variant's parameters, parses its
// JSON per-thread latency statistics, and records min/p50/p99/max latency (in
// microseconds) as perf metrics. A p99 above the variant's threshold is only
// logged, not failed, since this is a benchmark rather than a pass/fail test.
func CyclicBench(ctx context.Context, s *testing.State) {
	param := s.Param().(cyclicTestParameters)

	// Build the cyclic_bench.py command line from the variant parameters.
	cmdStr := []string{"cyclic_bench.py",
		"--policy=" + param.Config.Policy.String(),
		"--priority=" + strconv.Itoa(param.Config.Priority),
		"--interval=" + strconv.Itoa(int(param.Interval/time.Microsecond)),
		"--threads=" + strconv.Itoa(param.Threads),
		"--loops=" + strconv.Itoa(param.Loops),
		"--affinity=" + param.Affinity.String(),
		"--json",
	}
	if param.StressConfig != nil {
		cmdStr = append(cmdStr,
			"--stress_policy="+param.StressConfig.Policy.String(),
			"--stress_priority="+strconv.Itoa(param.StressConfig.Priority),
			"--workers_per_cpu="+strconv.Itoa(defaultStressWorker))
	}
	out, err := testexec.CommandContext(ctx, cmdStr[0], cmdStr[1:]...).Output(testexec.DumpLogOnError)
	if err != nil {
		s.Fatal("Failed to execute cyclic_bench.py: ", err)
	}

	// Latencies in the JSON output are in microseconds.
	stats := struct {
		CyclicTestStat []struct {
			ThreadID float64 `json:"thread_id"`
			Min      float64 `json:"min"`
			Median   float64 `json:"median"`
			P99      float64 `json:"p99"`
			Max      float64 `json:"max"`
		} `json:"stats"`
	}{}
	err = json.Unmarshal(out, &stats)
	if err != nil {
		s.Error("Failed to parse result file: ", err)
	}

	p := perf.NewValues()
	for _, stat := range stats.CyclicTestStat {
		threadID := int(stat.ThreadID)
		name := "Thread_" + strconv.Itoa(threadID)
		minLatency := perf.Metric{
			Name:      name,
			Variant:   "min_latency",
			Unit:      "us",
			Direction: perf.SmallerIsBetter}
		p.Set(minLatency, stat.Min)
		medianLatency := perf.Metric{
			Name:      name,
			Variant:   "p50_latency",
			Unit:      "us",
			Direction: perf.SmallerIsBetter}
		p.Set(medianLatency, stat.Median)
		p99Latency := perf.Metric{
			Name:      name,
			Variant:   "p99_latency",
			Unit:      "us",
			Direction: perf.SmallerIsBetter}
		p.Set(p99Latency, stat.P99)
		maxLatency := perf.Metric{
			Name:      name,
			Variant:   "max_latency",
			Unit:      "us",
			Direction: perf.SmallerIsBetter}
		p.Set(maxLatency, stat.Max)
		if stat.P99 > float64(param.P99Threshold/time.Microsecond) {
			s.Log("p99 latency exceeds threshold: ", stat.P99, " > ", param.P99Threshold)
		}
	}
	if err := p.Save(s.OutDir()); err != nil {
		s.Error("Failed saving perf data: ", err)
	}
}
package lifting import ( "sort" "strconv" "time" "cloud.google.com/go/civil" ) func parseInt(value string) (int, error) { if len(value) < 1 { return 0, nil } i, errs := strconv.Atoi(value) return i, errs } // Category is the category of exercises on a date type Category struct { Category string Reps []Repetition } // Group groups by date type Group struct { Date civil.Date Categories []Category } // Weekday computes the weekday for the group's Date func (r *Group) Weekday() string { date := time.Date(r.Date.Year, r.Date.Month, r.Date.Day, 0, 0, 0, 0, time.UTC) return date.Weekday().String() } func mapGroup(reps []Repetition) map[civil.Date]map[string][]Repetition { m := make(map[civil.Date]map[string][]Repetition) for _, rep := range reps { date, present := m[rep.SessionDate] if !present { m[rep.SessionDate] = make(map[string][]Repetition) m[rep.SessionDate][rep.Category] = []Repetition{rep} } else { exercise, present := date[rep.Category] if !present { m[rep.SessionDate][rep.Category] = []Repetition{rep} } else { m[rep.SessionDate][rep.Category] = append(exercise, rep) } } } return m } // Groups is a collection of Group. type Groups []Group func (s Groups) Len() int { return len(s) } func (s Groups) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s Groups) Less(i, j int) bool { if s[i].Date.Year == s[j].Date.Year { if s[i].Date.Month > s[j].Date.Month { return true } if s[i].Date.Month == s[j].Date.Month { return s[i].Date.Day > s[j].Date.Day } } if s[i].Date.Year > s[j].Date.Year { return true } return false } func group(reps []Repetition) Groups { m := mapGroup(reps) gs := make(Groups, 0) for date, exercises := range m { g := Group{ Date: date, Categories: make([]Category, 0), } for exercise, reps := range exercises { g.Categories = append(g.Categories, Category{Category: exercise, Reps: reps}) } gs = append(gs, g) } sort.Sort(gs) return gs }
package models //PrecomputedStatistics Result of precomputed statistics type PrecomputedStatistics struct { Min float64 `json:"min"` Max float64 `json:"max"` Avg float64 `json:"avg"` MinCountry string `json:"minCountry"` MaxCountry string `json:"maxCountry"` }
package bind

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"strings"
	"time"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	binddnsv1 "github.com/bind-dns/binddns-operator/pkg/apis/binddns/v1"
	"github.com/bind-dns/binddns-operator/pkg/kube"
	"github.com/bind-dns/binddns-operator/pkg/utils"
	zlog "github.com/bind-dns/binddns-operator/pkg/utils/zaplog"
)

// initAllZones used to init the bind9 zone config. It lists every DnsDomain
// resource and regenerates the zone file for each.
func (handler *DnsHandler) initAllZones(ctx context.Context) (err error) {
	zlog.Infof("Start to init all zones >>>>>>")
	defer func() {
		if err == nil {
			zlog.Infof("Zones init successfully.")
		}
	}()

	domains, err := kube.GetKubeClient().GetDnsClientSet().BinddnsV1().DnsDomains().List(ctx, v1.ListOptions{
		ResourceVersion: "0",
	})
	if err != nil {
		zlog.Error(err)
		return err
	}
	for i := range domains.Items {
		err = handler.initZone(ctx, &domains.Items[i])
		if err != nil {
			return err
		}
	}
	return nil
}

// ZoneAdd used to add a zone: the zone file is generated from the DnsDomain
// resource, then registered with bind9 via "rndc addzone".
func (handler *DnsHandler) ZoneAdd(zone string) error {
	ctx := context.Background()
	domain, err := kube.GetKubeClient().GetDnsClientSet().BinddnsV1().DnsDomains().Get(ctx, zone, v1.GetOptions{})
	if err != nil {
		zlog.Error(err)
		return err
	}
	if err := handler.initZone(ctx, domain); err != nil {
		return err
	}

	// There is only one default view.
	if err := exec.Command("/etc/named/rndc", "addzone", zone, "IN", defaultView,
		fmt.Sprintf("{ type master; file \"%s\";};", handler.getZoneFilePath(zone, defaultView))).Run(); err != nil {
		zlog.Error(err)
		return err
	}
	return nil
}

// ZoneUpdate used to update a zone: the zone file is regenerated, then the
// zone is frozen, reloaded, and thawed so bind9 picks up the new contents.
func (handler *DnsHandler) ZoneUpdate(zone string) error {
	ctx := context.Background()
	domain, err := kube.GetKubeClient().GetDnsClientSet().BinddnsV1().DnsDomains().Get(ctx, zone, v1.GetOptions{})
	if err != nil {
		zlog.Error(err)
		return err
	}
	if err := handler.initZone(ctx, domain); err != nil {
		return err
	}

	// There is only one default view.
	// BUGFIX: these three failure paths used to log the *exec.Cmd value
	// (zlog.Error(cmd)) instead of the error itself.
	cmd := exec.Command("/etc/named/rndc", "freeze", zone, "IN", defaultView)
	if err := cmd.Run(); err != nil {
		zlog.Error(err)
		return err
	}
	cmd = exec.Command("/etc/named/rndc", "reload", zone, "IN", defaultView)
	if err := cmd.Run(); err != nil {
		zlog.Error(err)
		return err
	}
	cmd = exec.Command("/etc/named/rndc", "thaw", zone, "IN", defaultView)
	if err := cmd.Run(); err != nil {
		zlog.Error(err)
		return err
	}
	return nil
}

// ZoneDelete used to delete a zone: the on-disk zone directory is removed and
// the zone is unregistered from every view via "rndc delzone".
func (handler *DnsHandler) ZoneDelete(zone string) error {
	if err := os.RemoveAll(handler.getZoneDir(zone)); err != nil {
		zlog.Error(err)
		return err
	}

	views := []string{defaultView}
	for _, view := range views {
		if err := exec.Command("/etc/named/rndc", "delzone", zone, "IN", view).Run(); err != nil {
			zlog.Error(err)
			return err
		}
	}
	return nil
}

// initZone will init a single zone config file. For an enabled domain it
// collects the enabled DnsRules labeled for the zone and renders them into
// the zone file; a disabled domain gets an empty record set.
func (handler *DnsHandler) initZone(ctx context.Context, domain *binddnsv1.DnsDomain) error {
	var records []string
	if domain.Spec.Enabled {
		rules, err := kube.GetKubeClient().GetDnsClientSet().BinddnsV1().DnsRules().List(ctx, v1.ListOptions{
			ResourceVersion: "0",
			LabelSelector:   utils.LabelZoneDnsRule + "=" + domain.Name,
		})
		if err != nil {
			zlog.Error(err)
			return err
		}
		for i := range rules.Items {
			item := &rules.Items[i]
			if !item.Spec.Enabled {
				continue
			}
			if item.Spec.Type == "MX" {
				// MX records carry a fixed preference of 10.
				records = append(records, fmt.Sprintf("%s %d %s 10 %s \n",
					strings.TrimSpace(item.Spec.Host), item.Spec.Ttl, item.Spec.Type, item.Spec.Data))
				continue
			}
			records = append(records, fmt.Sprintf("%s %d %s %s\n",
				strings.TrimSpace(item.Spec.Host), item.Spec.Ttl, item.Spec.Type, item.Spec.Data))
		}
	}

	if err := os.MkdirAll(handler.getZoneDir(domain.Name), 0777); err != nil {
		return err
	}

	// There is only one default view.
	// BUGFIX: the error check now precedes the deferred Close; previously the
	// defer was registered before err was examined.
	file, err := os.Create(handler.getZoneFilePath(domain.Name, defaultView))
	if err != nil {
		zlog.Error(err)
		return err
	}
	defer file.Close()

	_, err = file.Write(utils.StringToBytes(fmt.Sprintf(ZoneTemplate, domain.Name, time.Now().Unix(), strings.Join(records, "\n"))))
	if err != nil {
		return err
	}
	return nil
}

// getZoneDir returns the directory holding a zone's files: <ZoneDst>/<zone>.
func (handler *DnsHandler) getZoneDir(zone string) string {
	return fmt.Sprintf("%s/%s", handler.ZoneDst, zone)
}

// getZoneFilePath returns the per-view zone file path:
// <ZoneDst>/<zone>/db.<view>.conf.
func (handler *DnsHandler) getZoneFilePath(zone, view string) string {
	return fmt.Sprintf("%s/%s/db.%s.conf", handler.ZoneDst, zone, view)
}
package internal

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// ConfigFileName holds the name of the config file
const ConfigFileName = "ahab.json"

// UserConfigFilePath holds the path of the user's config file, relative to their home dir
const UserConfigFilePath = ".config/ahab/config.json"

// Version holds the build-time ahab version
var Version string

// Configuration contains docker config fields
type Configuration struct {
	AhabVersion       string            `json:"ahab"`
	BuildContext      string            `json:"buildContext"`
	Command           string            `json:"command"`
	Dockerfile        string            `json:"dockerfile"`
	Entrypoint        string            `json:"entrypoint"`
	Environment       []string          `json:"environment"`
	Hostname          string            `json:"hostname"`
	ImageURI          string            `json:"image"`
	Init              []string          `json:"init"`
	Name              string            `json:"name"`
	Options           []string          `json:"options"`
	Permissions       PermConfiguration `json:"permissions"`
	RestartAfterSetup bool              `json:"restartAfterSetup"`
	ShareDisplay      bool              `json:"shareDisplay"`
	User              string            `json:"user"`
	Volumes           []string          `json:"volumes"`
	Workdir           string            `json:"workdir"`
}

// UserConfiguration contains global user config fields
type UserConfiguration struct {
	Environment  []string `json:"environment"`
	Options      []string `json:"options"`
	HideCommands bool     `json:"hideCommands"`
	Volumes      []string `json:"volumes"`
}

// PermConfiguration contains information regarding container user permissions setup
type PermConfiguration struct {
	CmdSet  string   `json:"cmdSet"`
	Disable bool     `json:"disable"`
	Groups  []string `json:"groups"`
}

// UserConfig finds and parses the user's docker config file.
// If the user's config does not exist, an empty userConfig is returned.
func UserConfig() (userConfig *UserConfiguration, err error) {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		err = fmt.Errorf("Failed to get user home directory: %s", err)
		return
	}

	configPath := filepath.Join(homeDir, UserConfigFilePath)
	configFile, err := os.Open(configPath)
	if os.IsNotExist(err) {
		// A missing user config is not an error: fall back to defaults.
		var blankConfig UserConfiguration
		return &blankConfig, nil
	} else if err != nil {
		// BUGFIX: other open errors (e.g. permissions) used to fall through
		// with a nil file handle and be misreported as parse failures.
		err = fmt.Errorf("Failed to open user config file: %s", err)
		return
	}
	defer configFile.Close()

	decoder := json.NewDecoder(configFile)
	if err = decoder.Decode(&userConfig); err != nil {
		err = fmt.Errorf("Failed to parse user config file: %s", err)
	}
	return
}

// checkConfigVersion returns a non-nil err if the passed version is newer than the active ahab version
func checkConfigVersion(configVersion string) error {
	configVersionOrd, err := versionOrdinal(configVersion)
	if err != nil {
		return err
	}
	selfVersionOrd, err := versionOrdinal(Version)
	if err != nil {
		return err
	}
	if configVersionOrd > selfVersionOrd {
		return fmt.Errorf("Config file requires ahab >= %s (your version: %s)", configVersion, Version)
	}
	return nil
}

// findConfigPath recursively searches for a config file starting at topDir, ending at fs root
func findConfigPath(topDir string) (configPath string, err error) {
	configTestPath := filepath.Join(topDir, ConfigFileName)
	_, err = os.Stat(configTestPath)
	if err != nil && os.IsNotExist(err) && filepath.Clean(topDir) != "/" {
		// Not here and not yet at the root: try the parent directory.
		configPath, err = findConfigPath(filepath.Join(topDir, ".."))
	} else if err != nil && os.IsNotExist(err) {
		err = fmt.Errorf("No config file '%s' found in current or parent directories", ConfigFileName)
	} else if err != nil {
		err = fmt.Errorf("Failed to find config file '%s': %s", ConfigFileName, err)
	} else {
		configPath = configTestPath
	}
	return
}

// validateConfig returns a non-nil error if config is invalid: the version
// field is mandatory, and exactly one of 'image' or 'dockerfile' must be set.
func (config *Configuration) validateConfig() (err error) {
	if config.AhabVersion == "" {
		err = fmt.Errorf("Missing required version field 'ahab'")
	} else if config.ImageURI == "" && config.Dockerfile == "" {
		err = fmt.Errorf("Either 'image' or 'dockerfile' must be present")
	} else if config.ImageURI != "" && config.Dockerfile != "" {
		// BUGFIX: message previously mixed a backtick with a quote.
		err = fmt.Errorf("'image' and 'dockerfile' cannot both be present")
	}
	return
}
package operator

import (
	"context"
	"os"

	"github.com/Dynatrace/dynatrace-operator/src/cmd/config"
	cmdManager "github.com/Dynatrace/dynatrace-operator/src/cmd/manager"
	"github.com/Dynatrace/dynatrace-operator/src/controllers/certificates"
	"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
	"github.com/Dynatrace/dynatrace-operator/src/kubesystem"
	"github.com/Dynatrace/dynatrace-operator/src/version"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	use = "operator"
)

// CommandBuilder assembles the "operator" cobra command. It is a fluent
// builder with value receivers: each Set*/set* method mutates a copy and
// returns it, so calls must be chained or reassigned.
type CommandBuilder struct {
	configProvider           config.Provider
	bootstrapManagerProvider cmdManager.Provider
	operatorManagerProvider  cmdManager.Provider
	namespace                string
	podName                  string
	signalHandler            context.Context
	client                   client.Client
}

// NewOperatorCommandBuilder returns an empty builder; all collaborators are
// either injected via the setters or lazily created with defaults.
func NewOperatorCommandBuilder() CommandBuilder {
	return CommandBuilder{}
}

// SetConfigProvider injects the provider used to obtain the kubeconfig.
func (builder CommandBuilder) SetConfigProvider(provider config.Provider) CommandBuilder {
	builder.configProvider = provider
	return builder
}

// setOperatorManagerProvider overrides the operator manager provider (test seam).
func (builder CommandBuilder) setOperatorManagerProvider(provider cmdManager.Provider) CommandBuilder {
	builder.operatorManagerProvider = provider
	return builder
}

// setBootstrapManagerProvider overrides the bootstrap manager provider (test seam).
func (builder CommandBuilder) setBootstrapManagerProvider(provider cmdManager.Provider) CommandBuilder {
	builder.bootstrapManagerProvider = provider
	return builder
}

// SetNamespace records the namespace the operator runs in.
func (builder CommandBuilder) SetNamespace(namespace string) CommandBuilder {
	builder.namespace = namespace
	return builder
}

// SetPodName records the operator's own pod name (used to detect OLM deployment).
func (builder CommandBuilder) SetPodName(podName string) CommandBuilder {
	builder.podName = podName
	return builder
}

// setSignalHandler overrides the shutdown context (test seam).
func (builder CommandBuilder) setSignalHandler(ctx context.Context) CommandBuilder {
	builder.signalHandler = ctx
	return builder
}

// setClient overrides the Kubernetes client (test seam).
func (builder CommandBuilder) setClient(client client.Client) CommandBuilder {
	builder.client = client
	return builder
}

// getOperatorManagerProvider lazily creates the default operator manager
// provider. Mutating the value-receiver copy is fine here because the
// provider is returned directly rather than read back from the builder.
func (builder CommandBuilder) getOperatorManagerProvider(isDeployedByOlm bool) cmdManager.Provider {
	if builder.operatorManagerProvider == nil {
		builder.operatorManagerProvider = NewOperatorManagerProvider(isDeployedByOlm)
	}

	return builder.operatorManagerProvider
}

// getBootstrapManagerProvider lazily creates the default bootstrap manager provider.
func (builder CommandBuilder) getBootstrapManagerProvider() cmdManager.Provider {
	if builder.bootstrapManagerProvider == nil {
		builder.bootstrapManagerProvider = NewBootstrapManagerProvider()
	}

	return builder.bootstrapManagerProvider
}

// getSignalHandler lazily installs the controller-runtime SIGTERM/SIGINT handler.
func (builder CommandBuilder) getSignalHandler() context.Context {
	if builder.signalHandler == nil {
		builder.signalHandler = ctrl.SetupSignalHandler()
	}

	return builder.signalHandler
}

// Build produces the cobra command whose RunE starts the operator.
func (builder CommandBuilder) Build() *cobra.Command {
	return &cobra.Command{
		Use:  use,
		RunE: builder.buildRun(),
	}
}

// setClientFromConfig creates the Kubernetes client from kubeCfg unless one
// was already injected, returning the updated builder.
func (builder CommandBuilder) setClientFromConfig(kubeCfg *rest.Config) (CommandBuilder, error) {
	if builder.client == nil {
		clt, err := client.New(kubeCfg, client.Options{})
		if err != nil {
			return builder, err
		}

		return builder.setClient(clt), nil
	}

	return builder, nil
}

// buildRun returns the command entry point: it resolves the kubeconfig and
// client, then runs either the local debug path or the in-pod path.
func (builder CommandBuilder) buildRun() func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		version.LogVersion()

		kubeCfg, err := builder.configProvider.GetConfig()
		if err != nil {
			return err
		}

		builder, err = builder.setClientFromConfig(kubeCfg)
		if err != nil {
			return err
		}

		if isInDebugMode() {
			log.Info("running locally in debug mode")

			return builder.runLocally(kubeCfg)
		}

		return builder.runInPod(kubeCfg)
	}
}

// runInPod inspects the operator's own pod to detect an OLM deployment; the
// certificate bootstrapper is only run for non-OLM deployments (OLM manages
// certificates itself).
func (builder CommandBuilder) runInPod(kubeCfg *rest.Config) error {
	operatorPod, err := kubeobjects.GetPod(context.TODO(), builder.client, builder.podName, builder.namespace)
	if err != nil {
		return err
	}

	isDeployedViaOlm := kubesystem.IsDeployedViaOlm(*operatorPod)
	if !isDeployedViaOlm {
		err = builder.runBootstrapper(kubeCfg)
		if err != nil {
			return err
		}
	}

	return builder.runOperatorManager(kubeCfg, isDeployedViaOlm)
}

// runLocally always bootstraps certificates and then runs the operator
// manager as a non-OLM deployment.
func (builder CommandBuilder) runLocally(kubeCfg *rest.Config) error {
	err := builder.runBootstrapper(kubeCfg)
	if err != nil {
		return err
	}

	return builder.runOperatorManager(kubeCfg, false)
}

// isInDebugMode reports whether the RUN_LOCAL environment variable requests
// the local debug path.
func isInDebugMode() bool {
	return os.Getenv("RUN_LOCAL") == "true"
}

// runBootstrapper creates and runs the certificate bootstrap manager; it
// returns once the bootstrap controller cancels its own context.
func (builder CommandBuilder) runBootstrapper(kubeCfg *rest.Config) error {
	bootstrapManager, err := builder.getBootstrapManagerProvider().CreateManager(builder.namespace, kubeCfg)
	if err != nil {
		return err
	}

	return startBootstrapperManager(bootstrapManager, builder.namespace)
}

// runOperatorManager creates the main operator manager and blocks until the
// signal handler context is cancelled.
func (builder CommandBuilder) runOperatorManager(kubeCfg *rest.Config, isDeployedViaOlm bool) error {
	operatorManager, err := builder.getOperatorManagerProvider(isDeployedViaOlm).CreateManager(builder.namespace, kubeCfg)
	if err != nil {
		return err
	}

	err = operatorManager.Start(builder.getSignalHandler())

	return errors.WithStack(err)
}

// startBootstrapperManager runs the bootstrap manager under a context that
// the certificate controller cancels once certificates are ready, which
// unblocks Start.
func startBootstrapperManager(bootstrapManager ctrl.Manager, namespace string) error {
	ctx, cancelFn := context.WithCancel(context.TODO())

	err := certificates.AddBootstrap(bootstrapManager, namespace, cancelFn)
	if err != nil {
		return errors.WithStack(err)
	}

	err = bootstrapManager.Start(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	return nil
}
package wxapp

import (
	"context"
	"net/http"

	"github.com/gin-gonic/gin"
	"go.mongodb.org/mongo-driver/bson"

	"jxc/api"
	"jxc/auth"
	"jxc/models"
	"jxc/serializer"
)

// UserAddressService is the request body for listing/deleting a customer's addresses.
type UserAddressService struct {
	CustomerID int64   `json:"customer_id" bson:"customer_id"`
	AddressIDs []int64 `json:"address_ids" bson:"address_ids"`
}

// ListUserAddress 用户收货地址管理 — returns every address of the requested
// customer within the caller's company.
func ListUserAddress(c *gin.Context) {
	token := c.GetHeader("Access-Token")
	// NOTE(review): the ParseToken error is discarded here (and in the other
	// handlers); confirm ParseToken never returns nil claims on a bad token,
	// otherwise claims.ComId below panics.
	claims, _ := auth.ParseToken(token)

	var userAddrSrv UserAddressService
	if err := c.ShouldBindJSON(&userAddrSrv); err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "Params error",
		})
		return
	}

	collection := models.Client.Collection("address")
	var addresses []models.Address
	filter := bson.M{}
	filter["com_id"] = claims.ComId
	filter["customer_id"] = userAddrSrv.CustomerID
	cur, err := collection.Find(context.TODO(), filter)
	if err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "user address error",
		})
		return
	}
	// BUGFIX: the cursor was never closed; release it when the handler returns.
	defer cur.Close(context.TODO())
	for cur.Next(context.TODO()) {
		var res models.Address
		if err := cur.Decode(&res); err != nil {
			c.JSON(http.StatusOK, serializer.Response{
				Code: serializer.CodeError,
				Msg:  "decode address error",
			})
			return
		}
		addresses = append(addresses, res)
	}
	// BUGFIX: surface iteration errors instead of silently returning a
	// truncated list.
	if err := cur.Err(); err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "user address error",
		})
		return
	}

	c.JSON(http.StatusOK, serializer.Response{
		Code: serializer.CodeSuccess,
		Msg:  "User address",
		Data: addresses,
	})
}

// AddUserAddress creates a new address for the caller's company, assigning it
// the next address id.
func AddUserAddress(c *gin.Context) {
	token := c.GetHeader("Access-Token")
	claims, _ := auth.ParseToken(token)

	var address models.Address
	if err := c.ShouldBindJSON(&address); err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "params error",
		})
		return
	}
	address.ComID = claims.ComId
	address.AddressID = api.GetLastID("address")

	collection := models.Client.Collection("address")
	_, err := collection.InsertOne(context.TODO(), address)
	if err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "Create user address error",
		})
		return
	}

	c.JSON(http.StatusOK, serializer.Response{
		Code: serializer.CodeSuccess,
		Msg:  "Create user address",
	})
}

// DeleteUserAddress 可以同时删除多个商品 — deletes one or more of a
// customer's addresses by id.
func DeleteUserAddress(c *gin.Context) {
	token := c.GetHeader("Access-Token")
	claims, _ := auth.ParseToken(token)

	var userAddrSrv UserAddressService
	if err := c.ShouldBindJSON(&userAddrSrv); err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "params error",
		})
		return
	}

	collection := models.Client.Collection("address")
	filter := bson.M{}
	filter["com_id"] = claims.ComId
	filter["customer_id"] = userAddrSrv.CustomerID

	// BUGFIX: the guard used to be len(...) < 0, which is always false, so an
	// empty id list slipped through to DeleteMany.
	if len(userAddrSrv.AddressIDs) == 0 {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "No address to delete",
		})
		return
	}
	filter["address_id"] = bson.M{"$in": userAddrSrv.AddressIDs}

	_, err := collection.DeleteMany(context.TODO(), filter)
	if err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "Delete user address error",
		})
		return
	}

	c.JSON(http.StatusOK, serializer.Response{
		Code: serializer.CodeSuccess,
		Msg:  "Delete user address",
	})
}

// UpdateAddress replaces the stored address identified by company, address
// and customer id with the posted document.
// NOTE(review): unlike the other handlers this one does not read the
// Access-Token header but trusts com_id from the request body — confirm this
// is intentional.
func UpdateAddress(c *gin.Context) {
	var address models.Address
	if err := c.ShouldBindJSON(&address); err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "params error",
		})
		return
	}

	collection := models.Client.Collection("address")
	filter := bson.M{}
	filter["com_id"] = address.ComID
	filter["address_id"] = address.AddressID
	filter["customer_id"] = address.CustomerID

	_, err := collection.UpdateOne(context.TODO(), filter, bson.M{"$set": address})
	if err != nil {
		c.JSON(http.StatusOK, serializer.Response{
			Code: serializer.CodeError,
			Msg:  "Update user address error",
		})
		return
	}

	c.JSON(http.StatusOK, serializer.Response{
		Code: serializer.CodeSuccess,
		Msg:  "Update user address",
	})
}
/* Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cluster import ( "context" "fmt" "strings" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/build/kaniko" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/platform" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/version" ) const ( // kubernetes.io/arch and kubernetes.io/os are known node labels. 
See https://kubernetes.io/docs/reference/labels-annotations-taints/
	nodeOperatingSystemLabel = "kubernetes.io/os"
	nodeArchitectureLabel    = "kubernetes.io/arch"
)

// kanikoPodSpec assembles the Pod that runs a single in-cluster kaniko build
// of artifact pushed to tag. The pod has an init container that blocks until
// the build context has been uploaded (signalled by /tmp/complete) and a main
// kaniko container that performs the build. Optional cluster details (pull
// secret, docker config, service account, runAsUser, tolerations, node
// selector, extra volumes/mounts) are layered on afterwards.
func (b *Builder) kanikoPodSpec(artifact *latest.KanikoArtifact, tag string, platforms platform.Matcher) (*v1.Pod, error) {
	args, err := kanikoArgs(artifact, tag, b.cfg.GetInsecureRegistries())
	if err != nil {
		return nil, fmt.Errorf("building args list: %w", err)
	}

	// Shared emptyDir mount: the init container receives the build context
	// here and the kaniko container reads it from the same path.
	vm := v1.VolumeMount{
		Name:      kaniko.DefaultEmptyDirName,
		MountPath: kaniko.DefaultEmptyDirMountPath,
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Annotations:  b.ClusterDetails.Annotations,
			GenerateName: "kaniko-",
			Labels:       map[string]string{"skaffold-kaniko": "skaffold-kaniko"},
			Namespace:    b.ClusterDetails.Namespace,
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name:            initContainer,
				Image:           artifact.InitImage,
				ImagePullPolicy: v1.PullIfNotPresent,
				// Busy-wait until the build-context upload marks completion.
				Command:      []string{"sh", "-c", "while [ ! -f /tmp/complete ]; do sleep 1; done"},
				VolumeMounts: []v1.VolumeMount{vm},
				Resources:    resourceRequirements(b.ClusterDetails.Resources),
			}},
			Containers: []v1.Container{{
				Name:                   kaniko.DefaultContainerName,
				Image:                  artifact.Image,
				ImagePullPolicy:        v1.PullIfNotPresent,
				Args:                   args,
				Env:                    b.env(artifact, b.ClusterDetails.HTTPProxy, b.ClusterDetails.HTTPSProxy),
				VolumeMounts:           []v1.VolumeMount{vm},
				Resources:              resourceRequirements(b.ClusterDetails.Resources),
				TerminationMessagePath: artifact.DigestFile, // setting this lets us get the built image digest from container logs directly
			}},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{{
				Name: vm.Name,
				VolumeSource: v1.VolumeSource{
					EmptyDir: &v1.EmptyDirVolumeSource{},
				},
			}},
		},
	}

	// Add secret for pull secret
	if b.ClusterDetails.PullSecretName != "" {
		addSecretVolume(pod, kaniko.DefaultSecretName, b.ClusterDetails.PullSecretMountPath, b.ClusterDetails.PullSecretName)
	}

	// Add host path volume for cache
	if artifact.Cache != nil && artifact.Cache.HostPath != "" {
		addHostPathVolume(pod, kaniko.DefaultCacheDirName, kaniko.DefaultCacheDirMountPath, artifact.Cache.HostPath)
	}

	if b.ClusterDetails.DockerConfig != nil {
		// Add secret for docker config if specified
		addSecretVolume(pod, kaniko.DefaultDockerConfigSecretName, kaniko.DefaultDockerConfigPath, b.ClusterDetails.DockerConfig.SecretName)
	}

	// Add Service Account
	if b.ClusterDetails.ServiceAccountName != "" {
		pod.Spec.ServiceAccountName = b.ClusterDetails.ServiceAccountName
	}

	// Add SecurityContext for runAsUser
	if b.ClusterDetails.RunAsUser != nil {
		if pod.Spec.SecurityContext == nil {
			pod.Spec.SecurityContext = &v1.PodSecurityContext{}
		}
		pod.Spec.SecurityContext.RunAsUser = b.ClusterDetails.RunAsUser
	}

	// Add Tolerations for kaniko pod setup
	if len(b.ClusterDetails.Tolerations) > 0 {
		pod.Spec.Tolerations = b.ClusterDetails.Tolerations
	}

	// Add nodeSelector for kaniko pod setup
	if b.ClusterDetails.NodeSelector != nil {
		pod.Spec.NodeSelector = b.ClusterDetails.NodeSelector
	}

	// Add nodeSelector for image target platform.
	// Kaniko doesn't support building cross platform images, so the pod platform needs to match the image target platform.
	// User-provided selector entries (set above) take precedence over the platform-derived ones.
	if len(platforms.Platforms) == 1 {
		if pod.Spec.NodeSelector == nil {
			pod.Spec.NodeSelector = make(map[string]string)
		}
		if _, found := pod.Spec.NodeSelector[nodeArchitectureLabel]; !found {
			pod.Spec.NodeSelector[nodeArchitectureLabel] = platforms.Platforms[0].Architecture
		}
		if _, found := pod.Spec.NodeSelector[nodeOperatingSystemLabel]; !found {
			pod.Spec.NodeSelector[nodeOperatingSystemLabel] = platforms.Platforms[0].OS
		}
	}

	// Add user-defined Volumes
	pod.Spec.Volumes = append(pod.Spec.Volumes, b.Volumes...)

	// Add user-defined VolumeMounts
	for _, vm := range artifact.VolumeMounts {
		pod.Spec.InitContainers[0].VolumeMounts = append(pod.Spec.InitContainers[0].VolumeMounts, vm)
		pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, vm)
	}

	return pod, nil
}

// env builds the environment for the kaniko container: a client-type marker,
// any non-empty user-provided variables from the artifact, optional HTTP(S)
// proxy settings, and GOOGLE_APPLICATION_CREDENTIALS when a pull secret is
// configured.
func (b *Builder) env(artifact *latest.KanikoArtifact, httpProxy, httpsProxy string) []v1.EnvVar {
	env := []v1.EnvVar{{
		// This should be same https://github.com/GoogleContainerTools/kaniko/blob/77cfb912f3483c204bfd09e1ada44fd200b15a78/pkg/executor/push.go#L49
		Name:  "UPSTREAM_CLIENT_TYPE",
		Value: fmt.Sprintf("UpstreamClient(skaffold-%s)", version.Get().Version),
	}}

	// Only forward fully-specified (name AND value) user env entries.
	for _, v := range artifact.Env {
		if v.Name != "" && v.Value != "" {
			env = append(env, v)
		}
	}

	if httpProxy != "" {
		env = append(env, v1.EnvVar{
			Name:  "HTTP_PROXY",
			Value: httpProxy,
		})
	}

	if httpsProxy != "" {
		env = append(env, v1.EnvVar{
			Name:  "HTTPS_PROXY",
			Value: httpsProxy,
		})
	}

	// if cluster.PullSecretName is non-empty populate secret path and use as GOOGLE_APPLICATION_CREDENTIALS
	if b.ClusterDetails.PullSecretName != "" {
		pullSecretPath := strings.Join(
			[]string{b.ClusterDetails.PullSecretMountPath, b.ClusterDetails.PullSecretPath},
			"/", // linux filepath separator.
		)
		env = append(env, v1.EnvVar{
			Name:  "GOOGLE_APPLICATION_CREDENTIALS",
			Value: pullSecretPath,
		})
	}

	return env
}

// addSecretVolume mounts the named Kubernetes secret into the kaniko
// (first) container at mountPath.
func addSecretVolume(pod *v1.Pod, name, mountPath, secretName string) {
	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
		Name:      name,
		MountPath: mountPath,
	})
	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: secretName,
			},
		},
	})
}

// addHostPathVolume mounts a node host path into the kaniko (first)
// container at mountPath (used for the layer cache).
func addHostPathVolume(pod *v1.Pod, name, mountPath, path string) {
	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
		Name:      name,
		MountPath: mountPath,
	})
	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{
				Path: path,
			},
		},
	})
}

// resourceRequirements converts the skaffold-config resource requirements
// into the Kubernetes form, parsing only the quantities that are set.
// Note: resource.MustParse panics on malformed quantity strings.
func resourceRequirements(rr *latest.ResourceRequirements) v1.ResourceRequirements {
	req := v1.ResourceRequirements{}
	if rr != nil {
		if rr.Limits != nil {
			req.Limits = v1.ResourceList{}
			if rr.Limits.CPU != "" {
				req.Limits[v1.ResourceCPU] = resource.MustParse(rr.Limits.CPU)
			}
			if rr.Limits.Memory != "" {
				req.Limits[v1.ResourceMemory] = resource.MustParse(rr.Limits.Memory)
			}
			if rr.Limits.ResourceStorage != "" {
				req.Limits[v1.ResourceStorage] = resource.MustParse(rr.Limits.ResourceStorage)
			}
			if rr.Limits.EphemeralStorage != "" {
				req.Limits[v1.ResourceEphemeralStorage] = resource.MustParse(rr.Limits.EphemeralStorage)
			}
		}

		if rr.Requests != nil {
			req.Requests = v1.ResourceList{}
			if rr.Requests.CPU != "" {
				req.Requests[v1.ResourceCPU] = resource.MustParse(rr.Requests.CPU)
			}
			if rr.Requests.Memory != "" {
				req.Requests[v1.ResourceMemory] = resource.MustParse(rr.Requests.Memory)
			}
			if rr.Requests.ResourceStorage != "" {
				req.Requests[v1.ResourceStorage] = resource.MustParse(rr.Requests.ResourceStorage)
			}
			if rr.Requests.EphemeralStorage != "" {
				req.Requests[v1.ResourceEphemeralStorage] = resource.MustParse(rr.Requests.EphemeralStorage)
			}
		}
	}

	return req
}

// kanikoArgs merges the configured insecure registries into the artifact and
// builds the kaniko command-line argument list, using the shared emptyDir as
// the build context. NOTE(review): this mutates artifact.InsecureRegistry in
// place; repeated calls would append duplicates — confirm callers invoke it
// once per artifact.
func kanikoArgs(artifact *latest.KanikoArtifact, tag string, insecureRegistries map[string]bool) ([]string, error) {
	for reg := range insecureRegistries {
		artifact.InsecureRegistry = append(artifact.InsecureRegistry, reg)
	}

	// Create pod spec
	args, err := kaniko.Args(artifact, tag, fmt.Sprintf("dir://%s", kaniko.DefaultEmptyDirMountPath))
	if err != nil {
		return nil, fmt.Errorf("unable build kaniko args: %w", err)
	}

	log.Entry(context.TODO()).Trace("kaniko arguments are ", strings.Join(args, " "))

	return args, nil
}
package main

import (
	"errors"
	"fmt"
)

// divide returns divided/divisor. When divisor is zero it returns an error
// instead of performing the division.
func divide(divided float64, divisor float64) (float64, error) {
	if divisor != 0.0 {
		return divided / divisor, nil
	}
	return 0, errors.New("На ноль делить ЗАПРЕЩЕНО")
}

// main demonstrates divide with a zero divisor, printing either the error or
// the quotient formatted to two decimal places.
func main() {
	result, err := divide(5.6, 0.0)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%0.2f\n", result)
}
package codegen import ( "bytes" "os" "regexp" "strings" "text/template" "golang.org/x/xerrors" "github.com/iotaledger/hive.go/lo" ) // Template is a wrapper around the text/template package that provides a generic way for generating files according // to the "go generate" pattern. // // In addition to the standard template delimiters (https://pkg.go.dev/text/template), it supports /*{{ ... }}*/, which // allows to "hide" template code in go comments. Data is provided to the template as function pipelines. type Template struct { // header contains the "fixed" header of the file above the "go generate" statement (not processed by the template). header string // content contains "dynamic" the content of the file below the "go generate" statement. content string // mappings is a set of tokens that are being mapped to pipelines in the template. mappings template.FuncMap } // NewTemplate creates a new Template with the given pipeline mappings. func NewTemplate(mappings template.FuncMap) *Template { return &Template{ mappings: mappings, } } // Parse parses the given file and extracts the header and content by splitting the file at the "go:generate" directive. // It automatically removes existing "//go:build ignore" directives from the header. func (t *Template) Parse(fileName string) error { readFile, err := os.ReadFile(fileName) if err != nil { return xerrors.Errorf("could not read file %s: %w", fileName, err) } splitTemplate := strings.Split(string(readFile), "//go:generate") if len(splitTemplate) != 2 { return xerrors.Errorf("could not find go:generate directive in %s", fileName) } t.header = strings.TrimSpace(strings.ReplaceAll(splitTemplate[0], "//go:build ignore", "")) t.content = strings.TrimSpace(splitTemplate[1][strings.Index(splitTemplate[1], "\n"):]) return nil } // Generate generates the file with the given fileName (it can receive an optional generator function that overrides the // way the content is generated). 
func (t *Template) Generate(fileName string, optGenerator ...func() (string, error)) error { generatedContent, err := lo.First(optGenerator, t.GenerateContent)() if err != nil { return xerrors.Errorf("could not generate content: %w", err) } return os.WriteFile(fileName, []byte(strings.Join([]string{ generatedFileHeader + t.header, generatedContent + "\n", }, "\n\n")), 0644) } // GenerateContent generates the dynamic content of the file by processing the template. func (t *Template) GenerateContent() (string, error) { // replace /*{{ and }}*/ with {{ and }} to "unpack" statements that are embedded as comments content := regexp.MustCompile(`/\*{{`).ReplaceAll([]byte(t.content), []byte("{{")) content = regexp.MustCompile(`}}\*/`).ReplaceAll(content, []byte("}}")) tmpl, err := template.New("template").Funcs(t.mappings).Parse(string(content)) if err != nil { return "", xerrors.Errorf("could not parse template: %w", err) } buffer := new(bytes.Buffer) if err := tmpl.Execute(buffer, nil); err != nil { return "", xerrors.Errorf("could not execute template: %w", err) } return buffer.String(), nil } // generatedFileHeader is the header that is being added to the top of the generated file. const generatedFileHeader = "// Code generated by go generate; DO NOT EDIT.\n"
package model

import (
	"gopkg.in/mgo.v2/bson"
)

// Role enumerates the access levels a User account can hold.
type Role int

const (
	// Admin is the administrative role (value 0).
	Admin Role = iota
	// Teacher is the instructor role (value 1).
	Teacher
	// Student is the learner role (value 2).
	Student
)

// User is the MongoDB document representing an account.
// NOTE(review): the Email validation regexp only accepts lowercase
// alphanumerics — confirm that uppercase letters and characters like
// '+', '-' or '_' in addresses are intentionally rejected.
type User struct {
	ID    bson.ObjectId `json:"id" bson:"_id"`
	Name  string        `json:"name" bson:"name"`
	Email string        `json:"email" bson:"email" validate:"regexp=^[0-9a-z]+@[0-9a-z]+(\\.[0-9a-z]+)+$"`
	// Username string `json:"username" bson:"username"`
	// Password has no json tag, so it is never serialized in API responses.
	Password   string      `bson:"password" validate:"min=8"`
	Status     string      `json:"status" bson:"status"`
	Phone      string      `json:"phone" bson:"phone"`
	Role       Role        `json:"role" bson:"role"`
	Categories []Categorie `json:"categories" bson:"categories"`
}

// UserSession carries the authentication token returned to a logged-in user.
type UserSession struct {
	Token string `json:"token"`
}
package keystore

import (
	crypto_rand "crypto/rand"
	"fmt"
	"io"
	"strconv"
	"sync"

	"github.com/golang/protobuf/proto"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"kope.io/auth/pkg/keystore/pb"

	//"k8s.io/apimachinery/pkg/watch"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

// KubernetesKeyStore is a KeyStore implementation that persists keysets in a
// single Kubernetes Secret (namespace/name) and keeps an in-memory copy that
// is refreshed by watching that Secret.
type KubernetesKeyStore struct {
	client    kubernetes.Interface
	namespace string
	name      string

	// mutex guards keySets and resourceVersion when they are updated from
	// the watch loop.
	mutex   sync.Mutex
	keySets map[string]*keySet

	// resourceVersion is the last-applied Secret resource version, used to
	// drop out-of-order watch events.
	resourceVersion int64
}

var _ KeyStore = &KubernetesKeyStore{}

// keySet is one named set of key versions, with one "active" key used for
// encryption; older versions are kept for decryption.
type keySet struct {
	data     pb.KeySetData
	keystore *KubernetesKeyStore
	name     string
	versions map[int32]*secretboxKey
}

var _ KeySet = &keySet{}

// NewKubernetesKeyStore builds a KubernetesKeyStore bound to the Secret
// namespace/name (no I/O is performed here).
func NewKubernetesKeyStore(client kubernetes.Interface, namespace string, name string) (*KubernetesKeyStore, error) {
	s := &KubernetesKeyStore{
		client:    client,
		namespace: namespace,
		name:      name,
	}
	return s, nil
}

// KeySet returns the named keyset, creating and persisting it (with a
// secretbox key) if it does not yet exist or has no active key.
// NOTE(review): k.keySets is read here without holding k.mutex, while the
// watch loop replaces it under the lock — confirm this is safe for the
// intended concurrency model.
func (k *KubernetesKeyStore) KeySet(name string) (KeySet, error) {
	var key *secretboxKey
	ks := k.keySets[name]
	if ks != nil {
		key = ks.versions[ks.data.ActiveId]
	}
	// TODO: Start key expiry / rotation thread?
	if key != nil {
		return ks, nil
	}

	// TODO: Strategy for consistency with multiple servers, avoid thundering herd etc
	err := k.ensureKeySet(name, pb.KeyType_KEYTYPE_SECRETBOX)
	if err != nil {
		return nil, fmt.Errorf("error creating keyset: %v", err)
	}

	ks = k.keySets[name]
	if ks != nil {
		key = ks.versions[ks.data.ActiveId]
	}
	if key == nil {
		return nil, fmt.Errorf("created key was not found")
	}
	return ks, nil
}

// Encrypt encrypts plaintext with the keyset's active key.
func (k *keySet) Encrypt(plaintext []byte) ([]byte, error) {
	key, err := k.activeKey()
	if err != nil {
		return nil, err
	}
	return key.encrypt(plaintext)
}

// Decrypt deserializes the EncryptedData envelope and decrypts it with the
// key version recorded in the envelope (which may not be the active key).
func (k *keySet) Decrypt(ciphertext []byte) ([]byte, error) {
	encryptedData := &pb.EncryptedData{}
	err := proto.Unmarshal(ciphertext, encryptedData)
	if err != nil {
		return nil, fmt.Errorf("error deserializing data: %v", err)
	}
	key, err := k.findKey(encryptedData.KeyId)
	if err != nil {
		return nil, err
	}
	if key == nil {
		return nil, fmt.Errorf("unknown keyid (%d)", encryptedData.KeyId)
	}
	return key.decrypt(encryptedData)
}

// mutateSecret fetches the backing Secret (creating an empty one in memory
// if absent), applies mutator to it, then creates or updates it in the API
// server and refreshes the in-memory state from the stored result.
func (k *KubernetesKeyStore) mutateSecret(mutator func(secret *v1.Secret) error) error {
	secret, err := k.client.CoreV1().Secrets(k.namespace).Get(k.name, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			glog.V(2).Infof("secret %s/%s not found; will create", k.namespace, k.name)
			secret = nil
		} else {
			return fmt.Errorf("error fetching secret %s/%s: %v", k.namespace, k.name, err)
		}
	}

	create := false
	if secret == nil {
		secret = &v1.Secret{}
		secret.Name = k.name
		secret.Namespace = k.namespace
		create = true
	}
	if secret.Data == nil {
		secret.Data = make(map[string][]byte)
	}

	err = mutator(secret)
	if err != nil {
		return err
	}

	if create {
		created, err := k.client.CoreV1().Secrets(k.namespace).Create(secret)
		if err != nil {
			// TODO: Handle concurrent create - retry?
			return fmt.Errorf("error creating secret %s/%s: %v", k.namespace, k.name, err)
		}
		k.updateSecret(created)
	} else {
		// TODO: Make sure this is a conditional update
		// https://github.com/kubernetes/client-go/issues/150
		updated, err := k.client.CoreV1().Secrets(k.namespace).Update(secret)
		if err != nil {
			// TODO: Handle condition update - retry?
			return fmt.Errorf("error updating secret %s/%s: %v", k.namespace, k.name, err)
		}
		k.updateSecret(updated)
	}
	// TODO: Update directly, before watch returns?
	return nil
}

// generateSecret produces fresh random key material for the given key type.
func generateSecret(keyType pb.KeyType) ([]byte, error) {
	switch keyType {
	case pb.KeyType_KEYTYPE_SECRETBOX:
		// secretbox uses 32-byte keys.
		return readCryptoRand(32)
	default:
		return nil, fmt.Errorf("unknown keytype: %s", keyType)
	}
}

// readCryptoRand returns n bytes from the cryptographic random source.
func readCryptoRand(n int) ([]byte, error) {
	b := make([]byte, n, n)
	if _, err := io.ReadFull(crypto_rand.Reader, b); err != nil {
		return nil, fmt.Errorf("error reading random data: %v", err)
	}
	return b, nil
}

// ensureKeySet creates the named keyset in the backing Secret if missing and
// guarantees it has an active key, generating a new key version when needed.
// The whole keyset is re-serialized into the Secret under "secret.<name>".
func (k *KubernetesKeyStore) ensureKeySet(name string, keyType pb.KeyType) error {
	err := k.mutateSecret(func(secret *v1.Secret) error {
		keysets := k.decodeSecret(secret)

		keyset := keysets[name]
		if keyset == nil {
			keyset = &keySet{
				data: pb.KeySetData{
					KeyType: keyType,
				},
				keystore: k,
				//generator: generator,
				name:     name,
				versions: make(map[int32]*secretboxKey),
			}
			keysets[name] = keyset
		}

		sharedSecret := keyset.versions[keyset.data.ActiveId]
		if sharedSecret == nil {
			// No active key: mint a new version with id = max existing + 1.
			maxId := int32(0)
			for id := range keyset.versions {
				if id > maxId {
					maxId = id
				}
			}
			secretData, err := generateSecret(keyset.data.KeyType)
			if err != nil {
				return fmt.Errorf("error generating secret: %v", err)
			}
			sharedSecret := &secretboxKey{
				data: pb.KeyData{
					Id:      maxId + 1,
					Secret:  secretData,
					Created: time.Now().Unix(),
				},
			}
			keyset.data.ActiveId = sharedSecret.data.Id
			keyset.versions[sharedSecret.data.Id] = sharedSecret
		}

		// Remove any legacy per-key entries for this keyset before writing
		// the single serialized blob.
		keyPrefix := "secret." + keyset.name + "."
		for k := range secret.Data {
			if strings.HasPrefix(k, keyPrefix) {
				delete(secret.Data, k)
			}
		}

		data := &pb.KeySetData{}
		*data = keyset.data
		for _, k := range keyset.versions {
			data.Keys = append(data.Keys, &k.data)
		}

		if secret.Data == nil {
			secret.Data = make(map[string][]byte)
		}
		bytes, err := proto.Marshal(data)
		if err != nil {
			return fmt.Errorf("error serializing keyset: %v", err)
		}
		secret.Data["secret."+name] = bytes
		return nil
	})
	return err
}

// int32ToString formats v in base 10.
func int32ToString(v int32) string {
	return strconv.FormatInt(int64(v), 10)
}

// activeKey returns the key version currently marked active, or an error if
// the keyset has no such version.
func (k *keySet) activeKey() (*secretboxKey, error) {
	key := k.versions[k.data.ActiveId]
	if key != nil {
		return key, nil
	}
	return nil, fmt.Errorf("keyset not initialized")
}

// findKey looks up a key version by id; a nil key with nil error means the
// id is unknown.
func (k *keySet) findKey(keyId int32) (*secretboxKey, error) {
	key := k.versions[keyId]
	return key, nil
}

// ensureKeyset returns the named keyset, creating a secretbox keyset via
// ensureKeySet when it does not exist in the in-memory map.
// NOTE(review): reads k.keySets without holding k.mutex (see KeySet above).
func (k *KubernetesKeyStore) ensureKeyset(name string) (*keySet, error) {
	keyType := pb.KeyType_KEYTYPE_SECRETBOX
	keyset := k.keySets[name]
	if keyset == nil {
		err := k.ensureKeySet(name, keyType)
		if err != nil {
			return nil, fmt.Errorf("error creating keyset: %v", err)
		}
		keyset = k.keySets[name]
		if keyset == nil {
			return nil, fmt.Errorf("created keyset was not found")
		}
	}
	//if keyset.generator == nil {
	//	keyset.generator = generator
	//}
	return keyset, nil
}

// decodeSecret parses all "secret.<name>" entries of the Secret into keySet
// objects, skipping (and logging) entries that fail to parse.
func (s *KubernetesKeyStore) decodeSecret(secret *v1.Secret) map[string]*keySet {
	keySets := make(map[string]*keySet)
	for k, v := range secret.Data {
		tokens := strings.Split(k, ".")
		// secret.<name>=<value>
		if len(tokens) == 2 && tokens[0] == "secret" {
			name := tokens[1]
			ks := &keySet{
				keystore: s,
				name:     name,
				versions: make(map[int32]*secretboxKey),
			}
			err := proto.Unmarshal(v, &ks.data)
			if err != nil {
				glog.Warningf("error parsing secret key %v", k)
				continue
			}
			for _, key := range ks.data.Keys {
				ks.versions[key.Id] = &secretboxKey{
					data: *key,
				}
			}
			keySets[name] = ks
		} else {
			glog.Warningf("ignoring unrecognized key %v", k)
		}
	}
	return keySets
}

// updateSecret parses and updates the specified secret, replacing the
// in-memory keysets under the mutex. Stale updates (resource version not
// newer than the last applied one) are ignored.
func (k *KubernetesKeyStore) updateSecret(secret *v1.Secret) {
	k.mutex.Lock()
	defer k.mutex.Unlock()

	resourceVersion, err := strconv.ParseInt(secret.ObjectMeta.ResourceVersion, 10, 64)
	if err != nil {
		glog.Warningf("Unable to parse ResourceVersion=%q", secret.ObjectMeta.ResourceVersion)
	} else if resourceVersion <= k.resourceVersion {
		glog.V(2).Infof("Ignoring out of sequence secret update: %d vs %d", resourceVersion, k.resourceVersion)
		return
	}

	keySets := k.decodeSecret(secret)
	k.keySets = keySets
	k.resourceVersion = resourceVersion
}

// deleteSecret clears the in-memory keysets in response to the backing
// Secret being deleted, subject to the same resource-version ordering check.
func (k *KubernetesKeyStore) deleteSecret(resourceVersionString string) {
	k.mutex.Lock()
	defer k.mutex.Unlock()

	resourceVersion, err := strconv.ParseInt(resourceVersionString, 10, 64)
	if err != nil {
		glog.Warningf("Unable to parse ResourceVersion=%q", resourceVersionString)
	} else if resourceVersion <= k.resourceVersion {
		glog.V(2).Infof("Ignoring out of sequence secret update: %d vs %d", resourceVersion, k.resourceVersion)
		return
	}

	keySets := make(map[string]*keySet)
	k.keySets = keySets
	k.resourceVersion = resourceVersion
}

// Run starts the secretsWatcher.
// Run lists the backing Secret and then watches it for changes, applying
// updates/deletions to the in-memory state. It retries the whole
// list-then-watch cycle on failure until stopCh is closed.
func (c *KubernetesKeyStore) Run(stopCh <-chan struct{}) {
	// runOnce performs one full list+watch cycle. It returns (true, nil)
	// when the stop signal was received, (false, err/nil) when the cycle
	// ended and should be restarted.
	runOnce := func() (bool, error) {
		var listOpts metav1.ListOptions
		// How to watch a single object: https://github.com/kubernetes/kubernetes/issues/43299
		listOpts.FieldSelector = fields.OneTermEqualSelector("metadata.name", c.name).String()
		secretList, err := c.client.CoreV1().Secrets(c.namespace).List(listOpts)
		if err != nil {
			return false, fmt.Errorf("error watching secrets: %v", err)
		}
		for i := range secretList.Items {
			if secretList.Items[i].Name != c.name {
				continue
			}
			c.updateSecret(&secretList.Items[i])
			// TODO: If this is a multi-item scan, we need to delete any items not present
		}

		// Resume watching from the list's resource version so no events are
		// missed between the list and the watch.
		listOpts.Watch = true
		listOpts.ResourceVersion = secretList.ResourceVersion
		watcher, err := c.client.CoreV1().Secrets(c.namespace).Watch(listOpts)
		if err != nil {
			return false, fmt.Errorf("error watching secrets: %v", err)
		}

		ch := watcher.ResultChan()
		for {
			select {
			case <-stopCh:
				glog.Infof("Got stop signal")
				return true, nil
			case event, ok := <-ch:
				if !ok {
					// Server closed the watch; re-list and re-watch.
					glog.Infof("secret watch channel closed")
					return false, nil
				}

				secret := event.Object.(*v1.Secret)
				if secret.Name == c.name {
					glog.V(4).Infof("secret changed: %s %v", event.Type, secret.Name)

					switch event.Type {
					case watch.Added, watch.Modified:
						c.updateSecret(secret)
					case watch.Deleted:
						c.deleteSecret(secret.ResourceVersion)
					}
				} else {
					glog.V(4).Infof("ignoring secret change with wrong name: %s %v", event.Type, secret.Name)
				}
			}
		}
	}

	for {
		stop, err := runOnce()
		if stop {
			return
		}

		if err != nil {
			glog.Warningf("Unexpected error in secret watch, will retry: %v", err)
			// Back off before retrying the failed cycle.
			time.Sleep(10 * time.Second)
		}
	}
}
package amx

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/prebid/openrtb/v19/openrtb2"
	"github.com/prebid/prebid-server/adapters"
	"github.com/prebid/prebid-server/config"
	"github.com/prebid/prebid-server/openrtb_ext"
	"github.com/stretchr/testify/assert"

	"github.com/prebid/prebid-server/adapters/adapterstest"
)

const (
	// amxTestEndpoint is the endpoint used to build the bidder under test.
	amxTestEndpoint = "http://pbs-dev.amxrtb.com/auction/openrtb"
	// sampleVastADM is a minimal VAST 2.0 video creative used in bid fixtures.
	sampleVastADM = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?><VAST version=\"2.0\"><Ad id=\"128a6.44d74.46b3\"><InLine><Error><![CDATA[http://example.net/hbx/verr?e=]]></Error><Impression><![CDATA[http://example.net/hbx/vimp?lid=test&aid=testapp]]></Impression><Creatives><Creative sequence=\"1\"><Linear><Duration>00:00:15</Duration><TrackingEvents><Tracking event=\"firstQuartile\"><![CDATA[https://example.com?event=first_quartile]]></Tracking></TrackingEvents><VideoClicks><ClickThrough><![CDATA[http://example.com]]></ClickThrough></VideoClicks><MediaFiles><MediaFile delivery=\"progressive\" width=\"16\" height=\"9\" type=\"video/mp4\" bitrate=\"800\"><![CDATA[https://example.com/media.mp4]]></MediaFile></MediaFiles></Linear></Creative></Creatives></InLine></Ad></VAST>"
	// sampleDisplayADM is a minimal banner creative used in bid fixtures.
	sampleDisplayADM = "<img src='https://example.com/300x250.png' height='250' width='300'/>"
)

// TestJsonSamples runs the shared JSON-fixture suite against the AMX bidder.
func TestJsonSamples(t *testing.T) {
	bidder, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{
		Endpoint: amxTestEndpoint}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})

	if buildErr != nil {
		t.Fatalf("Builder returned unexpected error %v", buildErr)
	}

	adapterstest.RunJSONBidderTest(t, "amxtest", bidder)
}

// TestEndpointMalformed verifies that Builder rejects an endpoint URL with a
// leading space.
func TestEndpointMalformed(t *testing.T) {
	_, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{
		Endpoint: " http://leading.space.is.invalid"}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	assert.Error(t, buildErr)
}

// TestEndpointQueryStringMalformed verifies that Builder rejects an endpoint
// with an invalid query-string escape.
func TestEndpointQueryStringMalformed(t *testing.T) {
	_, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{
		Endpoint: "http://invalid.query.from.go.docs/page?%gh&%ij"}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	assert.Error(t, buildErr)
}

// TestMakeRequestsTagID checks how MakeRequests resolves the outgoing
// imp.TagID from the imp-level TagID and the bidder ext adUnitId (the ext
// value wins when present). blankNil controls whether empty inputs are left
// unset entirely.
func TestMakeRequestsTagID(t *testing.T) {
	var w, h int = 300, 250
	var width, height int64 = int64(w), int64(h)

	bidder, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{
		Endpoint: amxTestEndpoint}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	if buildErr != nil {
		t.Fatalf("Builder returned unexpected error %v", buildErr)
	}

	type testCase struct {
		tagID         string
		extAdUnitID   string
		expectedTagID string
		blankNil      bool
	}

	tests := []testCase{
		{tagID: "tag-id", extAdUnitID: "ext.adUnitID", expectedTagID: "ext.adUnitID", blankNil: false},
		{tagID: "tag-id", extAdUnitID: "", expectedTagID: "tag-id", blankNil: false},
		{tagID: "tag-id", extAdUnitID: "", expectedTagID: "tag-id", blankNil: true},
		{tagID: "", extAdUnitID: "", expectedTagID: "", blankNil: true},
		{tagID: "", extAdUnitID: "", expectedTagID: "", blankNil: false},
		{tagID: "", extAdUnitID: "ext.adUnitID", expectedTagID: "ext.adUnitID", blankNil: true},
		{tagID: "", extAdUnitID: "ext.adUnitID", expectedTagID: "ext.adUnitID", blankNil: false},
	}

	for _, tc := range tests {
		imp1 := openrtb2.Imp{
			ID: "sample_imp_1",
			Banner: &openrtb2.Banner{
				W: &width,
				H: &height,
				Format: []openrtb2.Format{
					{W: 300, H: 250},
				},
			}}

		if tc.extAdUnitID != "" || !tc.blankNil {
			imp1.Ext = json.RawMessage(
				fmt.Sprintf(`{"bidder":{"adUnitId":"%s"}}`, tc.extAdUnitID))
		}

		if tc.tagID != "" || !tc.blankNil {
			imp1.TagID = tc.tagID
		}

		inputRequest := openrtb2.BidRequest{
			User: &openrtb2.User{},
			Imp:  []openrtb2.Imp{imp1},
			Site: &openrtb2.Site{},
		}

		actualAdapterRequests, err := bidder.MakeRequests(&inputRequest, &adapters.ExtraRequestInfo{})
		assert.Len(t, actualAdapterRequests, 1)
		assert.Empty(t, err)
		var body openrtb2.BidRequest
		assert.Nil(t, json.Unmarshal(actualAdapterRequests[0].Body, &body))
		assert.Equal(t, tc.expectedTagID, body.Imp[0].TagID)
	}
}

// TestMakeRequestsPublisherId checks how MakeRequests resolves the outgoing
// Site.Publisher.ID from the publisher ID and the bidder ext tagId (the ext
// value wins when present).
func TestMakeRequestsPublisherId(t *testing.T) {
	var w, h int = 300, 250
	var width, height int64 = int64(w), int64(h)

	bidder, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{
		Endpoint: amxTestEndpoint}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	if buildErr != nil {
		t.Fatalf("Builder returned unexpected error %v", buildErr)
	}

	type testCase struct {
		publisherID         string
		extTagID            string
		expectedPublisherID string
		blankNil            bool
	}

	tests := []testCase{
		{publisherID: "publisher.id", extTagID: "ext.tagId", expectedPublisherID: "ext.tagId", blankNil: false},
		{publisherID: "publisher.id", extTagID: "", expectedPublisherID: "publisher.id", blankNil: false},
		{publisherID: "", extTagID: "ext.tagId", expectedPublisherID: "ext.tagId", blankNil: false},
		{publisherID: "", extTagID: "ext.tagId", expectedPublisherID: "ext.tagId", blankNil: true},
		{publisherID: "publisher.id", extTagID: "", expectedPublisherID: "publisher.id", blankNil: false},
		{publisherID: "publisher.id", extTagID: "", expectedPublisherID: "publisher.id", blankNil: true},
	}

	for _, tc := range tests {
		imp1 := openrtb2.Imp{
			ID: "sample_imp_1",
			Banner: &openrtb2.Banner{
				W: &width,
				H: &height,
				Format: []openrtb2.Format{
					{W: 300, H: 250},
				},
			}}

		if tc.extTagID != "" || !tc.blankNil {
			imp1.Ext = json.RawMessage(
				fmt.Sprintf(`{"bidder":{"tagId":"%s"}}`, tc.extTagID))
		}

		inputRequest := openrtb2.BidRequest{
			User: &openrtb2.User{ID: "example_user_id"},
			Imp:  []openrtb2.Imp{imp1},
			Site: &openrtb2.Site{},
			ID:   "1234",
		}

		if tc.publisherID != "" || !tc.blankNil {
			inputRequest.Site.Publisher = &openrtb2.Publisher{
				ID: tc.publisherID,
			}
		}

		actualAdapterRequests, err := bidder.MakeRequests(&inputRequest, &adapters.ExtraRequestInfo{})
		assert.Len(t, actualAdapterRequests, 1)
		assert.Empty(t, err)
		var body openrtb2.BidRequest
		assert.Nil(t, json.Unmarshal(actualAdapterRequests[0].Body, &body))
		assert.Equal(t, tc.expectedPublisherID, body.Site.Publisher.ID)
	}
}

// TestMakeBids feeds synthetic bid responses through MakeBids and checks the
// resolved bid type, including invalid bid.ext payloads producing errors.
func TestMakeBids(t *testing.T) {
	bidder, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{
		Endpoint: amxTestEndpoint}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	if buildErr != nil {
		t.Fatalf("Failed to build bidder: %v", buildErr)
	}

	type testCase struct {
		bidType openrtb_ext.BidType
		adm     string
		extRaw  string
		valid   bool
	}

	tests := []testCase{
		{openrtb_ext.BidTypeNative, `{"assets":[]}`, `{"ct":10}`, true},
		{openrtb_ext.BidTypeBanner, sampleDisplayADM, `{"ct": 1}`, true},
		{openrtb_ext.BidTypeBanner, sampleDisplayADM, `{"ct": "invalid"}`, false},
		{openrtb_ext.BidTypeBanner, sampleDisplayADM, `{}`, true},
		{openrtb_ext.BidTypeVideo, sampleVastADM, `{"startdelay": 1}`, true},
		{openrtb_ext.BidTypeBanner, sampleVastADM, `{"ct": 1}`, true}, // the server shouldn't do this
	}

	for _, test := range tests {
		bid := openrtb2.Bid{
			AdM:   test.adm,
			Price: 1,
			Ext:   json.RawMessage(test.extRaw),
		}
		sb := openrtb2.SeatBid{
			Bid: []openrtb2.Bid{bid},
		}
		resp := openrtb2.BidResponse{
			SeatBid: []openrtb2.SeatBid{sb},
		}

		respJson, jsonErr := json.Marshal(resp)
		if jsonErr != nil {
			t.Fatalf("Failed to serialize test bid %v: %v", test, jsonErr)
		}

		bids, errs := bidder.MakeBids(nil, nil, &adapters.ResponseData{
			StatusCode: 200,
			Body:       respJson,
		})

		if !test.valid {
			assert.Len(t, errs, 1)
			continue
		}

		if len(errs) > 0 {
			t.Fatalf("Failed to make bids: %v", errs)
		}

		assert.Len(t, bids.Bids, 1)
		assert.Equal(t, test.bidType, bids.Bids[0].BidType)
	}
}
package models

import (
	"context"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"time"
)

/***************************************************** Model **********************************************************/

// User is the MongoDB document for a user account.
type User struct {
	ID        primitive.ObjectID `json:"id" bson:"_id"`
	CreatedAt int64              `json:"created_at" bson:"created_at"`
	DeleteAt  int64              `json:"delete_at" bson:"delete_at"`
	// Status is application-defined; by default 0 = active user, 1 = deleted account.
	Status    int    `json:"status" bson:"status"`
	Username  string `json:"username" bson:"username"`
	Password  string `json:"password" bson:"password"`
	Telephone string `json:"telephone" bson:"telephone"`
	Email     string `json:"email" bson:"email"`
}

/***************************************************** Simple *********************************************************/

// Thin convenience layer; add wrappers here as business needs grow.

// UserUpdateOneOfSet updates a single document with a $set of the given fields.
func UserUpdateOneOfSet(filter bson.M, update bson.M) (*mongo.UpdateResult, error) {
	res, err := UserUpdateOne(filter, bson.M{"$set": update}, nil)
	return res, err
}

/***************************************************** Basic **********************************************************/

// UserInsertOne inserts a new user document. Note that only Username,
// Password, Telephone and Email are taken from msg; created_at/delete_at are
// set to the current time and status to 0, and msg.ID is ignored (Mongo
// assigns _id).
func UserInsertOne(msg User) (*mongo.InsertOneResult, error) {
	res, err := UserColl.InsertOne(context.Background(), bson.M{
		"created_at": time.Now().Unix(),
		"delete_at":  time.Now().Unix(),
		"status":     0,
		"username":   msg.Username,
		"password":   msg.Password,
		"telephone":  msg.Telephone,
		"email":      msg.Email,
	})
	return res, err
}

// UserFindOne returns the first user matching filter.
func UserFindOne(filter bson.M) (User, error) {
	var msg User
	err := UserColl.FindOne(context.Background(), filter).Decode(&msg)
	return msg, err
}

// UserFindMany returns all users matching filter.
// NOTE(review): the parameter name `options` shadows the imported
// mongo-driver `options` package inside this function.
func UserFindMany(filter bson.M, options *options.FindOptions) ([]User, error) {
	ctx := context.Background()
	cursor, err := UserColl.Find(ctx, filter, options)
	if err != nil {
		return nil, err
	}
	defer cursor.Close(ctx)
	var res []User
	for cursor.Next(ctx) {
		var temp User
		if err := cursor.Decode(&temp); err != nil {
			return nil, err
		}
		res = append(res, temp)
	}
	return res, nil
}

// UserUpdateOne applies update to the first document matching filter.
// NOTE(review): the parameter name `options` shadows the `options` package.
func UserUpdateOne(filter bson.M, update bson.M, options *options.UpdateOptions) (*mongo.UpdateResult, error) {
	res, err := UserColl.UpdateOne(context.Background(), filter, update, options)
	return res, err
}

// UserUpdateMany applies update to every document matching filter.
func UserUpdateMany(filter bson.M, update bson.M, options *options.UpdateOptions) (*mongo.UpdateResult, error) {
	res, err := UserColl.UpdateMany(context.Background(), filter, update, options)
	return res, err
}

// UserDeleteOne removes the first document matching filter.
func UserDeleteOne(filter bson.M) (*mongo.DeleteResult, error) {
	res, err := UserColl.DeleteOne(context.Background(), filter)
	return res, err
}

// UserDeleteMany removes every document matching filter.
func UserDeleteMany(filter bson.M) (*mongo.DeleteResult, error) {
	res, err := UserColl.DeleteMany(context.Background(), filter)
	return res, err
}
package model

// User account states stored in User.State.
const (
	UserStateEnabled  = "enable"
	UserStateDisabled = "disable"
)

// User is the GORM model for an account. EncryptedPassword is excluded from
// JSON output via the "-" tag; State is constrained to the enum values above
// and defaults to 'enable'.
type User struct {
	Model

	Email             string `gorm:"column:email;size:256;uniqueIndex;not null" json:"email"`
	Name              string `gorm:"column:name;size:256;uniqueIndex;not null" json:"name"`
	EncryptedPassword string `gorm:"column:encrypted_password;size:1024" json:"-"`
	Avatar            string `gorm:"column:avatar;size:256" json:"avatar"`
	Phone             string `gorm:"column:phone;size:256" json:"phone"`
	PrivateToken      string `gorm:"column:private_token;size:256" json:"private_token"`
	State             string `gorm:"type:enum('enable', 'disable');default:'enable'" json:"state"`
	Location          string `gorm:"column:location;size:1024" json:"location"`
	Bio               string `gorm:"column:bio;size:1024" json:"bio"`
}
package apm_test import ( "errors" "testing" "github.com/junhwong/goost/apm" ) func TestLog(t *testing.T) { t.Cleanup(apm.Flush) // apm.UseAsyncDispatcher() apm.LogComponent("test") apm.Default(apm.WithFields(apm.LogComponent(""))).Debug("hello") apm.Default().Debug(apm.WrapCallStack(errors.New("hhh"))) }
// Package parking implements data access for the parking-lot demo database.
package parking

import (
	"strconv"

	"github.com/boantp/parking_lot/config"
)

// Config mirrors a row of the `config` table.
type Config struct {
	Id             int
	ParkingLotSlot int
}

// ParkingLot mirrors a row of the `parking_lot` table.
type ParkingLot struct {
	IdParkingLot  int64
	SlotNumber    int
	ParkingStatus int
}

// ParkingCar mirrors a row of the `parking_car` table.
type ParkingCar struct {
	IdParkingCar   int
	SlotNumber     int
	CarPlateNumber string
	CarColor       string
	ParkingStatus  int
	DateTime       string
}

// PutConfig stores the total slot count parsed from cplValue.
func PutConfig(cplValue string) (Config, error) {
	configData := Config{}
	// BUG FIX: the Atoi error used to be ignored, silently inserting 0.
	i, err := strconv.Atoi(cplValue)
	if err != nil {
		return configData, err
	}
	configData.ParkingLotSlot = i

	stmt, err := config.DB.Prepare("INSERT INTO config(parking_lot_slot) VALUES (?)")
	if err != nil {
		return configData, err
	}
	// BUG FIX: prepared statements were never closed (server-side leak).
	defer stmt.Close()

	if _, err = stmt.Exec(configData.ParkingLotSlot); err != nil {
		return configData, err
	}
	return configData, nil
}

// PutParkingLot creates one parking_lot row per slot, numbered 1..n.
// The id of the last inserted row is returned in pl.IdParkingLot.
func PutParkingLot(cplValue string) (ParkingLot, error) {
	pl := ParkingLot{}
	n, err := strconv.Atoi(cplValue)
	if err != nil {
		return pl, err
	}

	// Prepare once and reuse across the loop instead of re-preparing per row.
	stmt, err := config.DB.Prepare("INSERT INTO parking_lot(slot_number) VALUES (?)")
	if err != nil {
		return pl, err
	}
	defer stmt.Close()

	for j := 1; j <= n; j++ {
		res, err := stmt.Exec(j)
		if err != nil {
			return pl, err
		}
		// BUG FIX: LastInsertId's error was previously discarded.
		id, err := res.LastInsertId()
		if err != nil {
			return pl, err
		}
		pl.IdParkingLot = id
	}
	return pl, nil
}

// PutParkingCar records a car parked in slotNumber with parking_status 1.
func PutParkingCar(slotNumber int, cpn string, cc string) (int, error) {
	stmt, err := config.DB.Prepare("INSERT INTO parking_car(slot_number,car_plate_number,car_color,parking_status) VALUES (?,?,?,?)")
	if err != nil {
		return slotNumber, err
	}
	defer stmt.Close()

	_, err = stmt.Exec(slotNumber, cpn, cc, 1)
	return slotNumber, err
}

// UpdateParkingLot sets parking_status for one slot.
func UpdateParkingLot(slotNumber int, parkingStatus int) (int, error) {
	stmt, err := config.DB.Prepare("update parking_lot set parking_status=? where slot_number=?")
	if err != nil {
		return slotNumber, err
	}
	defer stmt.Close()

	_, err = stmt.Exec(parkingStatus, slotNumber)
	return slotNumber, err
}

// UpdateParkingCar sets parking_status for one parked-car record.
func UpdateParkingCar(parkingCarId int, parkingStatus int) (int, error) {
	stmt, err := config.DB.Prepare("update parking_car set parking_status=? where id_parking_car=?")
	if err != nil {
		return parkingCarId, err
	}
	defer stmt.Close()

	_, err = stmt.Exec(parkingStatus, parkingCarId)
	return parkingCarId, err
}

// OneParkingLot returns the lowest-id free slot (parking_status = 0).
func OneParkingLot() (ParkingLot, error) {
	pl := ParkingLot{}
	row := config.DB.QueryRow("SELECT * FROM parking_lot WHERE parking_status = 0 ORDER BY id_parking_lot ASC LIMIT 1")
	if err := row.Scan(&pl.IdParkingLot, &pl.SlotNumber, &pl.ParkingStatus); err != nil {
		return pl, err
	}
	return pl, nil
}

// OneParkingCar returns the occupied (parking_status = 1) car in slotNumber.
// The zero ParkingCar is returned when the slot holds no parked car.
func OneParkingCar(slotNumber int) (ParkingCar, error) {
	pc := ParkingCar{}
	rows, err := config.DB.Query("SELECT * FROM parking_car WHERE parking_status = 1 AND slot_number = ?", slotNumber)
	if err != nil {
		return pc, err
	}
	// BUG FIX: rows were never closed, leaking the DB connection.
	defer rows.Close()

	for rows.Next() {
		// Scan order must match the table's column order.
		if err := rows.Scan(&pc.IdParkingCar, &pc.SlotNumber, &pc.CarPlateNumber, &pc.CarColor, &pc.ParkingStatus, &pc.DateTime); err != nil {
			return pc, err
		}
	}
	return pc, rows.Err()
}

// AllParkingCar lists parked cars, optionally filtered by key "color" or
// "registration_number"; "default" (or any other key) applies no filter.
func AllParkingCar(key string, value string) ([]ParkingCar, error) {
	// BUG FIX (security): value used to be concatenated directly into the SQL
	// text, allowing SQL injection; it is now bound as a query parameter.
	query := "SELECT * FROM parking_car where parking_status=1"
	args := make([]interface{}, 0, 1)
	switch key {
	case "color":
		query += " AND car_color=?"
		args = append(args, value)
	case "registration_number":
		query += " AND car_plate_number=?"
		args = append(args, value)
	}

	rows, err := config.DB.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	cars := make([]ParkingCar, 0)
	for rows.Next() {
		car := ParkingCar{}
		// Scan order must match the table's column order.
		if err := rows.Scan(&car.IdParkingCar, &car.SlotNumber, &car.CarPlateNumber, &car.CarColor, &car.ParkingStatus, &car.DateTime); err != nil {
			return nil, err
		}
		cars = append(cars, car)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return cars, nil
}

// TruncateTable empties all three tables (used to reset state between runs).
func TruncateTable() {
	// BUG FIX: TRUNCATE was issued via Query, leaking unread result sets;
	// Exec is the correct call for statements that return no rows.
	for _, table := range []string{"config", "parking_lot", "parking_car"} {
		_, err := config.DB.Exec("TRUNCATE TABLE " + table)
		checkErr(err)
	}
}

// checkErr panics on a non-nil error. Retained for TruncateTable and any
// sibling files in this package that rely on it.
func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}
package main const PATH_ACIHOME = "/acihome" const PATH_OPT = "/opt" const PATH_BUILDER = "/builder" const PATH_DGR = "/dgr" const PATH_OVERLAY = "/overlay" const PATH_UPPER = "/upper" const PATH_STAGE2 = "/stage2" const PATH_ATTRIBUTES = "/attributes" const PATH_TMP = "/tmp"
package model import ( "fmt" "strconv" "time" ) type Result struct { Name string True bool Time time.Time } func (r Result) ToString() string { return fmt.Sprintf("%s,%s,%s", r.Name, strconv.FormatBool(r.True), r.Time.Format(time.RFC822)) }
package main import ( "fmt" "sort" ) type people []string func (p people) Len() int { return len(p) } func (p people) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p people) Less(i, j int) bool { return p[i] < p[j] } func main() { studyGroup := people{"Zeno", "John", "Al", "Jenny"} fmt.Println(studyGroup) fmt.Println("Ascending..") sort.Sort(studyGroup) fmt.Println(studyGroup) fmt.Println("Descending..") sort.Sort(sort.Reverse(sort.StringSlice(studyGroup))) fmt.Println(studyGroup) s := []string{"Zeno", "John", "Al", "Jenny"} fmt.Println("Sorting a []string using sort.StringSlice") // StringSlice is a type, therefore you can use it to convert // like Type(ToConvert) or string(65) sortedS := sort.StringSlice(s) sort.Sort(sortedS) fmt.Println(sortedS) sort.Sort(sort.Reverse(sortedS)) fmt.Println(sortedS) n := []int{5, 4, 10, 8, 1} fmt.Println("Sorting a []int using sort.IntSlice") sortedN := sort.IntSlice(n) sort.Sort(sortedN) fmt.Println(sortedN) sort.Sort(sort.Reverse(sortedN)) fmt.Println(sortedN) }
// Package controller: shared validation and HTTP response helpers.
package controller

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/astaxie/beego/validation"
	"github.com/gin-gonic/gin"

	"dawn_media/conf"
	"dawn_media/model"
)

// MyValidator wraps beego validation with project-specific length checks.
type MyValidator struct {
	validation.Validation
}

// Size validates that the length of i is within [min, max], attaching a
// human-readable field name taken from message[0] when provided.
// v must be a pointer; otherwise errors would be recorded on a copy
// instead of the real validator.
func (v *MyValidator) Size(i interface{}, min int, max int, message ...string) {
	name := "参数"
	if len(message) == 1 {
		name = message[0]
	}
	v.MinSize(i, min, "key1").Message(fmt.Sprintf("%s应该大于%d", name, min))
	v.MaxSize(i, max, "key2").Message(fmt.Sprintf("%s应该小于%d", name, max))
}

// ValidateMedia checks media title and introduction lengths.
func (v *MyValidator) ValidateMedia(media *model.Media) {
	v.Size(media.Title, 1, 40, "媒体标题")
	v.Size(media.Introduction, 1, 300, "媒体介绍")
}

// ValidateUser checks username charset and username/password lengths.
func (v *MyValidator) ValidateUser(user *model.User) {
	v.AlphaDash(user.Username, "key1").Message("用户名只能包含数字字母-_")
	v.Size(user.Username, 4, 20, "用户名")
	v.Size(user.Password, 4, 30, "密码")
}

// ValidateUserUpdate checks a profile update; the password is only validated
// when the user actually supplied a new one.
func (v *MyValidator) ValidateUserUpdate(user *model.User) {
	if user.Password != "" {
		v.Size(user.Password, 4, 30, "密码")
	}
	v.Size(user.Nickname, 1, 20, "用户昵称")
}

// ValidateODUserUpdate checks a nickname-only update.
func (v *MyValidator) ValidateODUserUpdate(user *model.User) {
	v.Size(user.Nickname, 1, 20, "用户昵称")
}

// ValidateODPwdUpdate checks old password o, new password n, and its repeat a.
func (v *MyValidator) ValidateODPwdUpdate(o, n, a string) {
	v.Size(o, 4, 30, "原始密码")
	v.Size(n, 4, 30, "新密码")
	v.Size(a, 4, 30, "重复的密码")
}

// h wraps template data with flash messages, the authenticated user, and
// static-resource maps.
func h(gh gin.H, c *gin.Context) gin.H {
	gh["error"] = c.Query("error")
	gh["success"] = c.Query("success")
	if authUser, ok := c.Get("authUser"); ok {
		gh["authUser"] = authUser
	}
	gh["avatarMap"] = conf.C().AvatarMap
	gh["coverMap"] = conf.C().CoverMap
	gh["mediaMap"] = conf.C().MediaMap
	return gh
}

// j builds the standard JSON envelope {code, data, error}.
func j(code int, data interface{}, err string) gin.H {
	return gin.H{
		"code":  code,
		"data":  data,
		"error": err,
	}
}

// redirect sends the client to website with h encoded as query parameters.
//
// BUG FIXES vs. the previous version:
//   - values are now URL-encoded via net/url (raw concatenation produced
//     invalid URLs for non-ASCII flash messages and broke on '&'/'=');
//   - non-string values no longer panic (the old code used an unchecked
//     type assertion v.(string));
//   - the status is 302 Found instead of 301 Moved Permanently — browsers
//     cache 301s, which is wrong for transient flash-message redirects.
func redirect(c *gin.Context, website string, h gin.H) {
	values := url.Values{}
	for k, v := range h {
		if s, ok := v.(string); ok {
			values.Set(k, s)
		} else {
			values.Set(k, fmt.Sprint(v))
		}
	}
	target := website
	if encoded := values.Encode(); encoded != "" {
		target += "?" + encoded
	}
	c.Redirect(http.StatusFound, target)
}

// redirectOK redirects with a success flash message.
func redirectOK(c *gin.Context, website string, success string) {
	redirect(c, website, gin.H{
		"success": success,
	})
}

// redirectError redirects with an error flash message.
func redirectError(c *gin.Context, website string, err string) {
	redirect(c, website, gin.H{
		"error": err,
	})
}

// redirectNotPass redirects with all collected validation errors and reports
// whether validation failed.
func redirectNotPass(c *gin.Context, website string, v MyValidator) bool {
	info := ""
	if v.HasErrors() {
		for _, err := range v.Errors {
			info += "<" + err.Error() + ">"
		}
		redirectError(c, website, info)
		return true
	}
	return false
}
package solutions func gameOfLife(board [][]int) { if len(board) == 0 || len(board[0]) == 0 { return } x, y := len(board), len(board[0]) for i := 0; i < x; i++ { for j := 0; j < y; j++ { lives := 0 lives += isAlive(board, i - 1, j - 1) lives += isAlive(board, i - 1, j) lives += isAlive(board, i - 1, j + 1) lives += isAlive(board, i, j - 1) lives += isAlive(board, i, j + 1) lives += isAlive(board, i + 1, j - 1) lives += isAlive(board, i + 1, j) lives += isAlive(board, i + 1, j + 1) if board[i][j] == 0 { if lives == 3 { board[i][j] = 2 } } else { if lives < 2 || lives > 3 { board[i][j] = 3 } } } } for i := 0; i < x; i++ { for j := 0; j < y; j++ { if board[i][j] == 2 { board[i][j] = 1 } else if board[i][j] == 3 { board[i][j] = 0 } } } } func isAlive(board [][]int, i int, j int) int { if i < 0 || j < 0 || i >= len(board) || j >= len(board[0]) || board[i][j] == 0 || board[i][j] == 2 { return 0 } return 1 }
package version // Version build time set version var Version = "_ci_build_not_run_properly_"
package mysql import ( "errors" "testing" "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/assert" ) func TestConvertMysqlError(t *testing.T) { testCases := []struct { input error returnsSame bool output error }{{ input: nil, output: nil, }, { input: errors.New(`whatever`), returnsSame: true, }, { input: &mysql.MySQLError{ Number: 1205, }, returnsSame: true, }, { input: &mysql.MySQLError{ Number: 1062, }, output: errDuplicateEntry, }} for _, tc := range testCases { actOutput := convertMysqlError(tc.input) if tc.returnsSame { assert.Same(t, tc.input, actOutput) } else { assert.Equal(t, tc.output, actOutput) } } } func TestIsLockWaitTimeout(t *testing.T) { assert.False(t, IsLockWaitTimeout(nil)) assert.False(t, IsLockWaitTimeout(errors.New(`whodathunkit`))) assert.False(t, IsLockWaitTimeout(&mysql.MySQLError{})) assert.True(t, IsLockWaitTimeout(&mysql.MySQLError{ Number: 1205, })) }
// Command evaluator runs the gRPC code-evaluation service: it receives a
// Submission, compiles/runs it, and returns an EvaluationStatus.
package main

import (
	"context"
	"flag"
	"log"
	"net"

	epb "github.com/cpjudge/proto/evaluator"
	spb "github.com/cpjudge/proto/submission"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/testdata"
)

var (
	tls      = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP")
	certFile = flag.String("cert_file", "", "The TLS cert file")
	keyFile  = flag.String("key_file", "", "The TLS key file")
	// NOTE(review): jsonDBFile is registered but never read anywhere visible;
	// kept for CLI compatibility — confirm before removing.
	jsonDBFile = flag.String("json_db_file", "", "A json file containing a list of features")
	serverAddr = flag.String("server_addr", "172.17.0.1:12000", "The server address in the format of host:port")
)

// evaluatorServer implements epb.EvaluatorServer.
type evaluatorServer struct{}

// EvaluateCode submits the code and, when the submission is accepted for
// evaluation, runs it against the question's tests, mapping the outcome to
// an epb.EvaluationStatus.
func (e *evaluatorServer) EvaluateCode(ctx context.Context, submission *spb.Submission) (*epb.CodeStatus, error) {
	codeStatus := SubmitCode(submission)
	evaluateCodeStatus := &epb.CodeStatus{}
	switch codeStatus.CodeStatus {
	case spb.SubmissionStatus_TO_BE_EVALUATED:
		// Numeric codes returned by EvaluateSubmission:
		// 0 = correct, 2 = time limit, 3 = compile error, 4 = runtime error,
		// anything else = wrong answer.
		switch EvaluateSubmission(submission.SubmissionId, submission.QuestionId) {
		case 0:
			evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_CORRECT_ANSWER
		case 2:
			evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_TIME_LIMIT_EXCEEDED
		case 3:
			evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_COMPILATION_ERROR
		case 4:
			evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_RUNTIME_ERROR
		default:
			evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_WRONG_ANSWER
		}
	case spb.SubmissionStatus_TIME_LIMIT_EXCEEDED:
		evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_TIME_LIMIT_EXCEEDED
	case spb.SubmissionStatus_COMPILATION_ERROR:
		evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_COMPILATION_ERROR
	default:
		evaluateCodeStatus.CodeStatus = epb.EvaluationStatus_WRONG_ANSWER
	}
	return evaluateCodeStatus, nil
}

// main parses flags, optionally enables TLS, and serves the Evaluator service.
func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", *serverAddr)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	if *tls {
		// Fall back to the grpc-go bundled test certificates when none given.
		if *certFile == "" {
			*certFile = testdata.Path("server1.pem")
		}
		if *keyFile == "" {
			*keyFile = testdata.Path("server1.key")
		}
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			log.Fatalf("Failed to generate credentials %v", err)
		}
		opts = []grpc.ServerOption{grpc.Creds(creds)}
	}
	grpcServer := grpc.NewServer(opts...)
	epb.RegisterEvaluatorServer(grpcServer, &evaluatorServer{})
	// BUG FIX: Serve's error used to be discarded; a failed accept loop now
	// terminates the process with a logged reason.
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
package main import ( "code.google.com/p/goprotobuf/proto" "io/ioutil" "log" "mobads_api" "net/http" ) func main() { http.HandleFunc("/api", CallBack) err := http.ListenAndServe(":8124", nil) if err != nil { log.Fatalf("start server fail . err[%s]", err.Error()) } return } func CallBack(resp http.ResponseWriter, req *http.Request) { reqbody, err := ioutil.ReadAll(req.Body) var temp_req mobads_api.BidRequest err = proto.Unmarshal(reqbody, &temp_req) var title string var desc string switch *temp_req.App.Id { case "10042c1f": title = "I am Banner ad title" desc = "I am Banner ad desc" case "10044934": title = "I am kaiping ad title" desc = "I am kaiping ad desc" case "10044933": title = "I am chaping ad title" desc = "I am chaping ad desc" } var temp_ans mobads_api.BidResponse temp_ans.RequestId = new(string) *temp_ans.RequestId = "aaaaaa" temp_ans.Ads = make([]*mobads_api.Ad, 0) var ad *mobads_api.Ad ad = new(mobads_api.Ad) ad.AdslotId = new(string) *ad.AdslotId = "123" ad.MaterialMeta = new(mobads_api.Ad_MaterialMeta) material := ad.MaterialMeta material.CreativeType = new(mobads_api.CreativeType) // *material.CreativeType = mobads_api.CreativeType_IMAGE *material.CreativeType = mobads_api.CreativeType_TEXT material.InteractionType = new(mobads_api.InteractionType) *material.InteractionType = mobads_api.InteractionType_DOWNLOAD material.Title = new(string) *material.Title = title material.Description1 = new(string) *material.Description1 = desc material.WinNoticeUrl = make([]string, 0) material.WinNoticeUrl = append(material.WinNoticeUrl, "http://192.168.1.5:8123/a.gif") material.MediaUrl = new(string) *material.MediaUrl = "http://192.168.1.5:8123/image/splash.png" material.ClickUrl = new(string) *material.ClickUrl = "http://smallkoo.wicp.net:8123/pkg/ditiepaoku_64.apk" temp_ans.Ads = append(temp_ans.Ads, ad) buf, err := proto.Marshal(&temp_ans) if err != nil { log.Printf("serialize fail . err[%s]", err.Error()) } resp.Write(buf) return }
// Command tool-adapter bridges NATS scan events to an external CLI security
// tool: it runs the tool with templated arguments, captures its output,
// uploads the result to Minio, and publishes a completion event.
package main

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	minio "github.com/minio/minio-go"
	nats "github.com/nats-io/go-nats"
	log "github.com/sirupsen/logrus"
)

// PubSubEvent is the inbound NATS message that triggers a tool run.
type PubSubEvent struct {
	ScanID     string `json:"scan_id"`
	AssetType  string `json:"asset_type"`
	AssetValue string `json:"asset_value"`
}

// TargetInfo echoes the scanned asset back inside a CompletionEvent.
type TargetInfo struct {
	AssetType  string `json:"asset_type"`
	AssetValue string `json:"asset_value"`
}

// CompletionEvent is published to NATS when a tool run finishes.
type CompletionEvent struct {
	ScanID     string     `json:"scan_id"`
	Status     string     `json:"status"`
	ToolName   string     `json:"tool_name"`
	TargetInfo TargetInfo `json:"target_info"`
	Path       string     `json:"path"` // Minio object path; set on success only
}

var TOOL_ADAPTER_VERSION string // Injected at build time

var COMPLETION_EVENT_ERROR = "Error"
var COMPLETION_EVENT_SUCCESS = "Success"

// getConfigValue reads a configuration value from the environment.
func getConfigValue(key string) string {
	return os.Getenv(key)
}

// getOutputFilePath creates (and immediately closes) a temp file, returning
// its absolute path for the tool to write into.
func getOutputFilePath() (string, error) {
	file, err := ioutil.TempFile("", "execTool")
	if err != nil {
		log.Warn("Failed to create temporary file: ", err.Error())
		return "", err
	}
	file.Close()
	fp, err := filepath.Abs(file.Name())
	if err != nil {
		log.Warn("Failed to temporary file path: ", err.Error())
		return "", err
	}
	return fp, nil
}

// randomHex returns n random bytes hex-encoded, or a sentinel on failure.
func randomHex(n int) string {
	bytes := make([]byte, n)
	if _, err := rand.Read(bytes); err != nil {
		return "FAILED-TO-GENERATE-RANDOM-HEX"
	}
	return hex.EncodeToString(bytes)
}

// replaceStrPlaceholders expands the {{...}} template placeholders used in
// exec patterns and Minio object paths.
// TODO: Shell escape all values
func replaceStrPlaceholders(str string, event *PubSubEvent, outputFilePath string) string {
	str = strings.Replace(str, "{{SCAN_ID}}", event.ScanID, -1)
	str = strings.Replace(str, "{{TARGET}}", event.AssetValue, -1)
	str = strings.Replace(str, "{{OUTPUT_FILE_PATH}}", outputFilePath, -1)
	str = strings.Replace(str, "{{TIMESTAMP}}", strconv.FormatInt(time.Now().UnixNano(), 10), -1)
	str = strings.Replace(str, "{{RANDHEX}}", randomHex(8), -1)
	str = strings.Replace(str, "{{TOOL_NAME}}", getConfigValue("TOOL_NAME"), -1)
	return str
}

// getExecTimeout returns the tool timeout in seconds, defaulting to 60.
func getExecTimeout() int {
	timeout := getConfigValue("TOOL_EXEC_TIMEOUT")
	if timeout == "" {
		timeout = "60"
	}
	t, err := strconv.Atoi(timeout)
	if err != nil {
		return 60
	}
	return t
}

// printUploadStatus logs the outcome of a Minio upload.
func printUploadStatus(n int64, err error) {
	if err != nil {
		log.Warn("Failed to upload to minio: ", err.Error())
	} else {
		log.Infof("Successfully uploaded to minio: [size %d bytes]", n)
	}
}

// minioDeployOutput uploads either the captured stdout or the tool's output
// file to Minio, then publishes the matching completion event.
func minioDeployOutput(event *PubSubEvent, stdOut bytes.Buffer, outputFilePath string) {
	endpoint := getConfigValue("MINIO_ENDPOINT")
	accessKeyID := getConfigValue("MINIO_ACCESS_KEY")
	secretAccessKey := getConfigValue("MINIO_SECRET_KEY")
	useSSL := false

	log.Infof("Deploying STDOUT:%d bytes OutputFile:%s", stdOut.Len(), outputFilePath)
	client, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
	if err != nil {
		log.Warn("Failed to connect to Minio endpoint")
		sendCompletionEvent(COMPLETION_EVENT_ERROR, event, getConfigValue("TOOL_NAME"), "")
		return
	}

	bucketName := getConfigValue("MINIO_OUTPUT_BUCKET")
	location := getConfigValue("MINIO_OUTPUT_FILE")

	// Creating Minio bucket. Here we fail silently if bucket exists.
	err = client.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		// TODO: Fail if bucket doesn't exist
	}

	location = replaceStrPlaceholders(location, event, outputFilePath)
	log.Info("Writing to Minio: Bucket: ", bucketName, " Location: ", location)
	contentType := "application/json"

	// BUG FIX: the upload error used to be shadowed by `:=` inside each
	// branch, so the completion-event decision below tested the stale
	// MakeBucket error (often non-nil for an existing bucket) instead of the
	// actual upload result. Track the upload outcome explicitly.
	var uploadErr error
	if len(getConfigValue("TOOL_CAPTURE_STDOUT")) > 0 {
		log.Info("Sending stdout to Minio")
		n, err := client.PutObject(bucketName, location, strings.NewReader(stdOut.String()), -1, minio.PutObjectOptions{ContentType: contentType})
		printUploadStatus(n, err)
		uploadErr = err
	} else {
		log.Info("Sending output file to Minio")
		n, err := client.FPutObject(bucketName, location, outputFilePath, minio.PutObjectOptions{ContentType: contentType})
		printUploadStatus(n, err)
		uploadErr = err
	}

	if uploadErr != nil {
		log.Warn("Sending error completion event")
		sendCompletionEvent(COMPLETION_EVENT_ERROR, event, getConfigValue("TOOL_NAME"), "")
	} else {
		log.Info("Sending success completion event")
		sendCompletionEvent(COMPLETION_EVENT_SUCCESS, event, getConfigValue("TOOL_NAME"), location)
	}
}

// deployOutput is the storage-backend indirection point (currently Minio only).
func deployOutput(event *PubSubEvent, stdout bytes.Buffer, outputFilePath string) {
	minioDeployOutput(event, stdout, outputFilePath)
}

// sendCompletionEvent publishes a CompletionEvent to the configured NATS topic.
// persistentFilePath is included only for successful runs.
func sendCompletionEvent(status string, event *PubSubEvent, toolName string, persistentFilePath string) {
	completionEvent := new(CompletionEvent)
	completionEventTopic := getConfigValue("TOOL_COMPLETION_EVENT_TOPIC")
	if len(completionEventTopic) == 0 {
		log.Warn("Completion event topic is not defined")
		return
	}
	completionEvent.ScanID = event.ScanID
	completionEvent.Status = status
	completionEvent.ToolName = toolName
	completionEvent.TargetInfo.AssetType = event.AssetType
	completionEvent.TargetInfo.AssetValue = event.AssetValue
	if status == COMPLETION_EVENT_SUCCESS {
		completionEvent.Path = persistentFilePath
	}
	jsonEv, err := json.Marshal(completionEvent)
	if err != nil {
		log.Warn("Failed to generate JSON from completion event")
		return
	}
	nc, err := nats.Connect(getConfigValue("NATS_URL"))
	if err != nil {
		log.Warn("Failed to connect to NATS")
		return
	}
	// BUG FIX: the connection now closes on every path, and the publish
	// error is no longer silently dropped.
	defer nc.Close()
	if err := nc.Publish(completionEventTopic, jsonEv); err != nil {
		log.Warn("Failed to publish completion event: ", err.Error())
		return
	}
	log.Info("Published completion event to NATS")
}

// execToolAndGetOutput runs the configured tool against the event's asset,
// enforcing the exec timeout, then deploys the output on success.
func execToolAndGetOutput(event *PubSubEvent) {
	log.Info("Executing external tool on PubSub event")
	var err error
	execPattern := getConfigValue("TOOL_EXEC_PATTERN")
	outputFilePath := getConfigValue("TOOL_USE_OUTPUT_FILE_PATH")
	if len(outputFilePath) == 0 {
		outputFilePath, err = getOutputFilePath()
		if err != nil {
			log.Warn("Failed to generated output file path: ", err.Error())
			return
		}
	}
	// TODO: Shell escape this string
	execPattern = replaceStrPlaceholders(execPattern, event, outputFilePath)
	log.Info("Running exec pattern: ", execPattern)

	// We need this to be able to pipe shell commands
	cmd := exec.Command("sh", "-c", execPattern)
	var stdOut bytes.Buffer
	var stdErr bytes.Buffer
	cmd.Stdout = &stdOut
	cmd.Stderr = &stdErr

	// BUG FIX: Start's error used to be ignored, leading to a nil-process
	// Kill/Wait on spawn failure.
	if err = cmd.Start(); err != nil {
		log.Warn("Failed to start command: ", err.Error())
		sendCompletionEvent(COMPLETION_EVENT_ERROR, event, getConfigValue("TOOL_NAME"), "")
		return
	}
	done := make(chan error)
	go func() { done <- cmd.Wait() }()

	timeout := time.After(time.Duration(getExecTimeout()) * time.Second)
	select {
	case <-timeout:
		cmd.Process.Kill()
		log.Warn("Command execution timed out!")
	case err := <-done:
		if err != nil {
			log.Warn("Non-zero exit code from command: ", err.Error())
			log.Info("STDOUT: ")
			log.Info(stdOut.String())
			log.Info("STDERR: ")
			log.Info(stdErr.String())
			log.Info("Sending error completion event")
			sendCompletionEvent(COMPLETION_EVENT_ERROR, event, getConfigValue("TOOL_NAME"), "")
		} else {
			log.Info("Command execution finished successfully")
			log.Info("STDOUT: ")
			log.Info(stdOut.String())
			log.Info("STDERR: ")
			log.Info(stdErr.String())
			deployOutput(event, stdOut, outputFilePath)
		}
	}
}

// handleNatsEvent validates and dispatches one inbound NATS message.
func handleNatsEvent(m *nats.Msg) {
	log.Info("Received a message: ", string(m.Data))
	var event PubSubEvent
	err := json.Unmarshal(m.Data, &event)
	if err != nil {
		log.Warn("Error JSON decoding message: ", err.Error())
		return
	}
	if (event.ScanID == "") || (event.AssetType == "") || (event.AssetValue == "") {
		log.Warn("Input JSON schema is incorrect")
		return
	}
	execToolAndGetOutput(&event)
}

// displayBanner logs the running adapter version.
func displayBanner() {
	log.Info("Tool Adapter version: ", TOOL_ADAPTER_VERSION, " running..")
}

// startConsumer connects to NATS and blocks forever consuming scan events,
// using a queue subscription when NATS_QUEUE_GROUP_NAME is configured.
func startConsumer() {
	log.Info("Starting consumer loop")
	nc, err := nats.Connect(getConfigValue("NATS_URL"),
		nats.DisconnectHandler(func(c *nats.Conn) {
			log.Warn("NATS connection lost")
		}),
		nats.ReconnectHandler(func(c *nats.Conn) {
			log.Info("Re-established connection with NATS server")
		}),
		nats.ClosedHandler(func(c *nats.Conn) {
			log.Fatal("Connection closed with NATS server")
		}),
		nats.MaxReconnects(5),
		nats.ReconnectWait(10*time.Second))
	if err != nil {
		log.Fatal("Failed to connect NATS: ", err.Error())
		return
	}
	queueGroupName := getConfigValue("NATS_QUEUE_GROUP_NAME")
	if len(queueGroupName) > 0 {
		log.Infof("Using queue subscription with group: %s", queueGroupName)
		nc.QueueSubscribe(getConfigValue("NATS_CONSUMER_TOPIC"), queueGroupName, func(m *nats.Msg) {
			handleNatsEvent(m)
		})
	} else {
		log.Info("Using topic subscription")
		nc.Subscribe(getConfigValue("NATS_CONSUMER_TOPIC"), func(m *nats.Msg) {
			handleNatsEvent(m)
		})
	}
	nc.Flush()
	runtime.Goexit() // Blocking
}

func main() {
	loggerInit()
	displayBanner()
	startConsumer()
}

// loggerInit configures structured JSON logging on stdout at Info level.
func loggerInit() {
	formatter := &log.JSONFormatter{
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "timestamp",
			log.FieldKeyLevel: "severity",
			log.FieldKeyMsg:   "message",
		},
	}
	log.SetFormatter(formatter)
	log.SetOutput(os.Stdout)
	log.SetLevel(log.InfoLevel)
}
package minicon

import (
	"github.com/nsf/termbox-go" // termbox; for event handling
)

//
// InputHandler processes terminal input events to various useful ends:
// buffering the pending command, echoing to the screen, cycling history,
// and pacing the teletype.
//
type InputHandler struct {
	prompt   *Prompt       // input adds to the user's buffered, pending command
	box      *TermBox      // input displays on the screen
	teletype *Teletype     // input can advance the speed of the teletype
	history  *History      // input can add and cycle through history
	marker   HistoryMarker // restores history if the user cycles through history
}

//
// NewInputHandler builds a handler bound to one prompt/box/teletype/history
// set. Every unique user entry needs its own InputHandler.
//
func NewInputHandler(
	input *Prompt,
	box *TermBox,
	teletype *Teletype,
	history *History,
) *InputHandler {
	return &InputHandler{input, box, teletype, history, history.Mark()}
}

//
// RestoreHistory returns history to its most recent moment in time
// (in case the user cycled through old commands via input).
//
func (this *InputHandler) RestoreHistory() {
	this.history.Restore(this.marker)
}

//
// HandleTermEvent processes a single terminal event:
// reads user input, adds characters to the prompt, cycles through history.
// Returns the user's command if they typed text and pressed enter
// (a single space stands in for an empty enter press); empty string otherwise.
// Returns refresh == true if the terminal screen needs a redraw
// (e.g. a resize event was detected).
//
func (this *InputHandler) HandleTermEvent(evt termbox.Event,
) (userInput string, refresh bool,
) {
	prompt, box := this.prompt, this.box
	switch evt.Type {
	case termbox.EventKey:
		if evt.Ch != 0 {
			// Printable rune: append and repaint.
			prompt.WriteRune(box, evt.Ch).Flush()
		} else {
			switch evt.Key {
			case termbox.KeyDelete, termbox.KeyBackspace, termbox.KeyBackspace2:
				prompt.DeleteRune(box).Flush()

			// history:
			case termbox.KeyArrowUp:
				// if the items back in time have been exhausted, str is blank.
				// don't clear the input, stick to that last item.
				if str, ok := this.history.Back(); ok {
					prompt.SetInput(box, str).Flush()
				}
			case termbox.KeyArrowDown:
				// if the items forward have been exhausted, str is blank.
				// it's okay, good even, to restore the input to a blank string
				str, _ := this.history.Forward()
				prompt.SetInput(box, str).Flush()

			// got valid input
			case termbox.KeyEnter:
				userInput = prompt.Clear()
				if userInput == "" {
					userInput = " " // to distinguish between no input at all.
				} else {
					this.marker = this.history.Add(userInput, this.marker)
				}

			case termbox.KeySpace:
				prompt.WriteRune(box, SpaceRune).Flush()

			case termbox.KeyEsc, termbox.KeyTab:
				// escape/tab double as a teletype speed toggle
				this.teletype.NextSpeed()
			}
		}
	case termbox.EventResize:
		refresh = true
		//case termbox.EventError, termbox.EventInterrupt:
		//	break WaitLoop
		//case termbox.EventMouse:
		//case termbox.EventRaw:
	}
	return userInput, refresh
}
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package execgen

import (
	"fmt"
	"go/token"

	"github.com/cockroachdb/errors"
	"github.com/dave/dst"
	"github.com/dave/dst/dstutil"
)

// inlineFuncs takes an input file's contents and inlines all functions
// annotated with // execgen:inline into their callsites via AST manipulation.
func inlineFuncs(f *dst.File) {
	// First, run over the input file, searching for functions that are annotated
	// with execgen:inline.
	inlineFuncMap := extractInlineFuncDecls(f)

	// Do a second pass over the AST, this time replacing calls to the inlined
	// functions with the inlined function itself.
	inlineFunc(inlineFuncMap, f)
}

// inlineFunc walks n, replacing every call to a function in inlineFuncMap with
// an inlined copy of that function's body. Calls appearing as the RHS of an
// assignment (a = foo()) and as bare expression statements (foo()) are
// handled; funcIdx numbers each inlined callsite so generated goto labels are
// unique within the output.
func inlineFunc(inlineFuncMap map[string]funcInfo, n dst.Node) dst.Node {
	var funcIdx int
	return dstutil.Apply(n, func(cursor *dstutil.Cursor) bool {
		// NOTE(review): the result of cursor.Index() is discarded; this call
		// appears to be dead code — confirm before removing.
		cursor.Index()
		n := cursor.Node()
		// There are two cases. AssignStmt, which are like:
		// a = foo()
		// and ExprStmt, which are simply:
		// foo()
		// AssignStmts need to do extra work for inlining, because we have to
		// simulate producing return values.
		switch n := n.(type) {
		case *dst.AssignStmt:
			// Search for assignment function call:
			// a = foo()
			// NOTE(review): n.Rhs[0] is indexed before the len(n.Rhs) > 1 check
			// below; an AssignStmt always has at least one RHS, so this is safe,
			// but the multi-RHS panic fires only after the type assertion.
			callExpr, ok := n.Rhs[0].(*dst.CallExpr)
			if !ok {
				return true
			}
			funcInfo := getInlinedFunc(inlineFuncMap, callExpr)
			if funcInfo == nil {
				return true
			}
			// We want to recurse here because funcInfo itself might have calls to
			// inlined functions.
			funcInfo.decl = inlineFunc(inlineFuncMap, funcInfo.decl).(*dst.FuncDecl)
			if len(n.Rhs) > 1 {
				panic("can't do template replacement with more than a single RHS to a CallExpr")
			}
			if n.Tok == token.DEFINE {
				// We need to put a variable declaration for the new defined variables
				// in the parent scope.
				newDefinitions := &dst.GenDecl{
					Tok:   token.VAR,
					Specs: make([]dst.Spec, len(n.Lhs)),
				}
				for i, e := range n.Lhs {
					// If we had foo, bar := thingToInline(), we'd get
					// var (
					//   foo int
					//   bar int
					// )
					newDefinitions.Specs[i] = &dst.ValueSpec{
						Names: []*dst.Ident{dst.NewIdent(e.(*dst.Ident).Name)},
						Type:  dst.Clone(funcInfo.decl.Type.Results.List[i].Type).(dst.Expr),
					}
				}
				cursor.InsertBefore(&dst.DeclStmt{Decl: newDefinitions})
			}
			// Now we've got a callExpr. We need to inline the function call, and
			// convert the result into the assignment variable.
			decl := funcInfo.decl
			// Produce declarations for each return value of the function to inline.
			retValDeclStmt, retValNames := extractReturnValues(decl)
			// inlinedStatements is a BlockStmt (a set of statements within curly
			// braces) that contains the entirety of the statements that result from
			// inlining the call. We make this a BlockStmt to avoid issues with
			// variable shadowing.
			// The first thing that goes in the BlockStmt is the ret val declarations.
			// When we're done, the BlockStmt for a statement
			//    a, b = foo(x, y)
			// where foo was defined as
			//    func foo(b string, c string) { ... }
			// will look like:
			// {
			//     var (
			//         __retval_0 bool
			//         __retval_1 int
			//     )
			//     ...
			//     {
			//         b := x
			//         c := y
			//         ... the contents of func foo() except its return ...
			//         {
			//             // If foo() had `return true, j`, we'll generate the code:
			//             __retval_0 = true
			//             __retval_1 = j
			//         }
			//     }
			//     a = __retval_0
			//     b = __retval_1
			// }
			inlinedStatements := &dst.BlockStmt{
				List: []dst.Stmt{retValDeclStmt},
			}
			body := dst.Clone(decl.Body).(*dst.BlockStmt)

			// Replace return statements with assignments to the return values.
			// Make a copy of the function to inline, and walk through it, replacing
			// return statements at the end of the body with assignments to the return
			// value declarations we made first.
			body = replaceReturnStatements(decl.Name.Name, funcIdx, body, func(stmt *dst.ReturnStmt) dst.Stmt {
				returnAssignmentSpecs := make([]dst.Stmt, len(retValNames))
				for i := range retValNames {
					returnAssignmentSpecs[i] = &dst.AssignStmt{
						Lhs: []dst.Expr{dst.NewIdent(retValNames[i])},
						Tok: token.ASSIGN,
						Rhs: []dst.Expr{stmt.Results[i]},
					}
				}
				// Replace the return with the new assignments.
				return &dst.BlockStmt{List: returnAssignmentSpecs}
			})
			// Reassign input parameters to formal parameters.
			reassignmentStmt := getFormalParamReassignments(decl, callExpr)
			inlinedStatements.List = append(inlinedStatements.List, &dst.BlockStmt{
				List: append([]dst.Stmt{reassignmentStmt}, body.List...),
			})
			// Assign mangled return values to the original assignment variables.
			newAssignment := dst.Clone(n).(*dst.AssignStmt)
			newAssignment.Tok = token.ASSIGN
			newAssignment.Rhs = make([]dst.Expr, len(retValNames))
			for i := range retValNames {
				newAssignment.Rhs[i] = dst.NewIdent(retValNames[i])
			}
			inlinedStatements.List = append(inlinedStatements.List, newAssignment)
			cursor.Replace(inlinedStatements)
		case *dst.ExprStmt:
			// Search for raw function call:
			// foo()
			callExpr, ok := n.X.(*dst.CallExpr)
			if !ok {
				return true
			}
			funcInfo := getInlinedFunc(inlineFuncMap, callExpr)
			if funcInfo == nil {
				return true
			}
			// We want to recurse here because funcInfo itself might have calls to
			// inlined functions.
			funcInfo.decl = inlineFunc(inlineFuncMap, funcInfo.decl).(*dst.FuncDecl)
			decl := funcInfo.decl
			reassignments := getFormalParamReassignments(decl, callExpr)
			// This case is simpler than the AssignStmt case. It's identical, except
			// there is no mangled return value name block, nor re-assignment to
			// the mangled returns after the inlined function.
			funcBlock := &dst.BlockStmt{
				List: []dst.Stmt{reassignments},
			}
			body := dst.Clone(decl.Body).(*dst.BlockStmt)

			// Remove return values if there are any, since we're ignoring returns
			// as a raw function call.
			body = replaceReturnStatements(decl.Name.Name, funcIdx, body, nil)
			// Add the inlined function body to the block.
			funcBlock.List = append(funcBlock.List, body.List...)
			cursor.Replace(funcBlock)
		default:
			return true
		}
		funcIdx++
		return true
	}, nil)
}

// extractInlineFuncDecls searches the input file for functions that are
// annotated with execgen:inline, extracts them into templateFuncMap, and
// deletes them from the AST.
func extractInlineFuncDecls(f *dst.File) map[string]funcInfo {
	ret := make(map[string]funcInfo)
	dstutil.Apply(f, func(cursor *dstutil.Cursor) bool {
		n := cursor.Node()
		switch n := n.(type) {
		case *dst.FuncDecl:
			var mustInline bool
			for _, dec := range n.Decorations().Start.All() {
				if dec == "// execgen:inline" {
					mustInline = true
				}
			}
			if !mustInline {
				// Nothing to do, but recurse further.
				return true
			}
			for _, p := range n.Type.Params.List {
				if len(p.Names) > 1 {
					// If we have a definition like this:
					// func a (a, b int) int
					// We're just giving up for now out of complete laziness.
					panic("can't currently deal with multiple names per type in decls")
				}
			}
			var info funcInfo
			info.decl = dst.Clone(n).(*dst.FuncDecl)
			// Store the function in a map.
			ret[n.Name.Name] = info
			// Replace the function textually with a fake constant, such as:
			// `const _ = "inlined_blahFunc"`. We do this instead
			// of completely deleting it to prevent "important comments" above the
			// function to be deleted, such as template comments like {{end}}. This
			// is kind of a quirk of the way the comments are parsed, but nonetheless
			// this is an easy fix so we'll leave it for now.
			cursor.Replace(&dst.GenDecl{
				Tok: token.CONST,
				Specs: []dst.Spec{
					&dst.ValueSpec{
						Names: []*dst.Ident{dst.NewIdent("_")},
						Values: []dst.Expr{
							&dst.BasicLit{
								Kind:  token.STRING,
								Value: fmt.Sprintf(`"inlined_%s"`, n.Name.Name),
							},
						},
					},
				},
				Decs: dst.GenDeclDecorations{
					NodeDecs: n.Decs.NodeDecs,
				},
			})
			return false
		}
		return true
	}, nil)
	return ret
}

// extractReturnValues generates return value variables. It will produce one
// statement per return value of the input FuncDecl. For example, for
// a FuncDecl that returns two boolean arguments, lastVal and lastValNull,
// two statements will be returned:
//   var __retval_lastVal bool
//   var __retval_lastValNull bool
// The second return is a slice of the names of each of the mangled return
// declarations, in this example, __retval_lastVal and __retval_lastValNull.
func extractReturnValues(decl *dst.FuncDecl) (retValDeclStmt dst.Stmt, retValNames []string) {
	if decl.Type.Results == nil {
		return &dst.EmptyStmt{}, nil
	}
	results := decl.Type.Results.List
	retValNames = make([]string, len(results))
	specs := make([]dst.Spec, len(results))
	for i, result := range results {
		var retvalName string
		// Make a mangled name.
		if len(result.Names) == 0 {
			retvalName = fmt.Sprintf("__retval_%d", i)
		} else {
			retvalName = fmt.Sprintf("__retval_%s", result.Names[0])
		}
		retValNames[i] = retvalName
		specs[i] = &dst.ValueSpec{
			Names: []*dst.Ident{dst.NewIdent(retvalName)},
			Type:  dst.Clone(result.Type).(dst.Expr),
		}
	}
	return &dst.DeclStmt{
		Decl: &dst.GenDecl{
			Tok:   token.VAR,
			Specs: specs,
		},
	}, retValNames
}

// getFormalParamReassignments creates a new DEFINE (:=) statement per parameter
// to a FuncDecl, which makes a fresh variable with the same name as the formal
// parameter name and assigns it to the corresponding name in the CallExpr.
//
// For example, given a FuncDecl:
//
// func foo(a int, b string) { ... }
//
// and a CallExpr
//
// foo(x, y)
//
// we'll return the statement:
//
// var (
//   a int = x
//   b string = y
// )
//
// In the case where the formal parameter name is the same as the input
// parameter name, no extra assignment is created.
func getFormalParamReassignments(decl *dst.FuncDecl, callExpr *dst.CallExpr) dst.Stmt {
	formalParams := decl.Type.Params.List
	reassignmentSpecs := make([]dst.Spec, 0, len(formalParams))
	for i, formalParam := range formalParams {
		if inputIdent, ok := callExpr.Args[i].(*dst.Ident); ok && inputIdent.Name == formalParam.Names[0].Name {
			continue
		}
		reassignmentSpecs = append(reassignmentSpecs, &dst.ValueSpec{
			Names:  []*dst.Ident{dst.NewIdent(formalParam.Names[0].Name)},
			Type:   dst.Clone(formalParam.Type).(dst.Expr),
			Values: []dst.Expr{callExpr.Args[i]},
		})
	}
	if len(reassignmentSpecs) == 0 {
		return &dst.EmptyStmt{}
	}
	return &dst.DeclStmt{
		Decl: &dst.GenDecl{
			Tok:   token.VAR,
			Specs: reassignmentSpecs,
		},
	}
}

// replaceReturnStatements edits the input BlockStmt, from the function funcName,
// replacing ReturnStmts at the end of the BlockStmts with the results of
// applying returnEditor on the ReturnStmt or deleting them if the modifier is
// nil.
// It will panic if any return statements are not in the final position of the
// input block.
func replaceReturnStatements(
	funcName string, funcIdx int, stmt *dst.BlockStmt, returnModifier func(*dst.ReturnStmt) dst.Stmt,
) *dst.BlockStmt {
	if len(stmt.List) == 0 {
		return stmt
	}
	// Insert an explicit return at the end if there isn't one.
	// We'll need to edit this later to make early returns work properly.
	lastStmt := stmt.List[len(stmt.List)-1]
	if _, ok := lastStmt.(*dst.ReturnStmt); !ok {
		ret := &dst.ReturnStmt{}
		stmt.List = append(stmt.List, ret)
		lastStmt = ret
	}
	retStmt := lastStmt.(*dst.ReturnStmt)
	if returnModifier == nil {
		stmt.List[len(stmt.List)-1] = &dst.EmptyStmt{}
	} else {
		stmt.List[len(stmt.List)-1] = returnModifier(retStmt)
	}
	label := dst.NewIdent(fmt.Sprintf("%s_return_%d", funcName, funcIdx))
	// Find returns that weren't at the end of the function and replace them with
	// labeled gotos.
	var foundInlineReturn bool
	stmt = dstutil.Apply(stmt, func(cursor *dstutil.Cursor) bool {
		n := cursor.Node()
		switch n := n.(type) {
		case *dst.FuncLit:
			// A FuncLit is a function literal, like:
			// x := func() int { return 3 }
			// We don't recurse into function literals since the return statements
			// they contain aren't relevant to the inliner.
			return false
		case *dst.ReturnStmt:
			foundInlineReturn = true
			gotoStmt := &dst.BranchStmt{
				Tok:   token.GOTO,
				Label: dst.Clone(label).(*dst.Ident),
			}
			if returnModifier != nil {
				cursor.Replace(returnModifier(n))
				cursor.InsertAfter(gotoStmt)
			} else {
				cursor.Replace(gotoStmt)
			}
			return false
		}
		return true
	}, nil).(*dst.BlockStmt)

	if foundInlineReturn {
		// Add the label at the end.
		stmt.List = append(stmt.List, &dst.LabeledStmt{
			Label: label,
			Stmt:  &dst.EmptyStmt{Implicit: true},
		})
	}
	return stmt
}

// getInlinedFunc returns the corresponding FuncDecl for a CallExpr from the
// map, using the CallExpr's name to look up the FuncDecl from templateFuncs.
func getInlinedFunc(templateFuncs map[string]funcInfo, n *dst.CallExpr) *funcInfo {
	ident, ok := n.Fun.(*dst.Ident)
	if !ok {
		return nil
	}

	info, ok := templateFuncs[ident.Name]
	if !ok {
		return nil
	}
	decl := info.decl
	// Template parameters (declared elsewhere on funcInfo) count toward the
	// expected argument total alongside the declared formal parameters.
	if decl.Type.Params.NumFields()+len(info.templateParams) != len(n.Args) {
		panic(errors.Newf(
			"%s expected %d arguments, found %d",
			decl.Name, decl.Type.Params.NumFields(), len(n.Args)),
		)
	}
	return &info
}
package kontena

import (
	"fmt"

	"github.com/inloop/goclitools"

	"github.com/jakubknejzlik/kontena-git-cli/model"
	"github.com/jakubknejzlik/kontena-git-cli/utils"
)

// RegistryExists reports whether an external registry with the given name is
// already configured in the current grid.
func (c *Client) RegistryExists(name string) (bool, error) {
	names, err := c.RegistryList()
	if err != nil {
		return false, err
	}
	return utils.ArrayOfStringsContains(names, name), nil
}

// RegistryExistsInGrid reports whether an external registry with the given
// name is configured in the specified grid.
func (c *Client) RegistryExistsInGrid(grid, name string) (bool, error) {
	names, err := c.RegistryListInGrid(grid)
	if err != nil {
		return false, err
	}
	return utils.ArrayOfStringsContains(names, name), nil
}

// RegistryAdd registers an external Docker registry using the credentials
// carried by the given model.
func (c *Client) RegistryAdd(registry model.Registry) error {
	command := fmt.Sprintf(
		"kontena external-registry add --username %s --password %s --email %s https://%s/v2/",
		registry.User, registry.Password, registry.Email, registry.Name,
	)
	return goclitools.RunInteractive(command)
}

// RegistryAddToGrid registers an external Docker registry in the specified
// grid using the credentials carried by the given model.
func (c *Client) RegistryAddToGrid(grid string, registry model.Registry) error {
	command := fmt.Sprintf(
		"kontena external-registry add --grid %s --username %s --password %s --email %s https://%s/v2/",
		grid, registry.User, registry.Password, registry.Email, registry.Name,
	)
	return goclitools.RunInteractive(command)
}

// RegistryRemove force-removes the named external registry from the current
// grid.
func (c *Client) RegistryRemove(name string) error {
	command := fmt.Sprintf("kontena external-registry rm --force %s", name)
	return goclitools.RunInteractive(command)
}

// RegistryRemoveFromGrid force-removes the named external registry from the
// specified grid.
func (c *Client) RegistryRemoveFromGrid(grid, name string) error {
	command := fmt.Sprintf("kontena external-registry rm --grid %s --force %s", grid, name)
	return goclitools.RunInteractive(command)
}

// RegistryList returns the names of the external registries configured in the
// current grid (first column of the CLI listing, header skipped by awk).
func (c *Client) RegistryList() ([]string, error) {
	output, err := goclitools.Run("kontena external-registry ls | awk 'FNR>1{printf \"%s \",$1}'")
	if err != nil {
		return nil, err
	}
	return utils.SplitString(string(output), " "), nil
}

// RegistryListInGrid returns the names of the external registries configured
// in the specified grid.
func (c *Client) RegistryListInGrid(grid string) ([]string, error) {
	output, err := goclitools.Run(fmt.Sprintf("kontena external-registry ls --grid %s | awk 'FNR>1{printf \"%%s \",$1}'", grid))
	if err != nil {
		return nil, err
	}
	return utils.SplitString(string(output), " "), nil
}
// Package stringUtils : from https://github.com/elgs/gostrgen package stringUtils import ( "errors" "math/rand" "strings" "time" ) // Lower permet d'indiquer si l'on souhaite utiliser les lettres minuscules var Lower = 1 << 0 // Upper permet d'indiquer si l'on souhaite utiliser les lettres majuscules var Upper = 1 << 1 // Digit permet d'indiquer si l'on souhaite utiliser les chiffre var Digit = 1 << 2 // Punct permet d'indiquer si l'on souhaite utiliser la ponctuation var Punct = 1 << 3 // LowerUpper : indique l'on souhaite utiliser les lettres minucules et majuscules (raccourcis pour Lower | Upper) var LowerUpper = Lower | Upper // LowerDigit : indique l'on souhaite utiliser les lettres minucules et les chiffres (raccourcis pour Lower | Digit) var LowerDigit = Lower | Digit // UpperDigit indique l'on souhaite utiliser les lettres majuscules et les chiffres (raccourcis pour Upper | Digit) var UpperDigit = Upper | Digit // LowerUpperDigit indique que l'on souhaite utiliser les chiffres et les lettres minucules et majuscules (raccourcis pour LowerUpper | Digit) var LowerUpperDigit = LowerUpper | Digit // All : indique que l'on souhaites utilisers les lettres minucules et majuscules ainsi que les chiffres et la ponctuation var All = LowerUpperDigit | Punct var lower = "abcdefghijklmnopqrstuvwxyz" var upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" var digit = "0123456789" var punct = "~!@#$%^&*()_+-=" func init() { rand.Seed(time.Now().UTC().UnixNano()) } // RandGen : Génération d'une chaîne alléatoire. 
// size : taille de la chaine finale // set : caractère à utiliser, en fonction des "set" prédéfinie ci dessus, c'est un "ou" binaire entre chaque // includes (string) : caractère supplémentaire à inclure (qui ne sont pas dans les set existants) // exclude (string) caractère à exclure (par exemple o majuscule pour pas confondre avec zéro, ou l minuscule pour pas confondre avec un) func RandGen(size int, set int, include string, exclude string) (string, error) { all := include if set&Lower > 0 { all += lower } if set&Upper > 0 { all += upper } if set&Digit > 0 { all += digit } if set&Punct > 0 { all += punct } lenAll := len(all) if len(exclude) >= lenAll { return "", errors.New("Too much to exclude.") } buf := make([]byte, size) for i := 0; i < size; i++ { b := all[rand.Intn(lenAll)] if strings.Contains(exclude, string(b)) { i-- continue } buf[i] = b } return string(buf), nil }
package producer

import (
	"asyncMessageSystem/app/common"
	"asyncMessageSystem/app/middleware/log"
	"asyncMessageSystem/app/model"
	"encoding/json"
	"github.com/Braveheart7854/rabbitmqPool"
	//"asyncMessageSystem/app/middleware/rabbitmqPool"
	"github.com/kataras/iris"
	"strconv"
	"time"
)

// Produce exposes the HTTP handlers that publish notification messages to
// RabbitMQ and query the message list.
type Produce struct {}

// Producer is the handler contract implemented by Produce.
type Producer interface {
	Notify(ctx iris.Context)
	Read(ctx iris.Context)
}

//const (
//	EXCHANGE_NOTICE = "exchange_wxforum_notice"
//	ROUTE_NOTICE = "route_wxforum_notice"
//)

// Notice is the message payload published to RabbitMQ.
type Notice struct {
	Uid uint64 `json:"uid"`
	Type int `json:"type"`
	Data string `json:"data"`
	CreateTime string `json:"createTime"`
}

// ReturnJson is the uniform JSON envelope returned to HTTP clients.
type ReturnJson struct {
	Code int `json:"code"`
	Msg string `json:"msg"`
	Data map[string]interface{} `json:"data"`
}

// Notify creates a new notification message. (Translated from the original
// Chinese comment: "add message".)
//
// It reads uid/type/data/time from the POST form, publishes the payload to
// the notice exchange in a fire-and-forget goroutine (failures are logged via
// NotifyLogger rather than reported to the client), and immediately responds
// with a success envelope.
func (P *Produce) Notify(ctx iris.Context) {
	uid := ctx.PostValueInt64Default("uid",0)
	n_type := ctx.PostValueIntDefault("type",0)
	data := ctx.PostValueDefault("data","")
	createTime := ctx.PostValueDefault("time",time.Now().Format("2006-01-02 15:04:05"))
	var noticeData Notice
	noticeData.Uid = uint64(uid)
	noticeData.Type = n_type
	noticeData.Data = data
	noticeData.CreateTime = createTime
	//common.Log("./log1.txt",data)
	go func() {
		// Publish asynchronously; the HTTP response does not wait for the broker.
		_,err := rabbitmqPool.AmqpServer.PutIntoQueue(common.ExchangeNameNotice,common.RouteKeyNotice,noticeData)
		if err != nil {
			info := map[string]interface{}{"msg":noticeData,"error":err.Error()}
			strInfo,_ := json.Marshal(info)
			//common.Log("./notice_retry.log",string(strInfo))
			log.NotifyLogger.Info(string(strInfo))
		}
	}()
	//common.Log("./log2.txt",data)
	//log.Printf("%d %d %s",uid,n_type,data)
	ctx.JSON(ReturnJson{Code:10000,Msg:"success",Data: map[string]interface{}{"uid":uid,"type":n_type,"data":data}})
	return
}

// Read marks a message as read. (Translated from the original Chinese
// comment: "mark message as read".)
//
// Same fire-and-forget pattern as Notify, but publishing to the read
// exchange; publish failures are logged via ReadLogger.
func (P *Produce) Read(ctx iris.Context) {
	uid := ctx.PostValueInt64Default("uid",0)
	n_type := ctx.PostValueIntDefault("type",common.TYPE_LIKE)
	data := ctx.PostValueDefault("data","")
	createTime := ctx.PostValueDefault("time",time.Now().Format("2006-01-02 15:04:05"))
	var noticeData Notice
	noticeData.Uid = uint64(uid)
	noticeData.Type = n_type
	noticeData.Data = data
	noticeData.CreateTime = createTime
	go func() {
		_,err := rabbitmqPool.AmqpServer.PutIntoQueue(common.ExchangeNameRead,common.RouteKeyRead,noticeData)
		if err != nil {
			info := map[string]interface{}{"msg":noticeData,"error":err.Error()}
			strInfo,_ := json.Marshal(info)
			//common.Log("./read_retry.log",string(strInfo))
			log.ReadLogger.Info(string(strInfo))
		}
	}()
	//log.Printf("%d %d %s",uid,n_type,data)
	_,_ = ctx.JSON(ReturnJson{Code:10000,Msg:"success",Data: map[string]interface{}{"uid":uid,"type":n_type,"data":data}})
	return
}

// List returns a page of a user's messages plus the unread count.
// (Translated from the original Chinese comment: "message list".)
func (P *Produce) List(ctx iris.Context){
	uid,_ := strconv.ParseUint(ctx.FormValueDefault("uid","0"),10,64)
	typ,_ := strconv.Atoi(ctx.FormValueDefault("type","0"))
	page,_ := strconv.Atoi(ctx.FormValueDefault("page","1"))
	NoticeModel := new(model.Notice)
	list := NoticeModel.GetListByUid(uid,typ,page)
	unread := NoticeModel.CountUnReadByUid(uid,typ)
	_,_ = ctx.JSON(ReturnJson{Code:10000,Msg:"success",Data: map[string]interface{}{"list":list,"unread":unread}})
	return
}
package googl import ( "fmt" "github.com/parnurzeal/gorequest" ) type Googl struct { Key string } type ShortMsg struct { Kind string `json:"kind"` Id string `json:"id"` LongUrl string `json:"longUrl"` } type LongMsg struct { Kind string `json:"kind"` Id string `json:"id"` LongUrl string `json:"longUrl"` Status string `json:"status"` } func NewClient(key string) *Googl { return &Googl{Key: key} } func (c *Googl) Shorten(url string) string { request := gorequest.New() var response string gUrl := "https://www.googleapis.com/urlshortener/v1/url?key=" + c.Key if c.Key == "" { response = "You need to set the Google Url Shortener API Key" } else if url == "" { response = "You need to set the url to be shortened" } else { resp, body, _ := request.Post(gUrl). Set("Accept", "application/json"). Set("Content-Type", "application/json"). Send(`{"longUrl":"` + url + `"}`).End() if resp.Status == "200 OK" { fmt.Println(body) response = "Done! Ok!" } else { response = "Some error occurred, please try again later" } } return response } func (c *Googl) Expand(shortUrl string) string { request := gorequest.New() var response string gUrl := "https://www.googleapis.com/urlshortener/v1/url?key=" + c.Key + "&shortUrl=" + shortUrl if c.Key == "" { response = "You need to set the Google Url Shortener API Key" } else if shortUrl == "" { response = "You need to set the url to be expanded" } else { resp, body, _ := request.Get(gUrl). Set("Accept", "application/json"). Set("Content-Type", "application/json").End() if resp.Status == "200 OK" { fmt.Println(body) response = "Done! Ok!" } else { response = "Some error occurred, please try again later" } } return response }
package main import ( "fmt" "github.com/mholland/advent-of-code/2019/intcode" ) func main() { gameState := runGame(1) blocks := 0 for _, tile := range gameState { if tile == block { blocks++ } } fmt.Printf("Number of starting blocks: %v\n", blocks) finishedGame := runGame(2) printGame(finishedGame) fmt.Printf("Highscore: %v", finishedGame[pos{-1, 0}]) } func printGame(tiles map[pos]tileType) { for y := 0; y < 25; y++ { for x := 0; x < 42; x++ { switch tiles[pos{x, y}] { case empty: fmt.Print(" ") case wall: fmt.Print("#") case block: fmt.Print("□") case paddle: fmt.Print("_") case ball: fmt.Print("o") } } fmt.Println() } for x := 0; x < 42; x++ { if x%2 == 0 { fmt.Print("~") continue } fmt.Print("-") } fmt.Println() } func runGame(mode int) map[pos]tileType { program := intcode.ReadProgram("input.txt") machine := intcode.NewIntcodeMachine() machine.LoadMemory(program) machine.SetMemoryValue(0, mode) input := 0 for machine.State != intcode.Halted { machine.SetInput(input) machine.RunProgram() ballPos := findTile(mapOutput(machine.GetOutput()), ball) paddlePos := findTile(mapOutput(machine.GetOutput()), paddle) if ballPos.x > paddlePos.x { input = 1 continue } if ballPos.x < paddlePos.x { input = -1 continue } input = 0 } return mapOutput(machine.GetOutput()) } func findTile(gameState map[pos]tileType, tileType tileType) pos { for pos, tt := range gameState { if tt == tileType { return pos } } return pos{-1, -1} } func mapOutput(output []int) map[pos]tileType { gameState := make(map[pos]tileType) for i := 0; i < len(output); i += 3 { x, y, tile := output[i], output[i+1], output[i+2] gameState[pos{x, y}] = tileType(tile) } return gameState } type pos struct { x, y int } type tileType int const ( empty tileType = iota wall block paddle ball )
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseCmdLine dispatches on the first command-line argument (case-insensitive)
// to the matching subcommand handler. Supported tools are "go" and "liteide";
// downloaded Go archives are checksum-verified before extraction, LiteIDE
// archives are not (no checksums are published for them — presumably why the
// compareHash step is skipped there).
func parseCmdLine() {
	var gtver latest
	if len(os.Args) > 1 {
		args := os.Args[1:]
		switch strings.ToLower(args[0]) {
		case "refresh":
			// Re-fetch the remote version database.
			fmt.Println(strFetchRemote)
			refreshDb()
		case "installed":
			// List installed versions, optionally filtered by tool.
			if len(args) > 1 {
				if args[1] == "go" {
					printInstalled(getInstalled("go"), args[1])
				} else if args[1] == "liteide" {
					printInstalled(getInstalled("liteide"), args[1])
				}
			} else {
				printInstalled(getInstalled("go"), "go")
				printInstalled(getInstalled("liteide"), "liteide")
			}
		case "ls":
			// List versions available for download, optionally filtered by tool.
			if len(args) > 1 {
				if args[1] == "go" {
					printVersions("go")
				} else if args[1] == "liteide" {
					printVersions("liteide")
				}
			} else {
				printVersions("go")
				printVersions("liteide")
			}
		case "fetch":
			// Download an archive without installing it.
			// args[2] is an optional version; args[3] (liteide only) an optional
			// Qt/system variant.
			if len(args) >= 2 {
				if args[1] == "go" {
					if len(args) >= 3 {
						gtver = getLatest("go", args[2], "")
						fmt.Printf(strDownloadingGo, gtver.ver)
					} else {
						gtver = getLatest("go", "", "")
						fmt.Printf(strDownloadingGo, gtver.ver)
					}
					download("go", gtver.ver, gtver.url, gtver.fileName)
					// Delete the archive when its checksum does not match.
					if !compareHash(gtver.ver, checksum(archivesDir+ps+gtver.fileName)) {
						removeFile(archivesDir + ps + gtver.fileName)
					}
				} else if args[1] == "liteide" {
					if len(args) >= 3 {
						fmt.Printf(strDownloadingLiteIDE, args[2])
						gtver = getLatest("liteide", args[2], "")
						if len(args) >= 4 {
							fmt.Printf(strDownloadingLiteIDEversion, args[2], args[3])
							gtver = getLatest("liteide", args[2], args[3])
						}
					} else {
						gtver = getLatest("liteide", "", "")
					}
					fmt.Printf(strDownloading, gtver.ver)
					download("liteide", gtver.ver, gtver.url, gtver.fileName)
				} else {
					usage()
				}
			} else if len(args) == 1 {
				// NOTE(review): within this switch len(args) >= 1 always holds, so
				// the final else below is unreachable.
				fmt.Println(strPleaseSetTool)
			} else {
				usage()
			}
		case "install", "i":
			// Download and extract; Go archives are installed only when the
			// checksum verifies.
			if len(args) > 1 {
				if args[1] == "go" {
					if len(args) >= 3 {
						gtver = getLatest("go", args[2], "")
					} else {
						gtver = getLatest("go", "", "")
					}
					fmt.Printf(strDownloadingGo, gtver.ver)
					download("go", gtver.ver, gtver.url, gtver.fileName)
					if compareHash(gtver.ver, checksum(archivesDir+ps+gtver.fileName)) {
						extract(gtver.fileName, gtver.ver)
					} else {
						fmt.Printf("%s. %s\n", strChecksumMismatch, strOperationAborted)
						removeFile(archivesDir + ps + gtver.fileName)
					}
				} else if args[1] == "liteide" {
					if len(args) >= 3 {
						fmt.Printf(strDownloadingLiteIDE, args[2])
						gtver = getLatest("liteide", args[2], "")
						if len(args) >= 4 {
							fmt.Printf(strDownloadingLiteIDEversion, args[2], args[3])
							gtver = getLatest("liteide", args[2], args[3])
						}
					} else {
						gtver = getLatest("liteide", "", "")
					}
					fmt.Printf(strDownloadingInstalling, gtver.ver)
					download("liteide", gtver.ver, gtver.url, gtver.fileName)
					extract(gtver.fileName, gtver.ver)
				} else {
					usage()
				}
			} else {
				fmt.Println(strPleaseSetTool)
				usage()
			}
		case "uninstall", "u":
			// Remove an installed version directory; requires tool and version.
			if len(args) == 3 {
				if args[1] == "go" {
					fmt.Println(strUninstallGo, args[2])
					errRemove := os.RemoveAll(gtvmDir + ps + "go" + ps + args[2])
					checkErr("Uninstall go", errRemove)
				} else if args[1] == "liteide" {
					fmt.Println(strUninstallLiteIDE, args[2])
					errRemove := os.RemoveAll(gtvmDir + ps + "liteide" + ps + args[2])
					checkErr("Uninstall liteide", errRemove)
				} else {
					usage()
				}
			} else if len(args) == 1 {
				fmt.Println(strPleaseSetTool)
			} else {
				usage()
			}
		case "use":
			// Point GOROOT at an installed Go version; silently does nothing
			// when arguments are missing or the tool is not "go".
			if len(args) > 1 {
				if args[1] == "go" {
					if len(args) >= 3 {
						setGoRoot(args[2])
					}
				}
			}
		case "archives":
			getArchives()
		case "upgrade":
			// Not implemented; show usage.
			usage()
		case "config":
			// Not implemented yet.
			if len(args) == 2 {
				//
			} else if len(args) == 1 {
				//
			} else {
				usage()
			}
		case "env":
			// Not implemented yet.
			if len(args) == 2 {
				//
			} else if len(args) == 1 {
				//
			} else {
				usage()
			}
		case "help", "h":
			usage()
		}
	} else {
		usage()
	}
}
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package eventchannel contains functionality for sending any protobuf message
// on a socketpair.
//
// The wire format is a uvarint length followed by a binary protobuf.Any
// message.
package eventchannel

import (
	"encoding/binary"
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	pb "gvisor.dev/gvisor/pkg/eventchannel/eventchannel_go_proto"
	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/unet"
)

// Emitter emits a proto message.
type Emitter interface {
	// Emit writes a single eventchannel message to an emitter. Emit should
	// return hangup = true to indicate an emitter has "hung up" and no further
	// messages should be directed to it.
	Emit(msg proto.Message) (hangup bool, err error)

	// Close closes this emitter. Emit cannot be used after Close is called.
	Close() error
}

// DefaultEmitter is the default emitter. Calls to Emit and AddEmitter are sent
// to this Emitter.
var DefaultEmitter = &multiEmitter{}

// Emit is a helper method that calls DefaultEmitter.Emit.
func Emit(msg proto.Message) error {
	_, err := DefaultEmitter.Emit(msg)
	return err
}

// LogEmit is a helper method that calls DefaultEmitter.Emit.
// It also logs a warning message when an error occurs.
func LogEmit(msg proto.Message) error {
	_, err := DefaultEmitter.Emit(msg)
	if err != nil {
		log.Warningf("unable to emit event: %s", err)
	}
	return err
}

// AddEmitter is a helper method that calls DefaultEmitter.AddEmitter.
func AddEmitter(e Emitter) {
	DefaultEmitter.AddEmitter(e)
}

// HaveEmitters indicates if any emitters have been registered to the
// default emitter.
func HaveEmitters() bool {
	DefaultEmitter.mu.Lock()
	defer DefaultEmitter.mu.Unlock()

	return len(DefaultEmitter.emitters) > 0
}

// multiEmitter is an Emitter that forwards messages to multiple Emitters.
type multiEmitter struct {
	// mu protects emitters.
	mu sync.Mutex
	// emitters is initialized lazily in AddEmitter.
	emitters map[Emitter]struct{}
}

// Emit emits a message using all added emitters.
//
// Errors from individual emitters are collected into a single error but do
// not stop delivery to the remaining emitters; an emitter that reports hangup
// is removed from the set. The aggregate hangup result is always false.
func (me *multiEmitter) Emit(msg proto.Message) (bool, error) {
	me.mu.Lock()
	defer me.mu.Unlock()

	var err error
	for e := range me.emitters {
		hangup, eerr := e.Emit(msg)
		if eerr != nil {
			if err == nil {
				err = fmt.Errorf("error emitting %v: on %v: %v", msg, e, eerr)
			} else {
				err = fmt.Errorf("%v; on %v: %v", err, e, eerr)
			}

			// Log as well, since most callers ignore the error.
			log.Warningf("Error emitting %v on %v: %v", msg, e, eerr)
		}
		if hangup {
			log.Infof("Hangup on eventchannel emitter %v.", e)
			delete(me.emitters, e)
		}
	}

	return false, err
}

// AddEmitter adds a new emitter.
func (me *multiEmitter) AddEmitter(e Emitter) {
	me.mu.Lock()
	defer me.mu.Unlock()
	if me.emitters == nil {
		me.emitters = make(map[Emitter]struct{})
	}
	me.emitters[e] = struct{}{}
}

// Close closes all emitters. If any Close call errors, it returns the first
// one encountered.
func (me *multiEmitter) Close() error {
	me.mu.Lock()
	defer me.mu.Unlock()
	var err error
	for e := range me.emitters {
		if eerr := e.Close(); err == nil && eerr != nil {
			err = eerr
		}
		delete(me.emitters, e)
	}
	return err
}

// socketEmitter emits proto messages on a socket.
type socketEmitter struct {
	socket *unet.Socket
}

// SocketEmitter creates a new event channel based on the given fd.
//
// SocketEmitter takes ownership of fd.
func SocketEmitter(fd int) (Emitter, error) {
	s, err := unet.NewSocket(fd)
	if err != nil {
		return nil, err
	}

	return &socketEmitter{
		socket: s,
	}, nil
}

// Emit implements Emitter.Emit.
//
// The message is wrapped in an Any, marshaled, and written as
// uvarint-length-prefixed bytes; EPIPE on write is reported as hangup.
func (s *socketEmitter) Emit(msg proto.Message) (bool, error) {
	any, err := anypb.New(msg)
	if err != nil {
		return false, err
	}
	bufMsg, err := proto.Marshal(any)
	if err != nil {
		return false, err
	}

	// Wire format is uvarint message length followed by binary proto.
	p := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(p, uint64(len(bufMsg)))
	p = append(p[:n], bufMsg...)
	for done := 0; done < len(p); {
		n, err := s.socket.Write(p[done:])
		if err != nil {
			return linuxerr.Equals(linuxerr.EPIPE, err), err
		}
		done += n
	}

	return false, nil
}

// Close implements Emitter.Close.
// (Fixed: the comment previously said "implements Emitter.Emit".)
func (s *socketEmitter) Close() error {
	return s.socket.Close()
}

// debugEmitter wraps an emitter to emit stringified event messages. This is
// useful for debugging -- when the messages are intended for humans.
type debugEmitter struct {
	inner Emitter
}

// DebugEmitterFrom creates a new event channel emitter by wrapping an existing
// raw emitter.
func DebugEmitterFrom(inner Emitter) Emitter {
	return &debugEmitter{
		inner: inner,
	}
}

// Emit implements Emitter.Emit. It forwards the message to the wrapped
// emitter as a DebugEvent carrying the message's full name and textproto form.
func (d *debugEmitter) Emit(msg proto.Message) (bool, error) {
	text, err := prototext.Marshal(msg)
	if err != nil {
		return false, err
	}
	ev := &pb.DebugEvent{
		Name: string(msg.ProtoReflect().Descriptor().FullName()),
		Text: string(text),
	}
	return d.inner.Emit(ev)
}

// Close implements Emitter.Close by closing the wrapped emitter.
func (d *debugEmitter) Close() error {
	return d.inner.Close()
}
package service import ( "belajar-unit-testing/repository" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) var categoryRespository = &repository.CategoryRepositoryMock{Mock: mock.Mock{}} var categoryService = CategoryService{Repository: categoryRespository} func TestCategoryServiceGet(t *testing.T) { // program mock categoryRespository.Mock.On("FindById", "1").Return(nil) category, err := categoryService.Get("1") assert.Nil(t, category) assert.NotNil(t, err) }
package requests

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"strings"

	"github.com/google/go-querystring/query"

	"github.com/atomicjolt/canvasapi"
	"github.com/atomicjolt/string_utils"
)

// EnableDisableOrClearExplicitCspSettingAccounts Either explicitly sets CSP to be on or off for courses and sub-accounts,
// or clear the explicit settings to default to those set by a parent account
//
// Note: If "inherited" and "settings_locked" are both true for this account or course,
// then the CSP setting cannot be modified.
// https://canvas.instructure.com/doc/api/content_security_policy_settings.html
//
// Path Parameters:
// # Path.AccountID (Required) ID
//
// Form Parameters:
// # Form.Status (Required) . Must be one of enabled, disabled, inheritedIf set to "enabled" for an account, CSP will be enabled for all its courses and sub-accounts (that
//    have not explicitly enabled or disabled it), using the allowed domains set on this account.
//    If set to "disabled", CSP will be disabled for this account or course and for all sub-accounts
//    that have not explicitly re-enabled it.
//    If set to "inherited", this account or course will reset to the default state where CSP settings
//    are inherited from the first parent account to have them explicitly set.
//
type EnableDisableOrClearExplicitCspSettingAccounts struct {
	Path struct {
		AccountID string `json:"account_id" url:"account_id,omitempty"` // (Required) ID
	} `json:"path"`

	Form struct {
		Status string `json:"status" url:"status,omitempty"` // (Required) . Must be one of enabled, disabled, inherited
	} `json:"form"`
}

// GetMethod returns the HTTP method for this request.
func (t *EnableDisableOrClearExplicitCspSettingAccounts) GetMethod() string {
	return "PUT"
}

// GetURLPath returns the endpoint path with the account id interpolated.
func (t *EnableDisableOrClearExplicitCspSettingAccounts) GetURLPath() string {
	path := "accounts/{account_id}/csp_settings"
	path = strings.ReplaceAll(path, "{account_id}", fmt.Sprintf("%v", t.Path.AccountID))
	return path
}

// GetQuery returns the query string (none for this endpoint).
func (t *EnableDisableOrClearExplicitCspSettingAccounts) GetQuery() (string, error) {
	return "", nil
}

// GetBody returns the form parameters as URL-encoded values.
func (t *EnableDisableOrClearExplicitCspSettingAccounts) GetBody() (url.Values, error) {
	return query.Values(t.Form)
}

// GetJSON returns the form parameters marshaled as JSON.
func (t *EnableDisableOrClearExplicitCspSettingAccounts) GetJSON() ([]byte, error) {
	j, err := json.Marshal(t.Form)
	if err != nil {
		// BUG FIX: previously returned nil, nil here, silently swallowing the
		// marshal error and handing callers a nil body with no explanation.
		return nil, err
	}
	return j, nil
}

// HasErrors validates the required path and form parameters, returning a
// single error that joins all violations, or nil when the request is valid.
func (t *EnableDisableOrClearExplicitCspSettingAccounts) HasErrors() error {
	errs := []string{}
	if t.Path.AccountID == "" {
		errs = append(errs, "'Path.AccountID' is required")
	}
	if t.Form.Status == "" {
		errs = append(errs, "'Form.Status' is required")
	}
	if t.Form.Status != "" && !string_utils.Include([]string{"enabled", "disabled", "inherited"}, t.Form.Status) {
		errs = append(errs, "Status must be one of enabled, disabled, inherited")
	}
	if len(errs) > 0 {
		// errors.New instead of fmt.Errorf: the joined messages are not a
		// format string, and any '%' in them would otherwise be misinterpreted
		// as a verb (go vet: non-constant format string).
		return errors.New(strings.Join(errs, ", "))
	}
	return nil
}

// Do sends the request through the given Canvas client, discarding the body.
func (t *EnableDisableOrClearExplicitCspSettingAccounts) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	if err != nil {
		return err
	}

	return nil
}
package cncscraper

// PollOption is one selectable answer in a poll.
type PollOption struct {
	// Title is the option's display text. Note it is persisted under the
	// BSON key "label", not "title".
	Title string `bson:"label"`
	// Votes is the vote tally recorded for this option.
	Votes int `bson:"votes"`
}
// Package Mail is an abstraction around the gomail package that handles // sending user email confirmation messages. package mail import ( "fmt" "os" "github.com/sendgrid/rest" sendgrid "github.com/sendgrid/sendgrid-go" "github.com/sendgrid/sendgrid-go/helpers/mail" ) type MessageData struct { Address string Tag string UserId int Token string } // SendConfirmation builds and dispatches an email confirmation to new users. func SendConfirmation(data *MessageData) (*rest.Response, error) { from := mail.NewEmail("vvmk", "vv@shfflshinerepeat.com") subject := "Welcome to the party!" to := mail.NewEmail(data.Tag, data.Address) confirm := fmt.Sprintf("https://shfflshinerepeat.com/confirm?uid=%d&token=%s", data.UserId, data.Token) message := new(mail.SGMailV3) message.SetTemplateID(os.Getenv("SENDGRID_TEMPLATE_CONFIRMATION")) message.SetFrom(from) message.Subject = subject p := mail.NewPersonalization() p.AddTos(to) p.SetSubstitution("-confirmlink-", confirm) p.SetSubstitution("-ssrtag-", data.Tag) message.AddPersonalizations(p) client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY")) response, err := client.Send(message) if err != nil { return nil, err } else { return response, nil } }
package main

import "fmt"

// square returns x multiplied by itself.
func square(x int) int {
	result := x * x
	return result
}

// ListNode is a singly linked list node holding an int payload.
type ListNode struct {
	data int
	link *ListNode
}

func main() {
	n := square(1000)
	fmt.Println(n)
}
package utils import ( "bytes" "container/list" "encoding/csv" "encoding/json" "strings" "github.com/pelletier/go-toml" "github.com/tealeg/xlsx" "sigs.k8s.io/yaml" ) // ParserResponse represents the Parser JSON response type ParserResponse struct { FileParsed bool JsonString string } // GetParserResponse return Parsed JSON response func GetParserResponse(parsedData []map[string]interface{}) ParserResponse { parsedJson, err := json.MarshalIndent(parsedData, "", " ") if err != nil { return ParserResponse{} } jsonString := string(parsedJson) return ParserResponse{true, jsonString} } // YAMLToJSON convert YAML to JSON return map func YAMLToJSON(data []byte) ParserResponse { parsedData, _ := yaml.YAMLToJSON(data) jsonMap := make(map[string]interface{}) if err := json.Unmarshal(parsedData, &jsonMap); err != nil { return ParserResponse{} } parsedJson, err := json.MarshalIndent(jsonMap, "", " ") jsonString := string(parsedJson) if err != nil { return ParserResponse{} } return ParserResponse{true, jsonString} } // TOMLToJSON convert TOML to JSON return map func TOMLToJSON(data []byte) ParserResponse { readerData := strings.NewReader(string(data)) tree, err := toml.LoadReader(readerData) if err != nil { return ParserResponse{} } treeMap := tree.ToMap() bytes, err := json.MarshalIndent(treeMap, "", " ") if err != nil { return ParserResponse{} } jsonString := string(bytes[:]) return ParserResponse{true, jsonString} } // CSVToJSON convert CSV to JSON return map func CSVToJSON(data []byte) ParserResponse { bytesData := bytes.NewReader(data) // create a new reader r := csv.NewReader(bytesData) records, err := r.ReadAll() if err != nil { return ParserResponse{} } parsedData := make([]map[string]interface{}, 0) headerName := records[0] for rowCounter, row := range records { if rowCounter != 0 { var singleMap = make(map[string]interface{}) for colCounter, col := range row { singleMap[headerName[colCounter]] = col } if len(singleMap) > 0 { parsedData = append(parsedData, singleMap) } 
} } if len(parsedData) > 0 { return GetParserResponse(parsedData) } return ParserResponse{} } // ExcelToJSON convert Excel to JSON return map func ExcelToJSON(data []byte) ParserResponse { xlFile, err := xlsx.OpenBinary(data) if err != nil { return ParserResponse{} } parsedData := make([]map[string]interface{}, 0) headerName := list.New() // sheet for _, sheet := range xlFile.Sheets { // rows for rowCounter, row := range sheet.Rows { // column headerIterator := headerName.Front() var singleMap = make(map[string]interface{}) for _, cell := range row.Cells { if rowCounter == 0 { text := cell.String() headerName.PushBack(text) } else { text := cell.String() singleMap[headerIterator.Value.(string)] = text headerIterator = headerIterator.Next() } } if rowCounter != 0 && len(singleMap) > 0 { parsedData = append(parsedData, singleMap) } } } if len(parsedData) > 0 { return GetParserResponse(parsedData) } return ParserResponse{} }
package raws // import "github.com/BenLubar/dfide/raws" import "errors" var ( ErrNoRawFileName = errors.New("raws: no file name (first line was blank)") ErrNoRawObjectType = errors.New("raws: missing OBJECT tag at start of file") ErrIncompleteTag = errors.New("raws: incomplete tag (missing closing bracket)") ErrInvalidCharacter = errors.New("raws: invalid character") )
package app import ( "errors" "fmt" "github.com/tharsis/token/erc20" ) // DeployContract deploys ERC-20 contract using privateKey and ethClient that are stored on Client. func (c *Client) DeployContract() error { if c.privateKey == nil { return errors.New("privateKey is nil") } if c.ethClient == nil { return errors.New("ethClient is nil") } auth, fromAddressStr, err := c.setupTransOpts() if err != nil { return err } // address, tx, instance, err := token.DeployToken(auth, client) addr, _, _, err := erc20.DeployErc20(auth, c.ethClient) if err != nil { return fmt.Errorf("DeployToken err: %q", err) } fmt.Printf("contract from %s has been successfully deployed at: %s", fromAddressStr, addr.Hex()) return nil }
package chapter2 import ( "container/list" "testing" ) func Test_kFromTail(t *testing.T) { getList := func() *list.List { l := list.New() l.PushBack(1) l.PushBack(2) l.PushBack(2) l.PushBack(1) l.PushBack(3) return l } type args struct { l *list.List k int } tests := []struct { name string args args want interface{} }{ { "正常", args{ l: getList(), k: 3, }, 2, }, { "限界値テスト:正常", args{ l: getList(), k: 4, }, 1, }, { "限界値テスト:異常", args{ l: getList(), k: 5, }, nil, }, { "異常", args{ l: getList(), k: 100, }, nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := kFromTail(tt.args.l, tt.args.k); got != tt.want { t.Errorf("kFromTail() = %v, want %v", got, tt.want) } }) } }
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package crash

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"

	"chromiumos/tast/common/testexec"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/metrics"
	"chromiumos/tast/local/sysutil"
	"chromiumos/tast/testing"
)

const (
	// crashUserAccessGID is the GID for crash-user-access, as defined in
	// third_party/eclass-overlay/profiles/base/accounts/group/crash-user-access.
	crashUserAccessGID = 420

	// collectChromeCrashFile is the name of a special file that tells crash_reporter's
	// UserCollector to always dump Chrome crashes. (Instead of the normal behavior
	// of skipping those crashes in user_collector in favor of letting ChromeCollector
	// handle them.) This behavior change will mess up several crash tests.
	collectChromeCrashFile = "/mnt/stateful_partition/etc/collect_chrome_crashes"

	// rebootPersistenceCount is the number of reboots across which the
	// mock-consent and crash-test-in-progress files should persist, in
	// case the DUT reboots multiple times during the test. It is only used
	// when the RebootingTest option is passed.
	rebootPersistenceCount = "4"

	// rebootPersistDir is the directory to which mock-consent and
	// crash-test-in-progress written to in order to preserve them across
	// reboot.
	rebootPersistDir = "/mnt/stateful_partition/unencrypted/preserve/"

	// daemonStoreConsentName is the name of file in daemon-store that
	// gives per-user consent state.
	daemonStoreConsentName = "consent-enabled"
)

// ConsentType is to be used for parameters to tests, to allow them to determine
// whether they should use mock consent or real consent.
type ConsentType int

const (
	// MockConsent indicates that a test should use the mock consent system.
	MockConsent ConsentType = iota
	// RealConsent indicates that a test should use the real consent system.
	RealConsent
	// RealConsentPerUserOn indicates that a test should use the real
	// consent system, and also turn *on* per-user consent.
	RealConsentPerUserOn
	// RealConsentPerUserOff indicates that a test should use the real
	// consent system, and also turn *off* per-user consent.
	// Crashes should not be collected in this case.
	RealConsentPerUserOff
)

// SetConsent enables or disables metrics consent, based on the value of consent.
// Pre: cr must point to a logged-in chrome session.
// It blocks (up to a 30-second timeout) until the new consent state is
// observed and crash_reporter's cached view of it has expired.
func SetConsent(ctx context.Context, cr *chrome.Chrome, consent bool) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	if err := ensureSoftwareDeps(ctx); err != nil {
		return err
	}

	// First, ensure that device ownership has been taken, for two reasons:
	// 1. Due to https://crbug.com/1042951#c16, if we set consent while
	//    ownership is in the OWNERSHIP_UNKNOWN state, we'll never actually
	//    set it (it will stay pending forever).
	// 2. Even if setting consent to pending works (we set it when we're in
	//    state OWNERSHIP_NONE), there is a brief time after ownership is
	//    taken where consent is reset to false. See
	//    https://crbug.com/1041062#c23
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if _, err := os.Stat("/var/lib/devicesettings/owner.key"); err != nil {
			if os.IsNotExist(err) {
				// Not an error yet: keep polling until the owner key appears.
				return err
			}
			return testing.PollBreak(err)
		}
		return nil
	}, nil); err != nil {
		return errors.Wrap(err, "timed out while waiting for device ownership")
	}

	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		return errors.Wrap(err, "creating test API connection failed")
	}

	testing.ContextLogf(ctx, "Setting metrics consent to %t", consent)
	if err := tconn.Call(ctx, nil, "tast.promisify(chrome.autotestPrivate.setMetricsEnabled)", consent); err != nil {
		return errors.Wrap(err, "running autotestPrivate.setMetricsEnabled failed")
	}

	// Wait for consent to be set before we return.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		state, err := metrics.HasConsent(ctx)
		if err != nil {
			return testing.PollBreak(err)
		}
		if state != consent {
			return errors.Errorf("consent state mismatch: got %t, want %t", state, consent)
		}
		return nil
	}, nil); err != nil {
		return errors.Wrap(err, "timed out while waiting for consent")
	}

	// Make sure that the updated status is polled by crash_reporter.
	// crash_reporter holds a cache of the consent status until the integer value of time() changes since last time.
	// https://chromium.googlesource.com/chromiumos/platform2/+/3fe852bfa/metrics/metrics_library.cc#154
	// The fraction of the end time is intentionally rounded down here.
	// For example, if the system clock were 12:34:56.700, the cache would be purged no later than 12:34:57.000.
	end := time.Unix(time.Now().Add(1*time.Second).Unix(), 0)
	testing.Sleep(ctx, end.Sub(time.Now()))

	// If a test wants consent to be turned off, make sure mock consent doesn't
	// interfere.
	if err := os.Remove(filepath.Join(crashTestInProgressDir, mockConsentFile)); err != nil && !os.IsNotExist(err) {
		return errors.Wrap(err, "unable to remove mock consent file")
	}
	if err := os.Remove(filepath.Join(SystemCrashDir, mockConsentFile)); err != nil && !os.IsNotExist(err) {
		return errors.Wrap(err, "unable to remove mock consent file")
	}
	return nil
}

// ensureSoftwareDeps checks that the current test declares appropriate software
// dependencies for crash tests.
func ensureSoftwareDeps(ctx context.Context) error {
	deps, ok := testing.ContextSoftwareDeps(ctx)
	if !ok {
		return errors.New("failed to extract software dependencies from context (using wrong context?)")
	}

	const exp = "metrics_consent"
	for _, dep := range deps {
		if dep == exp {
			return nil
		}
	}
	return errors.Errorf("crash tests must declare %q software dependency", exp)
}

// moveAllCrashesTo moves crashes from source to target.
// This allows us to
// start crash tests with an empty spool directory, reducing risk of flakes if
// the dir is already full when the test starts.
func moveAllCrashesTo(source, target string) error {
	files, err := ioutil.ReadDir(source)
	if err != nil {
		// Bubble this up so caller can check whether IsNotExist and behave accordingly.
		return err
	}
	if err := os.MkdirAll(target, 0755); err != nil {
		return errors.Wrapf(err, "couldn't make stash crash dir %s", target)
	}
	for _, f := range files {
		if f.IsDir() {
			// Don't move directories (like the "attachments" directory that crashpad creates).
			continue
		}
		err := os.Rename(filepath.Join(source, f.Name()), filepath.Join(target, f.Name()))
		// Ignore error if the source was removed.
		// This could happen, for example, if moveAllCrashesTo races with early-failure-cleanup.
		// NOTE: We need to check both the os.Rename() return value as well as Stat()'ing the source
		// file because our destination is of form "target/foo", and "target" may not exist.
		if errors.Is(err, os.ErrNotExist) {
			if _, err := os.Stat(filepath.Join(source, f.Name())); err != nil && errors.Is(err, os.ErrNotExist) {
				continue
			}
		}
		if err != nil {
			return errors.Wrapf(err, "couldn't move file: %v", f.Name())
		}
	}
	return nil
}

// Option is a self-referential function can be used to configure crash tests.
// See https://commandcenter.blogspot.com.au/2014/01/self-referential-functions-and-design.html
// for details about this pattern.
type Option func(p *setUpParams)

// DevImage prevents the test library from indicating to the DUT that a crash
// test is in progress, allowing the test to complete with standard developer
// image behavior.
func DevImage() Option {
	return func(p *setUpParams) {
		p.isDevImageTest = true
	}
}

// WithConsent indicates that the test should enable metrics consent.
// Pre: cr should be a logged-in chrome session.
func WithConsent(cr *chrome.Chrome) Option {
	return func(p *setUpParams) {
		p.setConsent = true
		p.chrome = cr
	}
}

// WithMockConsent indicates that the test should touch the mock metrics consent
// file which causes crash_reporter and crash_sender to act as if they had
// consent to process crashes.
func WithMockConsent() Option {
	return func(p *setUpParams) {
		p.setMockConsent = true
	}
}

// RebootingTest indicates that this test will reboot the machine, and the crash
// reporting state files (e.g. crash-test-in-progress) should also be placed in
// rebootPersistDir (so that the persist-crash-test task moves them over to
// /run/crash_reporter on boot).
func RebootingTest() Option {
	return func(p *setUpParams) {
		p.rebootTest = true
	}
}

// FilterCrashes puts s into the filter-in file, so that the crash reporter only
// processes matching crashes.
// If this option is used, then only invocation of crash_reporter with arguments
// that contains s will be processed. See platform2/crash-reporter/README.md and
// search for "filter-in" for more info.
func FilterCrashes(s string) Option {
	return func(p *setUpParams) {
		p.filterIn = s
	}
}

// SetUpCrashTest indicates that we are running a test that involves the crash
// reporting system (crash_reporter, crash_sender, or anomaly_detector). The
// test should "defer TearDownCrashTest(ctx)" after calling this. If developer image
// behavior is required for the test, call SetUpDevImageCrashTest instead.
func SetUpCrashTest(ctx context.Context, opts ...Option) error {
	crashDirs := []crashAndStash{
		{SystemCrashDir, systemCrashStash},
		{LocalCrashDir, localCrashStash},
		{ClobberCrashDir, clobberCrashStash},
	}
	// Defaults; options below may override these fields.
	p := setUpParams{
		inProgDir:        crashTestInProgressDir,
		crashDirs:        crashDirs,
		rebootPersistDir: rebootPersistDir,
		senderPausePath:  senderPausePath,
		filterInPath:     FilterInPath,
		senderProcName:   senderProcName,
		mockSendingPath:  mockSendingPath,
		sendRecordDir:    SendRecordDir,
	}
	for _, opt := range opts {
		opt(&p)
	}

	// Unconditionally stash daemon-store crash dirs now that we're almost-always using daemon-store.
	daemonStorePaths, err := GetDaemonStoreCrashDirs(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get daemon store crash directories")
	}
	for _, path := range daemonStorePaths {
		p.crashDirs = append(p.crashDirs, crashAndStash{path, path + ".real"})
	}

	// This file usually doesn't exist; don't error out if it doesn't. "Not existing"
	// is the normal state, so we don't undo this in TearDownCrashTest(ctx).
	if err := os.Remove(collectChromeCrashFile); err != nil && !os.IsNotExist(err) {
		return errors.Wrap(err, "failed to remove "+collectChromeCrashFile)
	}

	// Clean up per-user consent so that we do not inadvertently use consent left over from a prior test.
	if err := RemovePerUserConsent(ctx); err != nil {
		return errors.Wrap(err, "failed to clean up per-user consent")
	}

	// Reinitialize crash_reporter in case previous tests have left bad state
	// in core_pattern, etc.
	if out, err := testexec.CommandContext(ctx, "/sbin/crash_reporter", "--init").CombinedOutput(); err != nil {
		testing.ContextLog(ctx, "Couldn't initialize crash reporter: ", string(out))
		return errors.Wrap(err, "initializing crash reporter")
	}

	return setUpCrashTest(ctx, &p)
}

// crashAndStash pairs a crash spool directory with the stash directory its
// preexisting contents are moved to for the duration of a test.
type crashAndStash struct {
	crashDir string
	stashDir string
}

// setUpParams is a collection of parameters to setUpCrashTest.
type setUpParams struct {
	inProgDir string
	// crashDirs is a list of all crash directories, along with the directories to which we should stash them.
	crashDirs        []crashAndStash
	rebootPersistDir string
	senderPausePath  string
	senderProcName   string
	mockSendingPath  string
	filterInPath     string
	sendRecordDir    string
	isDevImageTest   bool
	setConsent       bool
	setMockConsent   bool
	rebootTest       bool
	filterIn         string
	chrome           *chrome.Chrome
}

// SetCrashTestInProgress creates a file to tell crash_reporter that a crash_reporter test is in progress.
func SetCrashTestInProgress() error {
	filePath := filepath.Join(crashTestInProgressDir, crashTestInProgressFile)
	if err := ioutil.WriteFile(filePath, []byte("in-progress"), 0644); err != nil {
		return errors.Wrapf(err, "failed writing in-progress state file %s", filePath)
	}
	return nil
}

// UnsetCrashTestInProgress tells crash_reporter that no crash_reporter test is in progress.
func UnsetCrashTestInProgress() error {
	filePath := filepath.Join(crashTestInProgressDir, crashTestInProgressFile)
	if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
		return errors.Wrapf(err, "failed to remove in-progress state file %s", filePath)
	}
	return nil
}

// CreatePerUserConsent creates the per-user consent file with the specified state.
func CreatePerUserConsent(ctx context.Context, enable bool) error {
	dirs, err := GetDaemonStoreConsentDirs(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get daemon store consent dirs")
	}
	// Set consent for all active dirs.
	for _, d := range dirs {
		f := filepath.Join(d, daemonStoreConsentName)
		// "1" enables, "0" disables per-user consent.
		contents := "0"
		if enable {
			contents = "1"
		}
		if err := ioutil.WriteFile(f, []byte(contents), 0644); err != nil {
			return errors.Wrapf(err, "failed writing consent-enabled file %s", f)
		}
	}
	return nil
}

// RemovePerUserConsent deletes the per-user consent files so that we fall back to device policy state.
func RemovePerUserConsent(ctx context.Context) error {
	dirs, err := GetDaemonStoreConsentDirs(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get daemon store consent dirs")
	}
	// Clear per-user consent for all active dirs. Keep going on failure and
	// report the first error encountered.
	var firstErr error
	for _, d := range dirs {
		f := filepath.Join(d, daemonStoreConsentName)
		if err := os.Remove(f); err != nil && !os.IsNotExist(err) {
			testing.ContextLogf(ctx, "Error removing consent-enabled file %s: %v", f, err)
			if firstErr == nil {
				firstErr = errors.Wrapf(err, "failed removing consent-enabled file %s", f)
			}
		}
	}
	return firstErr
}

// setUpCrashTest is a helper function for SetUpCrashTest. We need
// this as a separate function for testing.
func setUpCrashTest(ctx context.Context, p *setUpParams) (retErr error) {
	// On any failure, undo the partial setup so the DUT is left clean.
	defer func() {
		if retErr != nil {
			tearDownCrashTest(ctx, &tearDownParams{
				inProgDir:        p.inProgDir,
				crashDirs:        p.crashDirs,
				rebootPersistDir: p.rebootPersistDir,
				senderPausePath:  p.senderPausePath,
				mockSendingPath:  p.mockSendingPath,
				filterInPath:     p.filterInPath,
			})
		}
	}()

	if p.filterIn == "" {
		if err := disableCrashFiltering(p.filterInPath); err != nil {
			return errors.Wrap(err, "couldn't disable crash filtering")
		}
	} else {
		if err := enableCrashFiltering(ctx, p.filterInPath, p.filterIn); err != nil {
			return errors.Wrap(err, "couldn't enable crash filtering")
		}
	}

	if p.setConsent && p.setMockConsent {
		return errors.New("Should not set consent and mock consent at the same time")
	}

	if err := os.MkdirAll(p.inProgDir, 0755); err != nil {
		return errors.Wrapf(err, "could not make directory %v", p.inProgDir)
	}

	if p.setConsent {
		if err := SetConsent(ctx, p.chrome, true); err != nil {
			return errors.Wrap(err, "couldn't enable metrics consent")
		}
	}

	// Pause the periodic crash_sender job.
	if err := ioutil.WriteFile(p.senderPausePath, nil, 0644); err != nil {
		return errors.Wrapf(err, "couldn't write sender pause file %s", p.senderPausePath)
	}
	// If crash_sender happens to be running, touching senderPausePath does not
	// stop it. Kill crash_sender processes to make sure there is no running
	// instance.
	if err := testexec.CommandContext(ctx, "pkill", "-9", "--exact", p.senderProcName).Run(); err != nil {
		// pkill exits with code 1 if it could find no matching process (see: man 1 pkill).
		// It is perfectly fine for our case.
		if ws, ok := testexec.GetWaitStatus(err); !ok || !ws.Exited() || ws.ExitStatus() != 1 {
			return errors.Wrap(err, "failed to kill crash_sender processes")
		}
	}

	// Configure crash_sender to prevent uploading crash reports actually.
	if err := enableMockSending(p.mockSendingPath, true); err != nil {
		return errors.Wrapf(err, "couldn't enable mock sending with path %s", p.mockSendingPath)
	}
	if err := resetSendRecords(p.sendRecordDir); err != nil {
		return errors.Wrapf(err, "couldn't reset send records at dir %s", p.sendRecordDir)
	}

	// Move all crashes into stash directory so a full directory won't stop
	// us from saving a new crash report, and so that we don't improperly
	// interpret preexisting crashes as being created during the test.
	for _, crashAndStash := range p.crashDirs {
		if err := moveAllCrashesTo(crashAndStash.crashDir, crashAndStash.stashDir); err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "couldn't stash crashes from %s to %s", crashAndStash.crashDir, crashAndStash.stashDir)
		}
	}

	// We must set mock consent _after_ stashing crashes, or we'll stash
	// the mock consent for reboot tests.
	if p.setMockConsent {
		mockConsentPath := filepath.Join(p.inProgDir, mockConsentFile)
		if err := ioutil.WriteFile(mockConsentPath, nil, 0644); err != nil {
			return errors.Wrapf(err, "failed writing mock consent file %s", mockConsentPath)
		}
		if p.rebootTest {
			mockConsentPersistent := filepath.Join(p.rebootPersistDir, mockConsentFile)
			if err := ioutil.WriteFile(mockConsentPersistent, []byte(rebootPersistenceCount), 0644); err != nil {
				return errors.Wrapf(err, "failed writing mock consent file %s", mockConsentPersistent)
			}
		}
	}

	// If the test is meant to run with developer image behavior, return here to
	// avoid creating the file that indicates a crash test is in progress.
	if p.isDevImageTest {
		return nil
	}
	filePath := filepath.Join(p.inProgDir, crashTestInProgressFile)
	if err := ioutil.WriteFile(filePath, nil, 0644); err != nil {
		return errors.Wrapf(err, "could not create %v", filePath)
	}
	if p.rebootTest {
		filePath = filepath.Join(p.rebootPersistDir, crashTestInProgressFile)
		if err := ioutil.WriteFile(filePath, []byte(rebootPersistenceCount), 0644); err != nil {
			return errors.Wrapf(err, "could not create %v", filePath)
		}
	}
	return nil
}

// cleanUpStashDir moves any stashed crashes back into realDir and removes the
// now-empty stash directory.
func cleanUpStashDir(stashDir, realDir string) error {
	// Stash dir should exist, so error if it doesn't.
	if err := moveAllCrashesTo(stashDir, realDir); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := os.Remove(stashDir); err != nil {
		if !os.IsNotExist(err) {
			return errors.Wrapf(err, "couldn't remove stash dir: %v", stashDir)
		}
	}
	return nil
}

// tearDownOption is a self-referential function can be used to configure crash tests.
// See https://commandcenter.blogspot.com.au/2014/01/self-referential-functions-and-design.html
// for details about this pattern.
type tearDownOption func(p *tearDownParams)

// TearDownCrashTest undoes the work of SetUpCrashTest. We assume here that the
// set of active sessions hasn't changed since SetUpCrashTest was called for
// the purpose of restoring the per-user-cryptohome crash directories.
func TearDownCrashTest(ctx context.Context, opts ...tearDownOption) error {
	// Collect the first error but keep tearing down as much as possible.
	var firstErr error

	crashDirs := []crashAndStash{
		{SystemCrashDir, systemCrashStash},
		{LocalCrashDir, localCrashStash},
		{ClobberCrashDir, clobberCrashStash},
	}
	p := tearDownParams{
		inProgDir:        crashTestInProgressDir,
		crashDirs:        crashDirs,
		rebootPersistDir: rebootPersistDir,
		senderPausePath:  senderPausePath,
		mockSendingPath:  mockSendingPath,
		filterInPath:     FilterInPath,
	}
	for _, opt := range opts {
		opt(&p)
	}

	// This could return a different list of paths then the original setup
	// if a session has started or ended in the meantime. If a new session
	// has started then the restore will just become a no-op, but if a
	// session has ended we won't restore the crash files inside that
	// session's cryptohome. We can't do much about this since we can't
	// touch a cryptohome while that user isn't logged in.
	daemonStorePaths, err := GetDaemonStoreCrashDirs(ctx)
	if err != nil {
		testing.ContextLog(ctx, "Failed to get daemon store crash dirs: ", err)
		if firstErr == nil {
			firstErr = errors.Wrap(err, "failed to get daemon store crash directories")
		}
	}
	for _, path := range daemonStorePaths {
		p.crashDirs = append(p.crashDirs, crashAndStash{path, path + ".real"})
	}

	if err := tearDownCrashTest(ctx, &p); err != nil {
		testing.ContextLog(ctx, "Failed to tearDownCrashTest: ", err)
		if firstErr == nil {
			firstErr = errors.Wrap(err, "couldn't tear down crash test")
		}
	}

	// The user crash directory should always be owned by chronos not root. The
	// unit tests don't run as root and can't chown, so skip this in tests.
	// Only do this if the local crash dir actually exists.
	if _, err := os.Stat(LocalCrashDir); err == nil {
		if err := os.Chown(LocalCrashDir, int(sysutil.ChronosUID), crashUserAccessGID); err != nil {
			testing.ContextLogf(ctx, "Couldn't chown %s: %v", LocalCrashDir, err)
			if firstErr == nil {
				firstErr = errors.Wrapf(err, "couldn't chown %s", LocalCrashDir)
			}
		}
	}
	return firstErr
}

// tearDownParams is a collection of parameters to tearDownCrashTest.
type tearDownParams struct {
	inProgDir string
	// crashDirs is a list of all crash directories, along with the directories to which we stashed them in setUp
	crashDirs        []crashAndStash
	rebootPersistDir string
	senderPausePath  string
	mockSendingPath  string
	filterInPath     string
}

// tearDownCrashTest is a helper function for TearDownCrashTest. We need
// this as a separate function for testing.
func tearDownCrashTest(ctx context.Context, p *tearDownParams) error {
	// Best-effort cleanup: log every failure, remember only the first error.
	var firstErr error

	// If crashTestInProgressFile does not exist, something else already removed the file
	// or it was never created (See SetUpDevImageCrashTest).
	// Well, whatever, we're in the correct state now (the file is gone).
	filePath := filepath.Join(p.inProgDir, crashTestInProgressFile)
	if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
		testing.ContextLogf(ctx, "Error removing crash test in progress file %s: %v", filePath, err)
		if firstErr == nil {
			firstErr = errors.Wrapf(err, "removing crash test in progress file %s", filePath)
		}
	}
	filePath = filepath.Join(p.rebootPersistDir, crashTestInProgressFile)
	if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
		testing.ContextLogf(ctx, "Error removing persistent crash test in progress file %s: %v", filePath, err)
		if firstErr == nil {
			firstErr = errors.Wrapf(err, "removing persistent crash test in progress file %s", filePath)
		}
	}

	if err := disableCrashFiltering(p.filterInPath); err != nil {
		testing.ContextLog(ctx, "Couldn't disable crash filtering: ", err)
		if firstErr == nil {
			firstErr = errors.Wrap(err, "couldn't disable crash filtering")
		}
	}

	// Clean up stash directories
	for _, crashAndStash := range p.crashDirs {
		if err := cleanUpStashDir(crashAndStash.stashDir, crashAndStash.crashDir); err != nil {
			testing.ContextLogf(ctx, "Error cleaning up stash dir %s (real dir %s): %v", crashAndStash.stashDir, crashAndStash.crashDir, err)
			if firstErr == nil {
				firstErr = errors.Wrapf(err, "couldn't clean up stash dir %s to %s", crashAndStash.stashDir, crashAndStash.crashDir)
			}
		}
	}

	if err := disableMockSending(p.mockSendingPath); err != nil {
		testing.ContextLogf(ctx, "Error disabling mock sending with path %s: %v", p.mockSendingPath, err)
		if firstErr == nil {
			firstErr = err
		}
	}

	mockConsentPath := filepath.Join(p.inProgDir, mockConsentFile)
	if err := os.Remove(mockConsentPath); err != nil && !os.IsNotExist(err) {
		testing.ContextLogf(ctx, "Error removing mock consent file %s: %v", mockConsentPath, err)
		if firstErr == nil {
			firstErr = errors.Wrapf(err, "couldn't remove mock consent file %s", mockConsentPath)
		}
	}
	mockConsentPersistent := filepath.Join(p.rebootPersistDir, mockConsentFile)
	if err := os.Remove(mockConsentPersistent); err != nil && !os.IsNotExist(err) {
		testing.ContextLogf(ctx, "Error removing persistent mock consent file %s: %v", mockConsentPersistent, err)
		if firstErr == nil {
			firstErr = errors.Wrapf(err, "couldn't remove persistent mock consent file %s", mockConsentPersistent)
		}
	}

	if err := os.Remove(p.senderPausePath); err != nil && !os.IsNotExist(err) {
		testing.ContextLogf(ctx, "Error removing sender pause file %s: %v", p.senderPausePath, err)
		if firstErr == nil {
			firstErr = errors.Wrapf(err, "couldn't remove sender pause file %s", p.senderPausePath)
		}
	}
	return firstErr
}
package gqlerrors import ( "encoding/json" "errors" "github.com/appleboy/graphql/language/location" ) // FormattedError contains user and machine readable, formatted error messages. type FormattedError struct { Message string `json:"message"` Locations []location.SourceLocation `json:"locations"` Extensions ErrorExtensions `json:"-"` } // MarshalJSON implements custom JSON marshaling for the `FormattedError` type // in order to place the `ErrorExtensions` at the top level. func (g FormattedError) MarshalJSON() ([]byte, error) { m := map[string]interface{}{} for k, v := range g.Extensions { m[k] = v } m["message"] = g.Message m["locations"] = g.Locations return json.Marshal(m) } // Error implements the `error` interface. func (g FormattedError) Error() string { return g.Message } // NewFormattedError creates a new formatted error from a string. func NewFormattedError(message string) FormattedError { err := errors.New(message) return FormatError(err) } // NewFormattedErrorWithExtensions creates a new formatted error from a string // with the given extensions. func NewFormattedErrorWithExtensions(message string, extensions ErrorExtensions, ) FormattedError { err := FormatError(errors.New(message)) err.Extensions = extensions return err } // FormatError from a plain error type. func FormatError(err error) FormattedError { switch err := err.(type) { case FormattedError: return err case *Error: return FormattedError{ Message: err.Error(), Locations: err.Locations, Extensions: err.Extensions, } case Error: return FormattedError{ Message: err.Error(), Locations: err.Locations, Extensions: err.Extensions, } default: return FormattedError{ Message: err.Error(), Locations: []location.SourceLocation{}, } } } // FormatErrors creates an array of `FormattedError`s from plain errors. 
func FormatErrors(errs ...error) []FormattedError { formattedErrors := []FormattedError{} for _, err := range errs { formattedErrors = append(formattedErrors, FormatError(err)) } return formattedErrors }
package zdb

import (
	"database/sql"
	"errors"
	"fmt"
	"reflect"
	"runtime/debug"
	"strconv"
	"strings"
	"time"

	"github.com/sohaha/zlsgo/zstring"
	"github.com/sohaha/zlsgo/ztime"
	"github.com/sohaha/zlsgo/ztype"
)

type (
	// ByteUnmarshaler lets a field decode itself from raw column bytes
	// (used for blob/JSON-style columns during binding).
	ByteUnmarshaler interface {
		UnmarshalByte(data []byte) error
	}
	// IfeRows defines methods that scanner needs
	IfeRows interface {
		Close() error
		Columns() ([]string, error)
		Next() bool
		Scan(dest ...interface{}) error
	}
)

var (
	// BindTag is the default struct tag name
	BindTag = "zdb"
	// ErrTargetNotSettable means the second param of Bind is not settable
	ErrTargetNotSettable = errors.New("target is not settable! a pointer is required")
	// ErrNilRows means the first param can't be a nil
	ErrNilRows = errors.New("rows can't be nil")
	// ErrSliceToString means only []uint8 can be transmuted into string
	ErrSliceToString = errors.New("can't transmute a non-uint8 slice to string")
	// ErrConversionFailed conversion failed
	ErrConversionFailed = errors.New("conversion failed")
	// ErrDBNotExist db not exist
	ErrDBNotExist = errors.New("database instance does not exist")
	// ErrRecordNotFound no records found
	ErrRecordNotFound = errors.New("no records found")

	errNoData      = errors.New("no data")
	errInsertEmpty = errors.New("insert data can not be empty")
	errDataInvalid = errors.New("data is illegal")
)

// Scan reads every row from rows and binds the result set into out (a pointer
// to a struct, slice, or scalar). It returns the row count; when the result
// set is empty the count is returned with ErrRecordNotFound.
func Scan(rows IfeRows, out interface{}) (int, error) {
	data, count, err := resolveDataFromRows(rows)
	if err != nil {
		return 0, err
	}
	if nil == data {
		return count, ErrRecordNotFound
	}
	return count, scan(data, out)
}

// scan dispatches the resolved row maps into out: a slice target receives all
// rows, any other target receives only the first row.
func scan(data []ztype.Map, out interface{}) (err error) {
	targetValueOf := reflect.ValueOf(out)
	// out must be a non-nil pointer so the binding is observable by the caller.
	if nil == out || targetValueOf.Kind() != reflect.Ptr || targetValueOf.IsNil() {
		return ErrTargetNotSettable
	}
	targetValueOf = targetValueOf.Elem()
	switch targetValueOf.Kind() {
	case reflect.Slice:
		err = bindSlice(data, targetValueOf)
	default:
		err = bind(data[0], targetValueOf)
	}
	return err
}

// ScanToMap returns the result in the form of []map[string]interface{}
func ScanToMap(rows IfeRows) ([]ztype.Map, int, error) {
	return resolveDataFromRows(rows)
}

// bindSlice binds each row map onto a freshly constructed element and sets the
// assembled slice into elem.
func bindSlice(arr []ztype.Map, elem reflect.Value) error {
	if !elem.CanSet() {
		return ErrTargetNotSettable
	}
	length := len(arr)
	valueArrObj := reflect.MakeSlice(elem.Type(), 0, length)
	typeObj := valueArrObj.Type().Elem()
	var err error
	for i := 0; i < length; i++ {
		newObj := reflect.New(typeObj)
		err = bind(arr[i], newObj.Elem())
		if nil != err {
			return err
		}
		valueArrObj = reflect.Append(valueArrObj, newObj.Elem())
	}
	elem.Set(valueArrObj)
	return nil
}

// bind copies one row map onto rv. Pointers are allocated and bound through;
// struct fields are matched by the `zdb` tag (falling back to "id" for ID, or
// snake_case of the field name); non-struct targets receive the first value.
// Any reflection panic is converted into an error via recover.
func bind(result map[string]interface{}, rv reflect.Value) (resp error) {
	defer func() {
		if r := recover(); nil != r {
			resp = fmt.Errorf("error:[%v], stack:[%s]", r, string(debug.Stack()))
		}
	}()
	if !rv.CanSet() {
		return ErrTargetNotSettable
	}
	typeObj := rv.Type()
	if typeObj.Kind() == reflect.Ptr {
		// Allocate the pointee, bind into it, and only set the pointer on success.
		ptrType := typeObj.Elem()
		newObj := reflect.New(ptrType)
		err := bind(result, newObj.Elem())
		if nil == err {
			rv.Set(newObj)
		}
		return err
	}
	if typeObj.Kind() == reflect.Struct {
		for i := 0; i < rv.NumField(); i++ {
			fieldTypeI := typeObj.Field(i)
			fieldName := fieldTypeI.Name
			valuei := rv.Field(i)
			if !valuei.CanSet() {
				continue
			}
			tagName, _ := lookUpTagName(fieldTypeI)
			if tagName == "" {
				if fieldName == "ID" {
					tagName = "id"
				} else {
					tagName = zstring.CamelCaseToSnakeCase(fieldName)
				}
			}
			mapValue, ok := result[tagName]
			if !ok || mapValue == nil {
				// Column missing or NULL: leave the field at its zero value.
				continue
			}
			// Pointer fields (other than ByteUnmarshaler implementors, which
			// decode themselves) are allocated so convert can fill the pointee.
			if fieldTypeI.Type.Kind() == reflect.Ptr && !fieldTypeI.Type.Implements(reflect.TypeOf(new(ByteUnmarshaler)).Elem()) {
				valuei.Set(reflect.New(fieldTypeI.Type.Elem()))
				valuei = valuei.Elem()
			}
			err := convert(mapValue, valuei)
			if nil != err {
				return err
			}
		}
	} else if rv.CanSet() {
		// Scalar target: bind the first (arbitrary) column value and stop.
		for i := range result {
			return convert(result[i], rv)
		}
		return nil
	}
	return nil
}

// isIntSeriesType reports whether k is one of the signed integer kinds.
func isIntSeriesType(k reflect.Kind) bool {
	return k >= reflect.Int && k <= reflect.Int64
}

// isUintSeriesType reports whether k is one of the unsigned integer kinds.
func isUintSeriesType(k reflect.Kind) bool {
	return k >= reflect.Uint && k <= reflect.Uint64
}

// isFloatSeriesType reports whether k is a floating-point kind.
func isFloatSeriesType(k reflect.Kind) bool {
	return k == reflect.Float32 || k == reflect.Float64
}

// resolveDataFromRows drains rows into a slice of column-name → value maps,
// converting []byte cell values to string, and returns the row count.
func resolveDataFromRows(rows IfeRows) ([]ztype.Map, int, error) {
	result := make([]ztype.Map, 0)
	if nil == rows {
		return result, 0, ErrNilRows
	}
	columns, err := rows.Columns()
	if nil != err {
		return result, 0, err
	}
	length := len(columns)
	values := make([]interface{}, length)
	valuePtrs := make([]interface{}, length)
	count := 0
	for rows.Next() {
		for i := 0; i < length; i++ {
			valuePtrs[i] = &values[i]
		}
		err = rows.Scan(valuePtrs...)
		if err != nil {
			return result, 0, err
		}
		entry := make(ztype.Map, length)
		for i, col := range columns {
			val := values[i]
			b, ok := val.([]byte)
			if ok {
				entry[col] = zstring.Bytes2String(b)
			} else {
				entry[col] = val
			}
		}
		result = append(result, entry)
		count++
	}
	return result, count, nil
}

// lookUpTagName returns the column name from the field's BindTag tag; the
// second result is false when the tag is absent.
func lookUpTagName(rf reflect.StructField) (string, bool) {
	name, ok := rf.Tag.Lookup(BindTag)
	if !ok {
		return "", false
	}
	name = resolveTagName(name)
	return name, true
}

// resolveTagName strips any ",option" suffix from a struct tag value.
func resolveTagName(tag string) string {
	idx := strings.IndexByte(tag, ',')
	if idx == -1 {
		return tag
	}
	return tag[:idx]
}

// convert assigns the column value out into rv, trying in order: direct
// assignment, time.Time handling, sql.Scanner, then kind-based numeric /
// slice / string-to-time conversions.
func convert(out interface{}, rv reflect.Value) error {
	vit := rv.Type()
	mvt := reflect.TypeOf(out)
	if nil == mvt {
		// NULL value: leave the destination untouched.
		return nil
	}
	if mvt.AssignableTo(vit) {
		rv.Set(reflect.ValueOf(out))
		return nil
	}
	switch assertT := out.(type) {
	case time.Time:
		return handleConvertTime(assertT, mvt, vit, &rv)
	}
	if scanner, ok := rv.Addr().Interface().(sql.Scanner); ok {
		return scanner.Scan(out)
	}
	vk := vit.Kind()
	switch mvt.Kind() {
	case reflect.Int64:
		if isIntSeriesType(vk) {
			rv.SetInt(out.(int64))
		} else if isUintSeriesType(vk) {
			rv.SetUint(uint64(out.(int64)))
		} else if vk == reflect.Bool {
			// Database bool convention: any positive integer is true.
			v := out.(int64)
			if v > 0 {
				rv.SetBool(true)
			} else {
				rv.SetBool(false)
			}
		} else if vk == reflect.String {
			rv.SetString(strconv.FormatInt(out.(int64), 10))
		} else {
			return ErrConversionFailed
		}
	case reflect.Float32:
		if isFloatSeriesType(vk) {
			rv.SetFloat(float64(out.(float32)))
		} else {
			return ErrConversionFailed
		}
	case reflect.Float64:
		if isFloatSeriesType(vk) {
			rv.SetFloat(out.(float64))
		} else {
			return ErrConversionFailed
		}
	case reflect.Slice:
		return handleConvertSlice(out, mvt, vit, &rv)
	default:
		// Last resort: parse a string column into a time-convertible target.
		if mvt.Kind() == reflect.String && vit.ConvertibleTo(timeType) {
			t, err := ztime.Parse(out.(string))
			if err == nil {
				if vit.AssignableTo(timeType) {
					rv.Set(reflect.ValueOf(t))
				} else if vit.AssignableTo(jsontimeType) {
					rv.Set(reflect.ValueOf(JsonTime(t)))
				}
				return nil
			}
		}
		return ErrConversionFailed
	}
	return nil
}

// handleConvertSlice converts a []byte column value into the destination by
// parsing its string form (string/int/uint/float/bool targets) or delegating
// to a ByteUnmarshaler implementation.
func handleConvertSlice(mapValue interface{}, mvt, vit reflect.Type, valuei *reflect.Value) error {
	mapValueSlice, ok := mapValue.([]byte)
	if !ok {
		return ErrSliceToString
	}
	mapValueStr := string(mapValueSlice)
	vitKind := vit.Kind()
	switch {
	case vitKind == reflect.String:
		valuei.SetString(mapValueStr)
	case isIntSeriesType(vitKind):
		intVal, err := strconv.ParseInt(mapValueStr, 10, 64)
		if nil != err {
			return err
		}
		valuei.SetInt(intVal)
	case isUintSeriesType(vitKind):
		uintVal, err := strconv.ParseUint(mapValueStr, 10, 64)
		if nil != err {
			return err
		}
		valuei.SetUint(uintVal)
	case isFloatSeriesType(vitKind):
		floatVal, err := strconv.ParseFloat(mapValueStr, 64)
		if nil != err {
			return err
		}
		valuei.SetFloat(floatVal)
	case vitKind == reflect.Bool:
		intVal, err := strconv.ParseInt(mapValueStr, 10, 64)
		if nil != err {
			return err
		}
		if intVal > 0 {
			valuei.SetBool(true)
		} else {
			valuei.SetBool(false)
		}
	default:
		if _, ok := valuei.Interface().(ByteUnmarshaler); ok {
			return byteUnmarshal(mapValueSlice, valuei)
		}
		return ErrConversionFailed
	}
	return nil
}

// byteUnmarshal invokes UnmarshalByte on the destination, allocating the
// pointee first when the pointer is nil, and only stores the new pointer once
// unmarshaling succeeded.
func byteUnmarshal(mapValueSlice []byte, valuei *reflect.Value) error {
	var pt reflect.Value
	initFlag := false
	if valuei.IsNil() {
		pt = reflect.New(valuei.Type().Elem())
		initFlag = true
	} else {
		pt = *valuei
	}
	err := pt.Interface().(ByteUnmarshaler).UnmarshalByte(mapValueSlice)
	if nil != err {
		structName := pt.Elem().Type().Name()
		return fmt.Errorf("%s.UnmarshalByte fail to unmarshal the bytes, err: %s", structName, err)
	}
	if initFlag {
		valuei.Set(pt)
	}
	return nil
}

// handleConvertTime converts a time.Time column value into a string (using
// ztime.TimeTpl) or into any struct type convertible from time.Time.
func handleConvertTime(assertT time.Time, mvt, vit reflect.Type, rv *reflect.Value) error {
	switch vit.Kind() {
	case reflect.String:
		sTime := assertT.Format(ztime.TimeTpl)
		rv.SetString(sTime)
		return nil
	case reflect.Struct:
		if vit.ConvertibleTo(mvt) {
			v := reflect.ValueOf(assertT)
			vv := v.Convert(vit)
			rv.Set(vv)
			return nil
		}
	}
	return errors.New("convert time failed")
}
package cherryDataConfig import ( "fmt" cherryUtils "github.com/cherry-game/cherry/extend/utils" "github.com/cherry-game/cherry/interfaces" cherryLogger "github.com/cherry-game/cherry/logger" "github.com/cherry-game/cherry/profile" "sync" ) type DataConfigComponent struct { cherryInterfaces.BaseComponent sync.Mutex register []IConfigFile configFiles map[string]interface{} source IDataSource parser IParser parserExtName string } func NewComponent() *DataConfigComponent { return &DataConfigComponent{} } //Name unique components name func (d *DataConfigComponent) Name() string { return "data_config_component" } func (d *DataConfigComponent) Init() { d.configFiles = make(map[string]interface{}) // read data_config node in profile-x.json configNode := cherryProfile.Config("data_config") if configNode.LastError() != nil { panic(fmt.Sprintf("not found `data_config` node in `%s` file.", cherryProfile.FilePath())) } // get data source sourceName := configNode.Get("data_source").ToString() d.source = GetDataSource(sourceName) if d.source == nil { panic(fmt.Sprintf("data source not found. sourceName = %s", sourceName)) } // get file parser parserName := configNode.Get("parser").ToString() d.parser = GetParser(parserName) if d.parser == nil { panic(fmt.Sprintf("parser not found. 
sourceName = %s", parserName)) } cherryUtils.Try(func() { d.source.Init(d) }, func(errString string) { cherryLogger.Warn(errString) }) } func (d *DataConfigComponent) Stop() { if d.source != nil { d.source.Stop() } } func (d *DataConfigComponent) Register(file IConfigFile) { d.register = append(d.register, file) } func (d *DataConfigComponent) GetFiles() []IConfigFile { return d.register } func (d *DataConfigComponent) Get(fileName string) interface{} { return d.configFiles[fileName] } func (d *DataConfigComponent) Load(fileName string, data []byte) { cherryUtils.Try(func() { var v interface{} err := d.parser.Unmarshal(data, &v) if err != nil { cherryLogger.Warn(err) return } defer d.Unlock() d.Lock() d.configFiles[fileName] = &v }, func(errString string) { cherryLogger.Warn(errString) }) }
import "sort" /* * @lc app=leetcode id=47 lang=golang * * [47] Permutations II * * https://leetcode.com/problems/permutations-ii/description/ * * algorithms * Medium (46.58%) * Likes: 2089 * Dislikes: 63 * Total Accepted: 366.7K * Total Submissions: 785.5K * Testcase Example: '[1,1,2]' * * Given a collection of numbers that might contain duplicates, return all * possible unique permutations. * * Example: * * * Input: [1,1,2] * Output: * [ * ⁠ [1,1,2], * ⁠ [1,2,1], * ⁠ [2,1,1] * ] * * */ // @lc code=start func permuteUnique(nums []int) [][]int { return permuteUnique1(nums) } func permuteUnique1(nums []int) [][]int { sort.Ints(nums) output, track, visit := [][]int{}, []int{}, make([]bool, len(nums)) backtrack(&output, nums, track, visit, 0) return output } func backtrack(output *[][]int, nums, track []int, visited []bool, index int) { if len(track) == len(nums) { path := make([]int, len(nums)) copy(path, track) *output = append(*output, path) return } for i := 0; i < len(nums); i++ { if visited[i] { continue } // https://leetcode.com/problems/permutations-ii/discuss/18594/Really-easy-Java-solution-much-easier-than-the-solutions-with-very-high-vote // With inputs as [1a, 1b, 2a], /// If we don't handle the duplicates, the results would be: [1a, 1b, 2a], [1b, 1a, 2a]..., // so we must make sure 1a goes before 1b to avoid duplicates // By using nums[i-1]==nums[i] && !used[i-1], we can make sure that 1b cannot be choosed before 1a if i > 0 && nums[i-1] == nums[i] && !visited[i-1] { continue } track = append(track, nums[i]) visited[i] = true backtrack(output, nums, track, visited, index+1) track = track[:len(track)-1] visited[i] = false } } // @lc code=end
package main import ( _ "embed" "fmt" "math/rand" "strings" "time" ) // Here comes the embedding! //go:embed quotes.txt var quoteData string func main() { rand.Seed(time.Now().Unix()) quotes := strings.Split(quoteData, "\n\n") fmt.Println(quotes[rand.Intn(len(quotes))]) }
package main import ( "bufio" "fmt" "log" "os" "regexp" "strings" "time" ) var bagSearch = map[string]interface{}{"shiny gold": nil} func main() { start := time.Now() fmt.Printf("Result is %v \n", run()) log.Printf("Code took %s", time.Since(start)) } func run() int { var validRules = []string{} f, err := os.Open("input.txt") if err != nil { log.Fatal(err) } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { if strings.Contains(scanner.Text(), "contain no other bags") { continue } rule := strings.ReplaceAll(scanner.Text(), "bags", "") rule = strings.ReplaceAll(rule, ".", "") rule = strings.ReplaceAll(rule, "bag", "") rule = strings.ReplaceAll(rule, " , ", ",") validRules = append(validRules, strings.TrimSpace(rule)) } if err := scanner.Err(); err != nil { log.Fatal(err) } return countBags(validRules) } func countBags(validRules []string) int { // fmt.Println(validRules) reg, err := regexp.Compile("[^a-zA-Z] +") if err != nil { log.Fatal(err) } allBags := map[string]string{} search := true for search { newBags := map[string]string{} for _, rule := range validRules { sRule := strings.Split(rule, " contain") for k := range bagSearch { pk := reg.ReplaceAllString(k, "") if strings.Contains(sRule[0], pk) { newBags[strings.Split(sRule[0], " bags")[0]] = sRule[1] } } } search = false for k, v := range newBags { k = strings.TrimSpace(k) if _, ok := allBags[k]; !ok { allBags[k] = v sv := strings.Split(v, ",") for _, b := range sv { bagSearch[strings.TrimSpace(b)] = nil } search = true } } } return calculateBags(allBags) // Removing the shiny gold } func calculateBags(bags map[string]string) int { fmt.Println(bags) startBag := bags["shiny gold"] count := 1 for k, v := range bags { fmt.Println(k, v) } fmt.Println(startBag) return count }
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.

package wire

import (
	"encoding/json"
	"strings"
	"unicode/utf8"
)

// FIBeneficiaryFIAdvice is the financial institution beneficiary financial institution
type FIBeneficiaryFIAdvice struct {
	// tag identifies this record type within a FEDWIRE message
	tag string
	// Advice holds the advice code plus up to six free-text lines
	Advice Advice `json:"advice,omitempty"`

	// validator is composed for data validation
	validator
	// converters is composed for WIRE to GoLang Converters
	converters
}

// NewFIBeneficiaryFIAdvice returns a new FIBeneficiaryFIAdvice
func NewFIBeneficiaryFIAdvice() *FIBeneficiaryFIAdvice {
	fibfia := &FIBeneficiaryFIAdvice{
		tag: TagFIBeneficiaryFIAdvice,
	}
	return fibfia
}

// Parse takes the input string and parses the FIBeneficiaryFIAdvice values
//
// Parse provides no guarantee about all fields being filled in. Callers should make a Validate() call to confirm
// successful parsing and data validity.
func (fibfia *FIBeneficiaryFIAdvice) Parse(record string) error {
	// Minimum layout: 6-char tag + 3-char advice code.
	if utf8.RuneCountInString(record) < 9 {
		return NewTagMinLengthErr(9, len(record))
	}

	fibfia.tag = record[:6]
	fibfia.Advice.AdviceCode = fibfia.parseStringField(record[6:9])
	length := 9

	// LineOne is limited to 26 characters; LineTwo..LineSix to 33 each.
	// `length` tracks how far into record we have consumed.
	value, read, err := fibfia.parseVariableStringField(record[length:], 26)
	if err != nil {
		return fieldError("LineOne", err)
	}
	fibfia.Advice.LineOne = value
	length += read

	value, read, err = fibfia.parseVariableStringField(record[length:], 33)
	if err != nil {
		return fieldError("LineTwo", err)
	}
	fibfia.Advice.LineTwo = value
	length += read

	value, read, err = fibfia.parseVariableStringField(record[length:], 33)
	if err != nil {
		return fieldError("LineThree", err)
	}
	fibfia.Advice.LineThree = value
	length += read

	value, read, err = fibfia.parseVariableStringField(record[length:], 33)
	if err != nil {
		return fieldError("LineFour", err)
	}
	fibfia.Advice.LineFour = value
	length += read

	value, read, err = fibfia.parseVariableStringField(record[length:], 33)
	if err != nil {
		return fieldError("LineFive", err)
	}
	fibfia.Advice.LineFive = value
	length += read

	value, read, err = fibfia.parseVariableStringField(record[length:], 33)
	if err != nil {
		return fieldError("LineSix", err)
	}
	fibfia.Advice.LineSix = value
	length += read

	// Reject records with unread trailing data.
	if err := fibfia.verifyDataWithReadLength(record, length); err != nil {
		return NewTagMaxLengthErr(err)
	}

	return nil
}

// UnmarshalJSON decodes data into the struct and then forces the tag to the
// canonical value (the tag is unexported and never comes from JSON).
func (fibfia *FIBeneficiaryFIAdvice) UnmarshalJSON(data []byte) error {
	type Alias FIBeneficiaryFIAdvice
	aux := struct {
		*Alias
	}{
		(*Alias)(fibfia),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	fibfia.tag = TagFIBeneficiaryFIAdvice
	return nil
}

// String returns a fixed-width FIBeneficiaryFIAdvice record
func (fibfia *FIBeneficiaryFIAdvice) String() string {
	return fibfia.Format(FormatOptions{
		VariableLengthFields: false,
	})
}

// Format returns a FIBeneficiaryFIAdvice record formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) Format(options FormatOptions) string {
	var buf strings.Builder
	buf.Grow(200)

	buf.WriteString(fibfia.tag)
	buf.WriteString(fibfia.AdviceCodeField())
	buf.WriteString(fibfia.FormatLineOne(options))
	buf.WriteString(fibfia.FormatLineTwo(options))
	buf.WriteString(fibfia.FormatLineThree(options))
	buf.WriteString(fibfia.FormatLineFour(options))
	buf.WriteString(fibfia.FormatLineFive(options))
	buf.WriteString(fibfia.FormatLineSix(options))

	if options.VariableLengthFields {
		return fibfia.stripDelimiters(buf.String())
	} else {
		return buf.String()
	}
}

// Validate performs WIRE format rule checks on FIBeneficiaryFIAdvice and returns an error if not Validated
// The first error encountered is returned and stops that parsing.
func (fibfia *FIBeneficiaryFIAdvice) Validate() error {
	// The tag must match this record type exactly.
	if fibfia.tag != TagFIBeneficiaryFIAdvice {
		return fieldError("tag", ErrValidTagForType, fibfia.tag)
	}
	if err := fibfia.isAdviceCode(fibfia.Advice.AdviceCode); err != nil {
		return fieldError("AdviceCode", err, fibfia.Advice.AdviceCode)
	}
	// Each free-text line must be alphanumeric per the WIRE format rules.
	if err := fibfia.isAlphanumeric(fibfia.Advice.LineOne); err != nil {
		return fieldError("LineOne", err, fibfia.Advice.LineOne)
	}
	if err := fibfia.isAlphanumeric(fibfia.Advice.LineTwo); err != nil {
		return fieldError("LineTwo", err, fibfia.Advice.LineTwo)
	}
	if err := fibfia.isAlphanumeric(fibfia.Advice.LineThree); err != nil {
		return fieldError("LineThree", err, fibfia.Advice.LineThree)
	}
	if err := fibfia.isAlphanumeric(fibfia.Advice.LineFour); err != nil {
		return fieldError("LineFour", err, fibfia.Advice.LineFour)
	}
	if err := fibfia.isAlphanumeric(fibfia.Advice.LineFive); err != nil {
		return fieldError("LineFive", err, fibfia.Advice.LineFive)
	}
	if err := fibfia.isAlphanumeric(fibfia.Advice.LineSix); err != nil {
		return fieldError("LineSix", err, fibfia.Advice.LineSix)
	}
	return nil
}

// AdviceCodeField gets a string of the AdviceCode field
func (fibfia *FIBeneficiaryFIAdvice) AdviceCodeField() string {
	return fibfia.alphaField(fibfia.Advice.AdviceCode, 3)
}

// LineOneField gets a string of the LineOne field
func (fibfia *FIBeneficiaryFIAdvice) LineOneField() string {
	return fibfia.alphaField(fibfia.Advice.LineOne, 26)
}

// LineTwoField gets a string of the LineTwo field
func (fibfia *FIBeneficiaryFIAdvice) LineTwoField() string {
	return fibfia.alphaField(fibfia.Advice.LineTwo, 33)
}

// LineThreeField gets a string of the LineThree field
func (fibfia *FIBeneficiaryFIAdvice) LineThreeField() string {
	return fibfia.alphaField(fibfia.Advice.LineThree, 33)
}

// LineFourField gets a string of the LineFour field
func (fibfia *FIBeneficiaryFIAdvice) LineFourField() string {
	return fibfia.alphaField(fibfia.Advice.LineFour, 33)
}

// LineFiveField gets a string of the LineFive field
func (fibfia *FIBeneficiaryFIAdvice) LineFiveField() string {
	return fibfia.alphaField(fibfia.Advice.LineFive, 33)
}

// LineSixField gets a string of the LineSix field
func (fibfia *FIBeneficiaryFIAdvice) LineSixField() string {
	return fibfia.alphaField(fibfia.Advice.LineSix, 33)
}

// FormatLineOne returns Advice.LineOne formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) FormatLineOne(options FormatOptions) string {
	return fibfia.formatAlphaField(fibfia.Advice.LineOne, 26, options)
}

// FormatLineTwo returns Advice.LineTwo formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) FormatLineTwo(options FormatOptions) string {
	return fibfia.formatAlphaField(fibfia.Advice.LineTwo, 33, options)
}

// FormatLineThree returns Advice.LineThree formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) FormatLineThree(options FormatOptions) string {
	return fibfia.formatAlphaField(fibfia.Advice.LineThree, 33, options)
}

// FormatLineFour returns Advice.LineFour formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) FormatLineFour(options FormatOptions) string {
	return fibfia.formatAlphaField(fibfia.Advice.LineFour, 33, options)
}

// FormatLineFive returns Advice.LineFive formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) FormatLineFive(options FormatOptions) string {
	return fibfia.formatAlphaField(fibfia.Advice.LineFive, 33, options)
}

// FormatLineSix returns Advice.LineSix formatted according to the FormatOptions
func (fibfia *FIBeneficiaryFIAdvice) FormatLineSix(options FormatOptions) string {
	return fibfia.formatAlphaField(fibfia.Advice.LineSix, 33, options)
}
package controllers

import (
	"fmt"
	"os"

	"github.com/itang/yunshang/main/app"
	"github.com/itang/yunshang/main/app/models/entity"
	"github.com/revel/revel"
)

// App is the main application controller.
type App struct {
	AppController
}

// Index renders the application home page.
func (c App) Index() revel.Result {
	c.setChannel("index/")
	return c.Render()
}

// AdImagesData returns the list of advertisement images as JSON.
func (c App) AdImagesData() revel.Result {
	images := c.appApi().FindAdImages()
	return c.RenderJson(Success("", images))
}

// AdImage streams a single advertisement image file, or 404 if missing.
func (c App) AdImage(file string) revel.Result {
	targetFile, err := c.appApi().GetAdImageFile(file)
	if err != nil {
		return c.NotFound("No found file " + file)
	}
	c.Response.ContentType = "image/jpg"
	return c.RenderFile(targetFile, "")
}

// HotKeywordsData returns the configured hot search keywords as JSON.
func (c App) HotKeywordsData() revel.Result {
	keywords := c.appApi().FindHotKeywords()
	return c.RenderJson(Success("", keywords))
}

// NewInquiry renders the price-inquiry form, prefilled with the current
// user's details when logged in.
func (c App) NewInquiry(q string) revel.Result {
	c.setChannel("index/inquiry")
	user, _ := c.currUser()
	userDetail, _ := c.userApi().GetUserDetailByUserId(user.Id)
	return c.Render(q, user, userDetail)
}

// DoNewInquiry validates and saves a submitted price inquiry; on failure the
// user is redirected back to the form with a flash error.
func (c App) DoNewInquiry(i entity.Inquiry) revel.Result {
	c.Validation.Required(i.Model).Message("请填写询价型号")
	c.Validation.Required(i.Quantity).Message("请填写询价数量")
	c.Validation.Required(i.Contact).Message("请填写联系人")
	c.Validation.Required(i.Phone).Message("请填写联系电话")
	if ret := c.doValidate(App.NewInquiry); ret != nil {
		return ret
	}

	// Attach the inquiry to the current account when one is logged in.
	if c.isLogined() {
		i.UserId = c.forceSessionUserId()
	}

	err := c.appApi().SaveInquiry(i)
	if err != nil {
		c.FlashParams()
		c.Flash.Error("询价出错, 请重试!")
		return c.Redirect(App.NewInquiry)
	}

	c.setChannel("index/inquiry")
	return c.Render()
}

// Version returns the application version string.
func (c App) Version() revel.Result {
	return c.RenderText(app.Version)
}

// ProcessInfo returns the server process and parent process ids as JSON.
func (c App) ProcessInfo() revel.Result {
	d := struct {
		Pid  int
		Ppid int
	}{os.Getpid(), os.Getppid()}
	return c.RenderJson(Success("进程信息", d))
}

// ProcessInfoLine returns "pid ppid" as a single plain-text line.
func (c App) ProcessInfoLine() revel.Result {
	t := fmt.Sprintf("%d %d", os.Getpid(), os.Getppid())
	return c.RenderText(t)
}

// Weixin renders the WeChat page.
func (c App) Weixin() revel.Result {
	return c.Render()
}

// NewFeedback renders the feedback form, prefilled with the current user's
// details when logged in.
func (c App) NewFeedback(subject string) revel.Result {
	c.setChannel("index/feeback")
	user, _ := c.currUser()
	userDetail, _ := c.userApi().GetUserDetailByUserId(user.Id)
	return c.Render(subject, user, userDetail)
}

// DoNewFeedback validates and saves submitted feedback, rolling back the
// transaction and redirecting with a flash error on failure.
func (c App) DoNewFeedback(i entity.Feedback) revel.Result {
	c.Validation.Required(i.Content).Message("请填写内容")
	c.Validation.Required(i.Phone).Message("请填写联系电话")
	if ret := c.doValidate(App.NewFeedback); ret != nil {
		return ret
	}

	err := c.appApi().SaveFeedback(i)
	if err != nil {
		c.setRollbackOnly()
		c.FlashParams()
		c.Flash.Error("填写信息反馈出错, 请重试!")
		return c.Redirect(App.NewFeedback)
	}

	c.Flash.Success("提交信息反馈完成!")
	return c.Redirect(App.NewFeedback)
}
package main import "fmt" //BynarySearch is struct base for implements methods to bynary search //algoritm. type BynarySearch struct{} //bynarySearch is responsable by search element in array. func bynarySearch(arr []int, elem, start, end int) int { index := (start + end) / 2 if index < len(arr) && arr[index] == elem { return index } if start != end { if arr[index] < elem { return bynarySearch(arr, elem, index+1, end) } return bynarySearch(arr, elem, start, index-1) } return -1 } //Search is return of index the element. func (b *BynarySearch) Search(arr []int, number int) int { return bynarySearch(arr, number, 0, len(arr)) } func main() { arr := []int{1, 2, 5, 6, 7, 9, 12, 14} b := BynarySearch{} index := b.Search(arr, 12) fmt.Println(index) }
package entities import "time" //User connects to login credentials to a user object type User struct { *Entity `bson:",inline"` // main *unique* reference id, its additional to ID because it might be a list of reference ids in the future so a user // can merge multiple accounts (refids) into the same user object RefID string `json:"refid,omitempty"` // name and email should never be displayed in a game scenario Name string `json:"name"` Email string `json:"email"` // nickname can be used to display a player/user in the case where character name is not applicable Nickname string `json:"nickname"` Created time.Time `bson:"created" json:"created,omitempty"` LastSeen time.Time `bson:"lastSeen" json:"lastSeen,omitempty"` Picture string `json:"picture"` // every time the user logs in the last character is automatically loaded. When switched we track the last character in the user object while the // game server will switch completely to the new character LastCharacter string `bson:"lastCharacter" json:"lastCharacter"` // is set to false after the first PUT request IsNewUser bool `bson:"isNewUser" json:"isNewUser"` IsOnline bool `bson:"isOnline" json:"isOnline"` } // NewUser creates a new user func NewUser() *User { return &User{} }
package controllers import ( "encoding/json" "fmt" "github.com/syndtr/goleveldb/leveldb" "net/http" "strconv" "time" ) type BlockData struct { Sender string Receiver string Data string TimeStamp int64 } type StroeData struct { Sender string Receiver string Data string TimeStamp int64 Transaction string } func (app *Application) RequestHandler(w http.ResponseWriter, r *http.Request) { cookie, _ := r.Cookie("username") if cookie == nil { http.Redirect(w,r,"/login",http.StatusFound) return } data := &struct { TransactionId string Success bool Response bool Username string }{ TransactionId: "", Success: false, Response: false, Username: cookie.Value, } if r.FormValue("submitted") == "true" { helloValue := r.FormValue("hello") receiver := r.FormValue("receive") data1 := r.FormValue("data") if receiver==""||data1=="" { fmt.Println("get content error") } var blockdata BlockData var storetodb StroeData blockdata.Sender = helloValue blockdata.Receiver = receiver blockdata.Data = data1 blockdata.TimeStamp = time.Now().Unix() d, err := json.Marshal(&blockdata) if err != nil { fmt.Println("change to json err") } else { fmt.Println(string(d)) } storedata := string(d) txid, err := app.Fabric.InvokeHello(storedata) if err != nil { http.Error(w, "Unable to invoke hello in the blockchain", 500) } storetodb.Sender = helloValue storetodb.Receiver = receiver storetodb.Data = data1 storetodb.TimeStamp = blockdata.TimeStamp storetodb.Transaction = txid e, err := json.Marshal(&storetodb) if err != nil { fmt.Println("change to json err2") } else { fmt.Println(string(e)) } storetodbdata := string(e) db, err := leveldb.OpenFile("db",nil) defer db.Close() if err != nil { fmt.Println("open db err",err) } err = db.Put([]byte(strconv.FormatFloat(float64(1)/float64(storetodb.TimeStamp),'E',-1,64)),[]byte(storetodbdata),nil) if err != nil { fmt.Println("Put data err",err) } data.TransactionId = txid data.Success = true data.Response = true } renderTemplate(w, r, "request.html", data) }
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package policy

import (
	"context"
	"time"

	"chromiumos/tast/common/fixture"
	"chromiumos/tast/common/pci"
	"chromiumos/tast/common/policy"
	"chromiumos/tast/common/policy/fakedms"
	"chromiumos/tast/ctxutil"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/browser"
	"chromiumos/tast/local/chrome/browser/browserfixt"
	"chromiumos/tast/local/chrome/uiauto/faillog"
	"chromiumos/tast/local/policyutil"
	"chromiumos/tast/local/policyutil/safesearch"
	"chromiumos/tast/testing"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         ForceYouTubeSafetyMode,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Test the behavior of deprecated ForceYouTubeSafetyMode policy: check if YouTube safe search is enabled based on the value of the policy",
		Contacts: []string{
			"cmfcmf@google.com", // Test author
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline", "informational"},
		Params: []testing.Param{{
			Fixture: fixture.ChromePolicyLoggedIn,
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Fixture:           fixture.LacrosPolicyLoggedIn,
			Val:               browser.TypeLacros,
		}},
		// Loading two YouTube videos on slower devices can take a while (we observed subtests that took up to 40 seconds), thus give every subtest 1 minute to run.
		Timeout: 7 * time.Minute,
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.ForceYouTubeSafetyMode{}, pci.VerifiedFunctionalityJS),
			pci.SearchFlag(&policy.ForceYouTubeRestrict{}, pci.VerifiedFunctionalityJS),
		},
	})
}

// ForceYouTubeSafetyMode serves each policy combination from the fake DMS and
// verifies the resulting YouTube restriction level in the browser. The newer
// ForceYouTubeRestrict policy, when set, overrides the deprecated
// ForceYouTubeSafetyMode in every combination below.
func ForceYouTubeSafetyMode(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()

	// Reserve ten seconds for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()

	for _, param := range []struct {
		name string
		// strongContentRestricted: whether strongly restricted content must be blocked.
		strongContentRestricted bool
		// mildContentRestricted: whether mildly restricted content must be blocked.
		mildContentRestricted bool
		// value: the policy combination to serve.
		value []policy.Policy
	}{
		{
			name:                    "enabled",
			strongContentRestricted: true,
			mildContentRestricted:   false,
			value:                   []policy.Policy{&policy.ForceYouTubeSafetyMode{Val: true}},
		},
		{
			name:                    "enabled_overwritten_by_ForceYouTubeRestrict_disabled",
			strongContentRestricted: false,
			mildContentRestricted:   false,
			value: []policy.Policy{
				&policy.ForceYouTubeSafetyMode{Val: true},
				&policy.ForceYouTubeRestrict{Val: safesearch.ForceYouTubeRestrictDisabled}},
		},
		{
			name:                    "enabled_overwritten_by_ForceYouTubeRestrict_strict",
			strongContentRestricted: true,
			mildContentRestricted:   true,
			value: []policy.Policy{
				&policy.ForceYouTubeSafetyMode{Val: true},
				&policy.ForceYouTubeRestrict{Val: safesearch.ForceYouTubeRestrictStrict}},
		},
		{
			name:                    "disabled",
			strongContentRestricted: false,
			mildContentRestricted:   false,
			value:                   []policy.Policy{&policy.ForceYouTubeSafetyMode{Val: false}},
		},
		{
			name:                    "disabled_overwritten_by_ForceYouTubeRestrict_moderate",
			strongContentRestricted: true,
			mildContentRestricted:   false,
			value: []policy.Policy{
				&policy.ForceYouTubeSafetyMode{Val: false},
				&policy.ForceYouTubeRestrict{Val: safesearch.ForceYouTubeRestrictModerate}},
		},
		{
			name:                    "disabled_overwritten_by_ForceYouTubeRestrict_strict",
			strongContentRestricted: true,
			mildContentRestricted:   true,
			value: []policy.Policy{
				&policy.ForceYouTubeSafetyMode{Val: false},
				&policy.ForceYouTubeRestrict{Val: safesearch.ForceYouTubeRestrictStrict}},
		},
		{
			name:                    "unset",
			strongContentRestricted: false,
			mildContentRestricted:   false,
			value:                   []policy.Policy{&policy.ForceYouTubeSafetyMode{Stat: policy.StatusUnset}},
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Start each subtest from a clean policy state.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Fatal("Failed to clean up: ", err)
			}

			if err := policyutil.ServeAndVerify(ctx, fdms, cr, param.value); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}

			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Fatal("Failed to setup chrome: ", err)
			}
			defer closeBrowser(cleanupCtx)
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)

			if err := safesearch.TestYouTubeRestrictedMode(ctx, br, param.strongContentRestricted, param.mildContentRestricted); err != nil {
				s.Error("Failed to verify YouTube content restriction: ", err)
			}
		})
	}
}
package main

import (
	"os"
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// testAccProviders and testAccProvider are shared by the acceptance tests
// in this package.
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider

func init() {
	testAccProvider = Provider()
	testAccProviders = map[string]terraform.ResourceProvider{
		"windowsdns": testAccProvider,
	}
}

// TestProvider verifies that the provider schema is internally consistent.
func TestProvider(t *testing.T) {
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("Error: %s", err)
	}
}

// TestProvider_impl statically asserts that Provider satisfies
// terraform.ResourceProvider.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}

// testAccPreCheck fails fast when the WinRM environment variables required
// by the acceptance tests are not set.
func testAccPreCheck(t *testing.T) {
	if v := os.Getenv("WINRM_USERNAME"); v == "" {
		t.Fatal("WINRM_USERNAME must be set for tests")
	}
	if v := os.Getenv("WINRM_PASSWORD"); v == "" {
		t.Fatal("WINRM_PASSWORD must be set for tests")
	}
	if v := os.Getenv("WINRM_SERVER"); v == "" {
		t.Fatal("WINRM_SERVER must be set for tests")
	}
	if v := os.Getenv("WINRM_DOMAIN"); v == "" {
		t.Fatal("WINRM_DOMAIN must be set for tests")
	}
}

// TestDerivePort checks port derivation for explicit ports and for the
// default WinRM HTTP/HTTPS ports. It was previously named testPorts, which
// `go test` never executed: test functions must be named TestXxx with an
// uppercase letter after "Test".
func TestDerivePort(t *testing.T) {
	tables := []struct {
		p     int
		https bool
		e     int
	}{
		{1234, false, 1234},
		{1234, true, 1234},
		{0, false, 5985},
		{0, true, 5986},
	}

	for _, table := range tables {
		derivedPort := derivePort(table.p, table.https)
		if derivedPort != table.e {
			t.Errorf("Port test for port %d with HTTPS %t returned %d instead of %d", table.p, table.https, derivedPort, table.e)
		}
	}
}
package httpbakery

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"path"

	"gopkg.in/errgo.v1"
	"gopkg.in/macaroon.v1"

	"gopkg.in/macaroon-bakery.v0/bakery"
	"gopkg.in/macaroon-bakery.v0/bakery/checkers"
)

// dischargeHandler holds the state needed to serve third-party caveat
// discharges minted against svc.
type dischargeHandler struct {
	svc     *bakery.Service
	checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error)
}

// AddDischargeHandler adds handlers to the given
// ServeMux to serve third party caveat discharges
// using the given service.
//
// The handlers are added under the given rootPath,
// which must be non-empty.
//
// The check function is used to check whether a client making the given
// request should be allowed a discharge for the given caveat. If it
// does not return an error, the caveat will be discharged, with any
// returned caveats also added to the discharge macaroon.
// If it returns an error with a *Error cause, the error will be marshaled
// and sent back to the client.
//
// The name space served by DischargeHandler is as follows.
// All parameters can be provided either as URL attributes
// or form attributes. The result is always formatted as a JSON
// object.
//
// On failure, all endpoints return an error described by
// the Error type.
//
// POST /discharge
//	params:
//		id: id of macaroon to discharge
//		location: location of original macaroon (optional (?))
//		?? flow=redirect|newwindow
//	result on success (http.StatusOK):
//		{
//			Macaroon *macaroon.Macaroon
//		}
//
// POST /create
//	params:
//		condition: caveat condition to discharge
//		rootkey: root key of discharge caveat
//	result:
//		{
//			CaveatID: string
//		}
//
// GET /publickey
//	result:
//		public key of service
//		expiry time of key
func AddDischargeHandler(mux *http.ServeMux, rootPath string, svc *bakery.Service, checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) {
	d := &dischargeHandler{
		svc:     svc,
		checker: checker,
	}
	mux.Handle(path.Join(rootPath, "discharge"), handleJSON(d.serveDischarge))
	mux.Handle(path.Join(rootPath, "create"), handleJSON(d.serveCreate))
	// TODO(rog) is there a case for making public key caveat signing
	// optional?
	mux.Handle(path.Join(rootPath, "publickey"), handleJSON(d.servePublicKey))
}

// dischargeResponse is the JSON body returned by a successful
// POST /discharge request.
type dischargeResponse struct {
	Macaroon *macaroon.Macaroon `json:",omitempty"`
}

// serveDischarge wraps serveDischarge1 with result logging.
func (d *dischargeHandler) serveDischarge(h http.Header, req *http.Request) (interface{}, error) {
	r, err := d.serveDischarge1(h, req)
	if err != nil {
		log.Printf("serveDischarge -> error %#v", err)
	} else {
		log.Printf("serveDischarge -> %#v", r)
	}
	return r, err
}

// serveDischarge1 implements POST /discharge: it discharges the third-party
// caveat identified by the "id" form value, consulting d.checker for any
// additional caveats to attach to the discharge macaroon.
func (d *dischargeHandler) serveDischarge1(h http.Header, req *http.Request) (interface{}, error) {
	log.Printf("dischargeHandler.serveDischarge {")
	defer log.Printf("}")
	if req.Method != "POST" {
		// TODO http.StatusMethodNotAllowed)
		return nil, badRequestErrorf("method not allowed")
	}
	// Previously the ParseForm error was silently discarded, which turned a
	// malformed request body into a misleading "id attribute is empty" error.
	if err := req.ParseForm(); err != nil {
		return nil, badRequestErrorf("cannot parse form: %v", err)
	}
	id := req.Form.Get("id")
	if id == "" {
		return nil, badRequestErrorf("id attribute is empty")
	}
	checker := func(cavId, cav string) ([]checkers.Caveat, error) {
		return d.checker(req, cavId, cav)
	}

	// TODO(rog) pass location into discharge
	// location := req.Form.Get("location")
	var resp dischargeResponse
	m, err := d.svc.Discharge(bakery.ThirdPartyCheckerFunc(checker), id)
	if err != nil {
		return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any)
	}
	resp.Macaroon = m
	return &resp, nil
}
type thirdPartyCaveatIdRecord struct { RootKey []byte Condition string } type caveatIdResponse struct { CaveatId string Error string } func (d *dischargeHandler) serveCreate(h http.Header, req *http.Request) (interface{}, error) { req.ParseForm() condition := req.Form.Get("condition") rootKeyStr := req.Form.Get("root-key") if len(condition) == 0 { return nil, badRequestErrorf("empty value for condition") } if len(rootKeyStr) == 0 { return nil, badRequestErrorf("empty value for root key") } rootKey, err := base64.StdEncoding.DecodeString(rootKeyStr) if err != nil { return nil, badRequestErrorf("cannot base64-decode root key: %v", err) } // TODO(rog) what about expiry times? idBytes, err := randomBytes(24) if err != nil { return nil, fmt.Errorf("cannot generate random key: %v", err) } id := fmt.Sprintf("%x", idBytes) recordBytes, err := json.Marshal(thirdPartyCaveatIdRecord{ Condition: condition, RootKey: rootKey, }) if err != nil { return nil, fmt.Errorf("cannot marshal caveat id record: %v", err) } err = d.svc.Store().Put(id, string(recordBytes)) if err != nil { return nil, fmt.Errorf("cannot store caveat id record: %v", err) } return caveatIdResponse{ CaveatId: id, }, nil } type publicKeyResponse struct { PublicKey *bakery.PublicKey } func (d *dischargeHandler) servePublicKey(h http.Header, r *http.Request) (interface{}, error) { return publicKeyResponse{d.svc.PublicKey()}, nil } func randomBytes(n int) ([]byte, error) { b := make([]byte, n) _, err := rand.Read(b) if err != nil { return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) } return b, nil }
package def

import (
	"errors"
	"testing"
)

// TestNormalizedKVError feeds representative KV and p2p errors through
// NormalizedKVError and logs whatever comes back; it never fails on the
// normalization result itself.
func TestNormalizedKVError(t *testing.T) {
	inputs := []error{
		errors.New("Key not found"),
		errors.New("invalid stream"),
	}
	for _, in := range inputs {
		if got := NormalizedKVError(in); got != nil {
			t.Log(got)
		}
	}
}
/** * @Author: lzw5399 * @Date: 2021/1/15 23:35 * @Desc: */ package service import ( "errors" "workflow/global" . "workflow/model" "workflow/model/request" "workflow/util" "gorm.io/gorm" ) // 创建新的process流程 func CreateProcess(r *request.ProcessRequest, originXml string) error { // 检查流程是否已存在 var c int64 global.BankDb.Model(&Process{}).Where("code=?", r.ID).Count(&c) if c != 0 { return errors.New("当前流程标识已经在,请检查后重试") } // 校验 if err := validate(r); err != nil { return err } // 开始事务 err := global.BankDb.Transaction(func(tx *gorm.DB) error { process := r.Process(originXml) if err := tx.Create(&process).Error; err != nil { return err } for _, event := range r.Events(process.Id) { if err := tx.Create(&event).Error; err != nil { return err } } for _, gateway := range r.ExclusiveGateways(process.Id) { if err := tx.Create(&gateway).Error; err != nil { return err } } for _, flow := range r.SequenceFlows(process.Id) { if err := tx.Create(&flow).Error; err != nil { return err } } for _, task := range r.Tasks(process.Id) { if err := tx.Create(&task).Error; err != nil { return err } } // 返回nil提交事务 return nil }) return err } // 校验 func validate(r *request.ProcessRequest) error { if r.StartEvent == nil || len(r.StartEvent) == 0 { return errors.New(util.PropertyNotFound("StartEvent")) } if r.EndEvent == nil || len(r.EndEvent) == 0 { return errors.New(util.PropertyNotFound("EndEvent")) } return nil }
package main

import "testing"

// TestNewParser walks the sample assembly file, logging every decoded field
// of each command. It fails only when the file cannot be opened; the field
// values themselves are not asserted.
func TestNewParser(t *testing.T) {
	p, err := NewParser(`test/MaxL.asm`)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): nothing in this loop visibly advances the parser. Unless
	// one of the accessors below (e.g. CommandType) consumes the current
	// command as a side effect, HasMoreCommands never becomes false and this
	// loop never terminates — confirm against the Parser implementation.
	for p.HasMoreCommands() {
		t.Log(`=====`)
		t.Log(`CurrentLine: `, p.CurrentLine)
		t.Log(`CommandType: `, p.CommandType())
		t.Log(`Symbol: `, p.Symbol())
		t.Log(`Dest: `, p.Dest())
		t.Log(`Comp: `, p.Comp())
		t.Log(`Jump: `, p.Jump())
	}
}
package ravendb

import "strings"

// compile-time assertion that fieldsToFetchToken implements queryToken
var _ queryToken = &fieldsToFetchToken{}

// fieldsToFetchToken renders the list of fields selected by a query,
// together with their optional "as" projections.
type fieldsToFetchToken struct {
	fieldsToFetch  []string
	projections    []string
	customFunction bool
	sourceAlias    string
}

// newFieldsToFetchToken builds a token without validating its arguments.
func newFieldsToFetchToken(fieldsToFetch []string, projections []string, customFunction bool, sourceAlias string) *fieldsToFetchToken {
	t := &fieldsToFetchToken{}
	t.fieldsToFetch = fieldsToFetch
	t.projections = projections
	t.customFunction = customFunction
	t.sourceAlias = sourceAlias
	return t
}

// createFieldsToFetchToken validates its arguments and then builds a token.
func createFieldsToFetchToken(fieldsToFetch []string, projections []string, customFunction bool, sourceAlias string) *fieldsToFetchToken {
	if len(fieldsToFetch) == 0 {
		panicIf(true, "fieldToFetch cannot be null")
		//return newIllegalArgumentError("fieldToFetch cannot be null");
	}
	if !customFunction && len(projections) != len(fieldsToFetch) {
		panicIf(true, "Length of projections must be the same as length of field to fetch")
		// return newIllegalArgumentError("Length of projections must be the same as length of field to fetch");
	}
	return newFieldsToFetchToken(fieldsToFetch, projections, customFunction, sourceAlias)
}

// writeTo appends a comma-separated field list to writer, emitting
// " as <projection>" after any field whose projection differs from it.
// Custom-function tokens never carry projections.
func (t *fieldsToFetchToken) writeTo(writer *strings.Builder) error {
	for idx, field := range t.fieldsToFetch {
		if idx > 0 {
			writer.WriteString(", ")
		}

		name := field
		if name == "" {
			name = "null"
		}
		writeQueryTokenField(writer, name)

		if t.customFunction {
			continue
		}

		// Note: Java code has seemingly unnecessary checks (conditions that
		// would be rejected in createFieldsToFetchToken)
		if alias := t.projections[idx]; alias != "" && alias != field {
			writer.WriteString(" as ")
			writer.WriteString(alias)
		}
	}
	return nil
}
// Command is the interface implemented by executable commands: input is
// validated first, then the command runs and returns its output.
type Command interface {
	Execute() ([]byte, error)
	ValidateInput() bool
}

// CommandExecutor runs commands, skipping any whose input does not validate.
type CommandExecutor struct {
}

// Execute runs command only when ValidateInput reports true.
func (c CommandExecutor) Execute(command Command) {
	if command.ValidateInput() {
		command.Execute()
	}
}

// FooCommand is a command that requires at least one non-empty argument.
type FooCommand struct {
	args []string // need args
}

// ValidateInput reports whether at least one non-empty argument was given.
// The original sketch declared this without the bool return required by
// Command, and referenced `args` without the receiver — neither compiled.
func (c FooCommand) ValidateInput() bool {
	return len(c.args) >= 1 && len(c.args[0]) > 0
}

// Execute runs the command. The original body was the placeholder "...";
// this stub keeps the sketch compilable until the real logic lands.
func (c FooCommand) Execute() ([]byte, error) {
	// TODO: real implementation.
	return nil, nil
}

// BarCommand is a command whose input never validates, so CommandExecutor
// never runs it.
type BarCommand struct {
}

// ValidateInput always reports false ("does nothing" in the original).
func (c BarCommand) ValidateInput() bool {
	return false
}

// Execute runs the command. The original body was the placeholder "...".
func (c BarCommand) Execute() ([]byte, error) {
	// TODO: real implementation.
	return nil, nil
}
// Package keycode defines string identifiers for keyboard keys.
package keycode

// Key identifier constants. The string values are the protocol names, which
// do not always match the Go identifier (e.g. Space is "SPACE", LeftControl
// is "L_CTRL"). They are deliberately untyped strings so they convert
// implicitly both to string and to KeyCode.
const (
	M            = "M"
	Space        = "SPACE"
	Enter        = "ENTER"
	LeftControl  = "L_CTRL"
	RightControl = "R_CTRL"
	LeftAlt      = "L_ALT"
	RightAlt     = "R_ALT"
	One          = "1"
	Two          = "2"
	Three        = "3"
	Four         = "4"
	Five         = "5"
	Q            = "Q"
	W            = "W"
	E            = "E"
	R            = "R"
	A            = "A"
	S            = "S"
	D            = "D"
	F            = "F"
	Z            = "Z"
	X            = "X"
	C            = "C"
	V            = "V"
)

// KeyCode is the type used to pass key identifiers around; the constants
// above convert to it implicitly.
type KeyCode string
package model

import (
	"errors"
	"time"

	"lhc.go.game.center/libs/mysql"
)

// CrawlerMenu is the database model for a crawler menu entry.
type CrawlerMenu struct {
	Id            int    `json:"id" form:"id"`
	Name          string `json:"name" form:"name"`
	Mark          string `json:"mark" form:"mark"`
	CrawlerUrl    string `json:"crawler_url" form:"crawler_url"`
	CronUrl       string `json:"cron_url" form:"cron_url"`
	Status        int    `json:"status" form:"status"`
	CronTime      string `json:"cron_time" form:"cron_time"`
	CronFomatTime string `json:"cron_fomat_time"`
	CreateTime    int64  `json:"create_time" `
	UpdateTime    int64  `json:"update_time" `
}

// NewCrawlerMenu returns an empty CrawlerMenu.
func NewCrawlerMenu() *CrawlerMenu {
	return &CrawlerMenu{}
}

// GetList returns menu rows filtered by the receiver's Status and Name,
// plus the matching row count.
// Receivers were previously a mix of "this" and "m"; they are unified to the
// idiomatic short name "m".
func (m *CrawlerMenu) GetList() (data []*CrawlerMenu, total int, err error) {
	Db := mysql.MysqlConnet.Model(&m)
	if m.Status != 0 {
		Db = Db.Where("status = ?", m.Status)
	}
	if m.Name != "" {
		// NOTE(review): this filters on column "title", but the struct field
		// is Name (GORM default column "name") — confirm the actual column.
		Db = Db.Where("title like ?", "%"+m.Name+"%")
	}
	if err := Db.Find(&data).Error; err != nil {
		return nil, 0, err
	}
	// Count errors are ignored (matching the original behavior); total stays
	// 0 on failure.
	Db.Count(&total)
	return
}

// GetOneCrawlerMenu loads the row identified by the receiver's Id into the
// receiver.
func (m *CrawlerMenu) GetOneCrawlerMenu() error {
	if m.Id == 0 {
		return errors.New("id 不能为空")
	}
	if err := mysql.MysqlConnet.Model(&m).First(&m).Error; err != nil {
		return err
	}
	return nil
}

// UpdateData upserts the receiver: updates when Id is set, inserts
// otherwise. Name is required in both cases.
func (m *CrawlerMenu) UpdateData() error {
	if m.Name == "" {
		return errors.New("名称不能为空")
	}
	m.UpdateTime = time.Now().Unix()
	if m.Id != 0 {
		if err := mysql.MysqlConnet.Model(&m).Where("id = ?", m.Id).Update(&m).Error; err != nil {
			return err
		}
	} else {
		m.CreateTime = time.Now().Unix()
		if err := mysql.MysqlConnet.Model(&m).Create(&m).Error; err != nil {
			return err
		}
	}
	return nil
}

// Update updates the row identified by the receiver's Id with the receiver's
// fields; Id is required.
func (m *CrawlerMenu) Update() error {
	if m.Id == 0 {
		return errors.New("id 不能为空")
	}
	m.UpdateTime = time.Now().Unix()
	if err := mysql.MysqlConnet.Model(&m).Where("id = ?", m.Id).Update(&m).Error; err != nil {
		return err
	}
	return nil
}
package main

// bin_exprs enumerates every Go binary operator exactly once: arithmetic,
// bitwise, shift, logical, and comparison. The results are assigned but
// never read — presumably this is a fixture for a compiler/code-generation
// test, so the bodies are intentionally left untouched (TODO confirm; the
// assigned-but-unread r and c would be rejected by some toolchains).
func bin_exprs() {
	var x int
	var y int
	var r int
	var b bool
	var c bool
	r = x + 1
	r = x - 1
	r = x * 1
	r = x / 1
	r = x % 1
	r = x & 1
	r = x | 1
	r = x ^ 1
	r = x &^ 1
	r = x << 1
	r = x >> 1
	c = b && b
	c = b || b
	c = x < y
	c = x <= y
	c = x == y
	c = x != y
	c = x >= y
	c = x > y
	r = x + y
}
package postgres

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/Sirupsen/logrus"
	_ "github.com/jackc/pgx/stdlib"
	"github.com/jmoiron/sqlx"
	"github.com/segment-sources/sqlsource/domain"
	"github.com/segment-sources/sqlsource/driver"
)

// chunkSize caps the number of rows returned by a single Scan query; callers
// page through a table by passing the last-seen primary key back in.
const chunkSize = 1000000

// tableDescriptionRow maps one row of the Describe query: a single column of
// a single table, flagged when it belongs to the table's primary key.
type tableDescriptionRow struct {
	Catalog    string `db:"table_catalog"`
	SchemaName string `db:"table_schema"`
	TableName  string `db:"table_name"`
	ColumnName string `db:"column_name"`
	IsPrimary  bool   `db:"is_primary_key"`
}

// Postgres implements the sqlsource driver against a PostgreSQL database.
type Postgres struct {
	Connection *sqlx.DB
}

// Init builds a connection string from the source config, connects via the
// pgx stdlib driver, and stores the connection on the receiver.
func (p *Postgres) Init(c *domain.Config) error {
	var extraOptions bytes.Buffer
	if len(c.ExtraOptions) > 0 {
		extraOptions.WriteRune('?')
		extraOptions.WriteString(strings.Join(c.ExtraOptions, "&"))
	}
	connectionString := fmt.Sprintf(
		"postgres://%s:%s@%s:%s/%s%s",
		c.Username, c.Password, c.Hostname, c.Port, c.Database, extraOptions.String(),
	)
	db, err := sqlx.Connect("pgx", connectionString)
	if err != nil {
		return err
	}
	p.Connection = db
	return nil
}

// Scan returns up to chunkSize rows of t that sort strictly after
// lastPkValues in primary-key order; pass an empty slice to start from the
// beginning of the table.
func (p *Postgres) Scan(t *domain.Table, lastPkValues []interface{}) (driver.SqlRows, error) {
	// in most cases whereClause will simply look like "id" > 114, but since the source supports compound PKs
	// we must be able to include all PK columns in the query. For example, for a table with 3-column PK:
	// a | b | c
	// ---+---+---
	// 1 | 1 | 1
	// 1 | 1 | 2
	// 1 | 2 | 1
	// 1 | 2 | 2
	// 2 | 1 | 1
	//
	// whereClause selecting records after (1, 1, 1) should look like:
	// a > 1 OR a = 1 AND b > 1 OR a = 1 AND b = 1 AND c > 1
	whereClause := "true"
	if len(lastPkValues) > 0 {
		// {"a > 1", "a = 1 AND b > 1", "a = 1 AND b = 1 AND c > 1"}
		whereOrList := []string{}
		for i, pk := range t.PrimaryKeys {
			// {"a = 1", "b = 1", "c > 1"}
			choiceAndList := []string{}
			for j := 0; j < i; j++ {
				choiceAndList = append(choiceAndList, fmt.Sprintf(`"%s" = $%d`, t.PrimaryKeys[j], j+1))
			}
			choiceAndList = append(choiceAndList, fmt.Sprintf(`"%s" > $%d`, pk, i+1))
			whereOrList = append(whereOrList, strings.Join(choiceAndList, " AND "))
		}
		// Placeholders $1..$k are intentionally reused across OR branches:
		// each $j always binds lastPkValues[j-1], which PostgreSQL's numbered
		// parameters allow.
		whereClause = strings.Join(whereOrList, " OR ")
	}
	// Order by the full primary key so that pagination by last-seen PK is
	// deterministic and complete.
	orderByList := make([]string, 0, len(t.PrimaryKeys))
	for _, column := range t.PrimaryKeys {
		orderByList = append(orderByList, fmt.Sprintf(`"%s"`, column))
	}
	orderByClause := strings.Join(orderByList, ", ")
	query := fmt.Sprintf("SELECT %s FROM %q.%q WHERE %s ORDER BY %s LIMIT %d", t.ColumnToSQL(),
		t.SchemaName, t.TableName, whereClause, orderByClause, chunkSize)
	logger := logrus.WithFields(logrus.Fields{
		"query": query,
		"args":  lastPkValues,
	})
	logger.Debugf("Executing query")

	return p.Connection.Queryx(query, lastPkValues...)
}

// Transform is the identity: Postgres rows need no post-processing.
func (p *Postgres) Transform(row map[string]interface{}) map[string]interface{} {
	return row
}

// Describe queries the information schema (joined against pg_constraint) and
// returns every column of every table that has a primary key, marking the
// PK columns.
func (p *Postgres) Describe() (*domain.Description, error) {
	describeQuery := `
		with o_1 as (SELECT _s.nspname AS table_schema, _t.relname AS table_name, c.conkey AS column_positions
			 FROM pg_catalog.pg_constraint c
				LEFT JOIN pg_catalog.pg_class _t ON c.conrelid = _t.oid
				LEFT JOIN pg_catalog.pg_class referenced_table ON c.confrelid = referenced_table.oid
				LEFT JOIN pg_catalog.pg_namespace _s ON _t.relnamespace = _s.oid
				LEFT JOIN pg_catalog.pg_namespace referenced_schema ON referenced_table.relnamespace = referenced_schema.oid
			 WHERE c.contype = 'p')
		select c.table_catalog, c.table_schema, c.table_name, c.column_name,
			CASE WHEN c.ordinal_position = ANY(o_1.column_positions) THEN true ELSE false END as "is_primary_key"
		FROM o_1 INNER JOIN information_schema.columns c
			ON o_1.table_schema = c.table_schema AND o_1.table_name = c.table_name;
	`
	res := domain.NewDescription()

	rows, err := p.Connection.Queryx(describeQuery)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		row := &tableDescriptionRow{}
		if err := rows.StructScan(row); err != nil {
			return nil, err
		}
		res.AddColumn(&domain.Column{Name: row.ColumnName, Schema: row.SchemaName, Table: row.TableName, IsPrimaryKey: row.IsPrimary})
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return res, nil
}
package reverse // Reverse reverses a string func Reverse(s string) string { res := []rune(s) for i, j := 0, len(res)-1; i < j; i, j = i+1, j-1 { res[i], res[j] = res[j], res[i] } return string(res) }