text
stringlengths
11
4.05M
package gate import "github.com/gin-gonic/gin" // AddGroup : Add route group func (gate *Gate) AddGroup(relativePath string, handlers ...gin.HandlerFunc) { gate.groups[relativePath] = gate.engine.Group(relativePath, handlers...) } // GET add get handler func (gate *Gate) GET(relativePath string, handler interface{}, group ...string) { gate.routerGroup(group...).GET(relativePath, wrap(handler)) } // POST add post handler func (gate *Gate) POST(relativePath string, handler interface{}, group ...string) { gate.routerGroup(group...).POST(relativePath, wrap(handler)) } // routerGroup get route group func (gate *Gate) routerGroup(group ...string) *gin.RouterGroup { switch len(group) { case 1: return gate.groups[group[0]] default: return &gate.engine.RouterGroup } }
// Package parse has functions that turn string-representations of geometry
// package primitives into their respective primitives following the same
// pattern set by each primitive's string-representation.
package parse

import (
	"bufio"
	"errors"
	"io"
	"strconv"
	"strings"

	"github.com/jwowillo/viztransform/geometry"
	"github.com/jwowillo/viztransform/transform"
)

// Sentinel parse errors, one per primitive kind. Each is returned when the
// corresponding string doesn't match its primitive's representation pattern.
var (
	// ErrBadTransformation is returned when a transform.Transformation's
	// string is bad.
	ErrBadTransformation = errors.New("bad geometry.Transformation-string")
	// ErrBadLine is returned when a geometry.Line's string is bad.
	ErrBadLine = errors.New("bad geometry.Line-string")
	// ErrBadPoint is returned when a geometry.Point's string is bad.
	ErrBadPoint = errors.New("bad geometry.Point-string")
	// ErrBadVector is returned when a geometry.Vector's string is bad.
	ErrBadVector = errors.New("bad geometry.Vector-string")
	// ErrBadNumber is returned when a geometry.Number's string is bad.
	ErrBadNumber = errors.New("bad geometry.Number-string")
	// ErrBadAngle is returned when a geometry.Angle's string is bad.
	ErrBadAngle = errors.New("bad geometry.Angle-string")
)

// Transformation parses a transform.Transformation from the io.Reader r.
//
// A transform.Transformation's string is a newline separated string-list where
// string is a string-representation of a called
// transform.Transformation-constructor. Each string is turned into its
// respective transform.Transformations and then composed together.
//
// Returns an error if any string can't be parsed depending on the reason.
// Returns ErrBadTransformation if the constructor name isn't recognized, the
// calling syntax is bad, or the wrong number of arguments are passed to the
// constructor. Returns a corresponding ErrBad error if an argument can't be
// parsed to its geometry package primitive. All of these errors stem from parts
// of the string not fitting corresponding string-representation patterns.
func Transformation(r io.Reader) (transform.Transformation, error) { var t transform.Transformation scanner := bufio.NewScanner(r) for scanner.Scan() { name, args, err := split(scanner.Text()) if err != nil { return nil, err } var nt transform.Transformation switch name { case "NoTransformation": nt, err = noTransformation(args) case "LineReflection": nt, err = lineReflection(args) case "Translation": nt, err = translation(args) case "Rotation": nt, err = rotation(args) case "GlideReflection": nt, err = glideReflection(args) } if nt == nil { return nil, ErrBadTransformation } if err != nil { return nil, err } t = transform.Compose(t, nt) } if scanner.Err() != nil { return nil, scanner.Err() } return t, nil } // split x which is a transform.Transformation's string-representation into the // constructor name name and arguments list. // // Returns ErrBadTransformation if x isn't formatted properly. func split(x string) (string, []string, error) { i := strings.Index(x, "(") if i == -1 || x[len(x)-1] != ')' { return "", nil, ErrBadTransformation } args := strings.Split(x[i+1:len(x)-1], ", ") if len(args) == 1 && args[0] == "" { args = nil } return x[:i], args, nil } // noTransformation parses a transform.Transformation with // transform.TypeNoTransformation from constructor arguments xs. // // Returns ErrBadTransformation if any arguments are passed. func noTransformation(xs []string) (transform.Transformation, error) { if len(xs) != 0 { return nil, ErrBadTransformation } return transform.NoTransformation(), nil } // lineReflection parses a transform.Transformation with // transform.TypeLineReflection from constructor arguments xs. // // Returns ErrBadTransformation if there isn't exactly 1 argument passed. // Returns ErrBadLine if that argument can't be parsed to a geometry.Line. 
func lineReflection(xs []string) (transform.Transformation, error) { if len(xs) != 1 { return nil, ErrBadTransformation } l, err := Line(xs[0]) if err != nil { return nil, err } return transform.LineReflection(l), nil } // translation parses a transform.Transformation with transform.TypeTranslation // from constructor arguments xs. // // Returns ErrBadTransformation if there isn't exactly 1 argument passed. // Returns ErrBadVector if that argument can't be parsed to a geometry.Vector. func translation(xs []string) (transform.Transformation, error) { if len(xs) != 1 { return nil, ErrBadTransformation } v, err := Vector(xs[0]) if err != nil { return nil, err } return transform.Translation(v), nil } // rotation parses a transform.Transformation with transform.TypeRotation from // constructor arguments xs. // // Returns ErrBadTransformation if there aren't exactly 2 arguments passed. // Returns ErrBadPoint if the first argument can't be parsed to a // geometry.Point. Returns ErrBadAngle if the second argument can't be parsed // to a geometry.Angle. func rotation(xs []string) (transform.Transformation, error) { if len(xs) != 2 { return nil, ErrBadTransformation } p, err := Point(xs[0]) if err != nil { return nil, err } rads, err := Angle(xs[1]) if err != nil { return nil, err } return transform.Rotation(p, rads), nil } // glideReflectin parses a transform.Transformation with // transform.TypeGlideReflection from constructor arguments xs. // // Returns ErrBadTransformation if there aren't exactly 2 arguments passed. // Returns ErrBadLine if the first argument can't be parsed to a geometry.Line. // Returns ErrBadVector if the second argument can't be parsed to a // geometry.Vector. 
func glideReflection(xs []string) (transform.Transformation, error) { if len(xs) != 2 { return nil, ErrBadTransformation } l, err := Line(xs[0]) if err != nil { return nil, err } v, err := Vector(xs[1]) if err != nil { return nil, err } return transform.GlideReflection(l, v), nil } // Line parses a geometry.Line from the string x. // // Returns ErrBadLine if the string doesn't fit the geometry.Line // string-representation pattern. func Line(x string) (geometry.Line, error) { if x[0] != '{' || x[len(x)-1] != '}' { return geometry.Line{}, ErrBadLine } x = x[1 : len(x)-1] i := strings.Index(x, ")") if i == -1 { return geometry.Line{}, ErrBadLine } if x[i+1] != ' ' { return geometry.Line{}, ErrBadLine } sa, sb := x[:i+1], x[i+2:] a, err := Point(sa) if err != nil { return geometry.Line{}, ErrBadLine } b, err := Point(sb) if err != nil { return geometry.Line{}, ErrBadLine } return geometry.NewLineFromPoints(a, b) } // Vector parses a geometry.Vector from the string x. // // Returns ErrBadVector if the string doesn't fit the geometry.Vector // string-representation pattern. func Vector(sx string) (geometry.Vector, error) { if sx[0] != '<' || sx[len(sx)-1] != '>' { return geometry.Vector{}, ErrBadVector } fs := strings.Split(sx[1:len(sx)-1], " ") if len(fs) != 2 { return geometry.Vector{}, ErrBadVector } i, err := Number(fs[0]) if err != nil { return geometry.Vector{}, ErrBadVector } j, err := Number(fs[1]) if err != nil { return geometry.Vector{}, ErrBadVector } return geometry.Vector{I: i, J: j}, nil } // Point parses a geometry.Point from the string x. // // Returns ErrBadPoint if the string doesn't fit the geometry.Point // string-representation pattern. 
func Point(x string) (geometry.Point, error) { if x[0] != '(' || x[len(x)-1] != ')' { return geometry.Point{}, ErrBadPoint } fs := strings.Split(x[1:len(x)-1], " ") if len(fs) != 2 { return geometry.Point{}, ErrBadPoint } nx, err := Number(fs[0]) if err != nil { return geometry.Point{}, ErrBadPoint } ny, err := Number(fs[1]) if err != nil { return geometry.Point{}, ErrBadPoint } return geometry.Point{X: nx, Y: ny}, nil } // Number parses a geometry.Number from the string x. // // Returns ErrBadNumber if the string doesn't fit the geometry.Number // string-representation pattern. func Number(x string) (geometry.Number, error) { n, err := strconv.ParseFloat(x, 64) if err != nil { return 0, ErrBadNumber } return geometry.Number(n), nil } // Angle parses a geometry.Angle from the string x. // // Returns ErrBadAngle if the string doesn't fit the geometry.Angle // string-representation pattern. func Angle(x string) (geometry.Angle, error) { a, err := Number(x) if err != nil { return 0, ErrBadAngle } return geometry.Angle(a), nil }
package storage import ( "strconv" "strings" "time" "grm-service/dbcentral/pg" "grm-service/log" "github.com/emicklei/go-restful" "grm-service/common" "grm-service/geoserver" . "grm-service/util" . "storage-manager/types" ) var ( volUnit = map[string]string{"K": "KB", "M": "MB", "G": "GB", "T": "TB"} ) // POST http://localhost:8080/devices func (s StorageSvc) deviceRegistry(req *restful.Request, res *restful.Response) { reqInfo := registryDeviceRequest{} err := req.ReadEntity(&reqInfo) if err != nil { ResWriteError(res, err) return } if len(reqInfo.Volume) > 0 { volume := strings.Replace(reqInfo.Volume, " ", "", -1) volume = strings.ToUpper(volume) if _, ok := volUnit[volume[len(volume)-1:]]; ok { reqInfo.Volume = volume + "B" } _, err := strconv.Atoi(volume[:len(volume)-2]) if err != nil { ResWriteError(res, ErrDeviceVolume) return } } // 检查信息完整性 var geoStorage string switch reqInfo.StorageType { case common.DBType: { if len(reqInfo.IpAddress) == 0 || len(reqInfo.DBUser) == 0 || len(reqInfo.DBPort) == 0 || len(reqInfo.DBPwd) == 0 { ResWriteError(res, ErrInvalidDBInfo) return } // 注册geoserver中的storage if reqInfo.StorageOrg == common.POSTGRESQL { geoStorage = "postgis_" + reqInfo.IpAddress err := s.GeoServer.AddPgStore(geoserver.GeoWorkSpace, geoStorage, reqInfo.IpAddress, reqInfo.DBPort, pg.DataDBName, reqInfo.DBUser, reqInfo.DBPwd) if err != nil { log.Error("Failed to add pg store:", err.Error()) return } } } case common.NFSType: { if len(reqInfo.FileSys) == 0 || len(reqInfo.MountPath) == 0 { ResWriteError(res, ErrInvalidNFSInfo) return } } case common.DFSType: { } } dev := Device{ Label: reqInfo.Label, StorageType: reqInfo.StorageType, StorageOrg: reqInfo.StorageOrg, DataType: reqInfo.DateType, IpAddress: reqInfo.IpAddress, ServiceName: reqInfo.ServiceName, DBPort: reqInfo.DBPort, DBUser: reqInfo.DBUser, DBPwd: reqInfo.DBPwd, GeoStorage: geoStorage, FileSys: reqInfo.FileSys, MountPath: reqInfo.MountPath, Description: reqInfo.Description, Volume: reqInfo.Volume, } 
ret, err := s.SysDB.DeviceRegistry(&dev) if err != nil { ResWriteError(res, err) return } ResWriteEntity(res, ret) } // GET http://localhost:8080/devices func (s StorageSvc) deviceList(req *restful.Request, res *restful.Response) { ret, err := s.SysDB.GetDevices(req.QueryParameter("data-type")) if err != nil { ResWriteError(res, err) return } for index, _ := range ret { if err := s.DynamicDB.GetStorage(&ret[index]); err != nil { log.Error(err) } } ResWriteEntity(res, &ret) } // 更新存储设备信息 func (s StorageSvc) updateDeviceInfo(req *restful.Request, res *restful.Response) { devId := req.PathParameter("device-id") var args UpdateDeviceRequest if err := req.ReadEntity(&args); err != nil { ResWriteError(res, err) return } if err := s.SysDB.UpdateDevice(devId, &args); err != nil { ResWriteError(res, err) return } ResWriteEntity(res, nil) } // 移除存储设备 func (s StorageSvc) delDevice(req *restful.Request, res *restful.Response) { devId := req.PathParameter("device-id") if err := s.SysDB.DeleteDevice(devId); err != nil { ResWriteError(res, err) return } // TODO: 移除etcd中设备信息 ResWriteEntity(res, nil) } func (s StorageSvc) deviceLoop() error { devices, err := s.SysDB.GetDevices("") if err != nil { return err } for i, _ := range devices { dev := &devices[i] var info *DeviceInfo var err error if dev.StorageType == common.NFSType { info, err = GetDeviceInfo(dev.MountPath) } else if dev.StorageType == common.DBType && dev.StorageOrg == common.POSTGRESQL { info, err = GetDeviceDBInfo(dev) } else if dev.StorageType == common.DBType && dev.StorageOrg == common.MONGODB { info, err = GetDeviceMongoInfo(dev) } else { dev.Used = "-1" dev.UsedPercent = "0%" } if err != nil { log.Println("Failed to get device space info") //dev.Volume = "-1" //dev.Free = "-1" dev.Used = "-1" dev.UsedPercent = "0%" } else { dev.FileSys = info.FileSystem dev.Volume = info.Volume dev.Used = info.Used dev.UsedPercent = info.UsedPercent } if err := s.DynamicDB.UpdateStorage(dev.Id, dev.Used, dev.UsedPercent); err != 
nil { return err } } return nil } func (s StorageSvc) DeviceLoop() { t := time.NewTicker(DefaultTimerInterval) for { select { case <-t.C: s.deviceLoop() } } }
package main import ( "context" "fmt" "github.com/scjalliance/drivestream/driveapicollector" "github.com/scjalliance/drivestream/resource" drive "google.golang.org/api/drive/v3" ) func listPermissions(ctx context.Context, s *drive.Service, id string) (perms []resource.Permission, err error) { var token string for { call := s.Permissions.List(id) call.Context(ctx) call.SupportsTeamDrives(true) call.UseDomainAdminAccess(true) call.Fields("nextPageToken", "permissions(id,type,emailAddress,domain,role,displayName,expirationTime,deleted)") if token != "" { call.PageToken(token) } list, err := call.Do() if err != nil { return nil, fmt.Errorf("unable to retrieve teamdrive permissions: %v", err) } for i, perm := range list.Permissions { record, err := driveapicollector.MarshalPermission(perm) if err != nil { return nil, fmt.Errorf("permission list parsing failed: record %d: %v", i, err) } perms = append(perms, record) } if list.NextPageToken == "" { return perms, nil } token = list.NextPageToken } }
/* The Code tab has a code which attempts to add a clone of an array to itself. There is no error message, but the results are not as expected. Can you fix the code? Examples clone([1, 1]) ➞ [1, 1, [1, 1]] clone([1, 2, 3]) ➞ [1, 2, 3, [1, 2, 3]] clone(["x", "y"]) ➞ ["x", "y", ["x", "y"]] */ package main import ( "fmt" "reflect" ) func main() { test([]any{1, 1}, []any{1, 1, []any{1, 1}}) test([]any{1, 2, 3}, []any{1, 2, 3, []any{1, 2, 3}}) test([]any{"x", "y"}, []any{"x", "y", []any{"x", "y"}}) test([]any{"a", "b", "c"}, []any{"a", "b", "c", []any{"a", "b", "c"}}) test([]any{}, []any{[]any{}}) } func assert(x bool) { if !x { panic("assertion failed") } } func test(a, r []any) { p := clone(a) fmt.Println(p) assert(reflect.DeepEqual(p, r)) } func clone(a []any) []any { r := append([]any{}, a...) r = append(r, a) return r }
package file import ( "jmcs/core/library/socket" "jmcs/core/utils/file/transfer" "github.com/gin-gonic/gin/json" "jmcs/core/utils/file" "net" ) type FileTransController struct { socket.SocketController } func (f FileTransController) Receive() { serverTransfer := &transfer.ServerTransfer{} f.ResolveBody(serverTransfer) receivePackage := serverTransfer.ReceiveFile() //接收文件 receiveByte, _ := json.Marshal(receivePackage) f.Responser(receiveByte, "/file/receive", "") //if serverTransfer.Finished { //文件传输完成 todo:发送方断开??? // f.Close() //} } func (f FileTransController) Send() { filePath := "C:/temp/new" rootPath := "C:/temp/root" //conn := f.Conn fileInfos, err := file.GetFileList(filePath, 0) if err != nil { } conn, _ := net.Dial("tcp", "127.0.0.1:8002") head := f.BuildHead("/file/receive", 200) for _, fileInfo := range fileInfos { sp := transfer.SendPackage{} sp.Handle(conn, rootPath, head.Bytes(), fileInfo) } f.Close() //文件传输完成,断开连接 }
package main

import "net/http"

// f1 writes a fixed greeting to the response.
func f1(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello dali"))
}

// main serves the greeting on /hello at 127.0.0.1:9000.
func main() {
	http.HandleFunc("/hello", f1)
	http.ListenAndServe("127.0.0.1:9000", nil)
}
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package isolated import ( "encoding/json" "strconv" ) // Int is a JSON/Cloud Endpoints friendly int type that will correctly parse // string encoded integers found in JSON encoded data. type Int int func (i *Int) UnmarshalJSON(p []byte) error { val := 0 if err := json.Unmarshal(p, &val); err == nil { *i = Int(val) return err } s := "" if err := json.Unmarshal(p, &s); err != nil { return err } v, err := strconv.Atoi(s) if err != nil { return err } *i = Int(v) return nil } // ServerCapabilities is returned by /server_details. type ServerCapabilities struct { ServerVersion string `json:"server_version"` } // DigestItem is used as input for /preupload via DigestCollection. type DigestItem struct { Digest HexDigest `json:"digest"` IsIsolated bool `json:"is_isolated"` Size int64 `json:"size"` } // DigestCollection is used as input for /preupload. type DigestCollection struct { Items []*DigestItem `json:"items"` Namespace struct { Namespace string `json:"namespace"` } `json:"namespace"` } // PreuploadStatus is returned by /preupload via UrlCollection. type PreuploadStatus struct { GSUploadURL string `json:"gs_upload_url"` UploadTicket string `json:"upload_ticket"` Index Int `json:"index"` } // UrlCollection is returned by /preupload. type UrlCollection struct { Items []PreuploadStatus `json:"items"` } // FinalizeRequest is used as input for /finalize_gs_upload. type FinalizeRequest struct { UploadTicket string `json:"upload_ticket"` } // StorageRequest is used as input for /store_inline. type StorageRequest struct { UploadTicket string `json:"upload_ticket"` Content []byte `json:"content"` }
package hit

import (
	"math"

	"github.com/GuillaumeTech/3dgo/internal/geom"
)

// Sphere is a hittable sphere with a center, radius and material.
type Sphere struct {
	Center geom.Vec3d
	Radius float64
	Mat    Material
}

// hit reports whether ray intersects the sphere at a parameter t in
// (tMin, tMax), filling record with the nearest such intersection.
func (sphere Sphere) hit(ray geom.Ray, tMin float64, tMax float64, record *HitRecord) bool {
	oc := geom.SubstractTwoVec(ray.Origin, sphere.Center)
	a := geom.DotProduct(ray.Direction, ray.Direction)
	halfB := geom.DotProduct(oc, ray.Direction)
	c := geom.DotProduct(oc, oc) - sphere.Radius*sphere.Radius
	discriminant := halfB*halfB - a*c
	if discriminant <= 0 {
		// No real roots: the ray misses the sphere.
		return false
	}
	root := math.Sqrt(discriminant)
	// Try the nearer root first, then the farther one.
	for _, t := range []float64{(-halfB - root) / a, (-halfB + root) / a} {
		if t <= tMin || t >= tMax {
			continue
		}
		record.T = t
		record.P = ray.At(t)
		outwardNormal := geom.DivideVec(geom.SubstractTwoVec(record.P, sphere.Center), sphere.Radius)
		record.setFaceNormal(ray, outwardNormal)
		record.Mat = sphere.Mat
		return true
	}
	return false
}
package main import ( "fmt" . "leetcode" ) func main() { fmt.Println(reverseKGroup(NewListNode(1, 2, 3, 4), 2)) } func reverseKGroup(head *ListNode, k int) *ListNode { var reverse func(head, foot *ListNode) *ListNode reverse = func(head, foot *ListNode) *ListNode { if head == nil || head.Next == nil { return head } newHead := reverse(head.Next, foot) head.Next.Next = head head.Next = nil return newHead } //var reverseRange func(head, foot *ListNode) *ListNode //reverseRange = func(head, foot *ListNode) *ListNode { // //} //count := 0 //for cur, pre := head, new(ListNode); cur != nil; cur = cur.Next { // count++ // // if count //} return reverse(head, head.Next.Next) }
package main import ( "fmt" "google.golang.org/grpc" "grpc_calculator/calculator/calculatorpb" "io" "log" "net" "time" ) //Server with embedded UnimplementedGreetServiceServer type Server struct { calculatorpb.UnimplementedCalculatorServiceServer } func(s *Server) PrimeNumberDecomposition(req *calculatorpb.PrimeNumberRequest, stream calculatorpb.CalculatorService_PrimeNumberDecompositionServer) error{ fmt.Printf("PrimeNumberDecomposition service is invoked\n") prime_number := req.GetNumber() prime_numbers_factors := decomposePrimeNumber(prime_number) for i := 0; i < len(prime_numbers_factors); i++ { res := &calculatorpb.PrimeNumberResponse{Result: prime_numbers_factors[i]} if err := stream.Send(res); err != nil { log.Fatalf("error with responses: %v", err.Error()) } time.Sleep(time.Second) } return nil } // private method for identification of prime number factors func decomposePrimeNumber(num int32) []int32 { res_arr := []int32{} for{ res_arr = append(res_arr, 2) num /= 2 if num% 2 != 0{ break } } var i int32 = 0 for i = 3; i <= num*num; i+=2{ for { res_arr = append(res_arr, 3) num /= i; if num% i != 0 { break } } } if num > 2 { res_arr = append(res_arr, num) } return res_arr } func(s *Server) ComputeAverage(stream calculatorpb.CalculatorService_ComputeAverageServer) error{ fmt.Printf("ComputeAverage service is invoked\n") var sum int32 = 0 var quantity int32 = 0 for { req, err := stream.Recv() if err == io.EOF { var response = &calculatorpb.AvgNumberResponse{Res: float32(sum / quantity)} return stream.SendAndClose(response) } if err != nil { log.Fatalf("Error while reading client stream: %v", err) } sum += req.GetNum() quantity++ } } func main() { l, err := net.Listen("tcp", "0.0.0.0:7777") if err != nil { log.Fatalf("Failed to listen:%v", err) } s := grpc.NewServer() calculatorpb.RegisterCalculatorServiceServer(s, &Server{}) log.Println("Server is running on port:7777") if err := s.Serve(l); err != nil { log.Fatalf("failed to serve:%v", err) } }
package response

import (
	"github.com/pobo380/network-games/card-game/server/websocket/handler/event_filter"
)

// GameEvent is the response payload carrying filtered game events.
type GameEvent struct {
	// Events holds the events together with their type tags, as produced
	// by the event_filter package.
	Events event_filter.EventWithTypes
}
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-14 10:18
# @File : of_Offer_04_二维数组中的查找.go
# @Description : In an n * m two-dimensional array where every row is sorted
#   in ascending order from left to right and every column is sorted in
#   ascending order from top to bottom, write a function that takes such a
#   2D array and an integer and reports whether the array contains it.
# @Attention :
*/
package offer

import "testing"

// Test_findNumberIn2DArray2 exercises the sorted-matrix search on a small
// fixture (no assertion; it only checks the call runs).
func Test_findNumberIn2DArray2(t *testing.T) {
	findNumberIn2DArray2([][]int{{1, 4}, {2, 5}}, 2)
}

// TestMinStack_GetMin2 only demonstrates map creation/insertion; it makes
// no assertions.
func TestMinStack_GetMin2(t *testing.T) {
	m := make(map[string]interface{}, 0)
	m["1"] = 1
}
package middleware

import (
	"strings"

	"forum_Anpw/common"
	"forum_Anpw/model"
	"forum_Anpw/reps"

	"github.com/gin-gonic/gin"
)

// AuthMiddleware returns a gin middleware that authenticates requests via a
// "Bearer <token>" Authorization header, loads the matching user, and stores
// it in the context under "user". Any failure aborts with an unauthorized
// response.
func AuthMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		header := c.GetHeader("Authorization")
		// Reject anything that is not a "Bearer <token>" header (an empty
		// header fails the prefix check as well).
		if !strings.HasPrefix(header, "Bearer ") {
			reps.Unauthorized(c, nil, "权限不足")
			c.Abort()
			return
		}
		raw := strings.TrimPrefix(header, "Bearer ")
		token, claims, err := common.PareseToken(raw)
		// Invalid or unparsable token.
		if err != nil || !token.Valid {
			reps.Unauthorized(c, nil, "权限不足")
			c.Abort()
			return
		}
		var user model.User
		common.GetDB().First(&user, claims.UserID)
		// The token was valid but the user no longer exists.
		if user.ID == 0 {
			reps.Unauthorized(c, nil, "权限不足")
			c.Abort()
			return
		}
		// Authenticated: expose the user to downstream handlers.
		c.Set("user", user)
		c.Next()
	}
}
package main

import "fmt"

/*
Hi man , this is a cool thing that you know
*/

// main prints the greeting, then prints the byte count and error returned
// by the first Println call (preserving the original nested-Println output:
// "Hello,World" followed by "12 <nil>").
func main() {
	n, err := fmt.Println("Hello,World")
	fmt.Println(n, err)
}
package hot100 import ( "fmt" "testing" ) func Test_generateTrees(t *testing.T) { fmt.Println(generateTrees(3)) }
package iteration import ( "testing" "lib" "fmt" ) func TestRepeat(t *testing.T) { t.Run("repeat a 5 times", func(t *testing.T) { lib.AssertEqual(t, Repeat("a", 5), "aaaaa") }) t.Run("repeat fit 10 times", func(t *testing.T) { lib.AssertEqual(t, Repeat("fit", 10), "fitfitfitfitfitfitfitfitfitfit") }) } func ExampleRepeat() { fmt.Print(Repeat("1", 5)) // Output: 11111 } func BenchmarkRepeat(b *testing.B) { lib.Bench(b, func() { Repeat("a", 10) }) }
package api

import (
	"fmt"
	"log"
)

// catch panics (via log.Panic) when err is non-nil.
func catch(err error) {
	if err != nil {
		log.Panic(err)
	}
}

// filename returns the image's page filename, e.g. "7.jpg".
func (i *Image) filename() string {
	return fmt.Sprintf("%d.%s", i.Index, i.Type.extension())
}

// zfilename returns the zero-padded page filename, e.g. "007.jpg".
func (i *Image) zfilename() string {
	return fmt.Sprintf("%03d.%s", i.Index, i.Type.extension())
}

// generateURL builds the full download URL for the image.
func (i *Image) generateURL() string {
	const ImageBase = "https://i.nhentai.net"
	return fmt.Sprintf("%s/galleries/%s/%s", ImageBase, i.MediaID, i.filename())
}

// extension maps the image type constant to its file extension; unknown
// types yield an empty string.
func (it *imageType) extension() string {
	switch *it {
	case jpeg:
		return "jpg"
	case png:
		return "png"
	case gif:
		return "gif"
	}
	return ""
}
// +build !tempdll
// +build !memorydll

package cfrida

// checkAndReleaseDLL is the no-op fallback compiled when neither the
// tempdll nor the memorydll build tag is set: it reports that no DLL was
// released and returns an empty path.
func checkAndReleaseDLL() (bool, string) {
	return false, ""
}
package main

import (
	"math/rand"

	"github.com/deepglint/util/image/heatmap"
)

// main renders 100 random points into a 1000x1000 heatmap written to
// test.png.
func main() {
	hm := heatmap.NewHeatmap("image.png")
	points := make([]heatmap.DataPoint, 0, 100)
	for i := 0; i < 100; i++ {
		// Same rand call order as before: x then y per point.
		x := 1000 * rand.Float64()
		y := 1000 * rand.Float64()
		points = append(points, heatmap.P(x, y))
	}
	hm.DrawHeatmap(points, 1000, 1000, "test.png", 128, 128)
}
package main

import (
	"time"

	"github.com/spf13/viper"
)

// imageScalerConfig bundles every setting the scaler reads at startup.
type imageScalerConfig struct {
	hostname              string
	port                  int
	username              string
	password              string
	imageExchange         string
	imageUpdateQueue      string
	imageUpdateRoutingKey string
	timeout               time.Duration
	minioURL              string
	minioExternalURL      string
	minioAccessKey        string
	minioSecret           string
	minioBucketName       string
	minioSecure           bool
	originalScalingFactor string
	scalingTarget         map[string]scalingTargetConf
}

// scalingTargetConf describes one scaling output (factor label and width).
type scalingTargetConf struct {
	Factor string `toml:"factor"`
	Width  int    `toml:"width"`
}

// readConfig loads config.toml when present, otherwise falls back to
// defaults that match vanilla rabbitmq and min.io docker containers.
func readConfig() imageScalerConfig {
	viper.SetConfigFile("config.toml")
	viper.SetConfigType("toml")
	viper.AddConfigPath(".")

	//default values suitable for vanilla rabbitmq docker container
	viper.SetDefault("rabbitmq.hostname", "localhost")
	viper.SetDefault("rabbitmq.port", "5672")
	viper.SetDefault("rabbitmq.username", "guest")
	viper.SetDefault("rabbitmq.password", "guest")
	viper.SetDefault("rabbitmq.timeout", "5s")
	viper.SetDefault("rabbitmq.image-exchange", "user.event")
	viper.SetDefault("rabbitmq.image-update-queue", "user.image.url.updated.dev")
	viper.SetDefault("rabbitmq.image-update-routingkey", "user.image.url.updated.#")

	//default values suitable for min.io docker container
	viper.SetDefault("minio.url", "localhost:9000")
	viper.SetDefault("minio.external-url", "https://localhost:9000")
	viper.SetDefault("minio.accesskey", "admin")
	viper.SetDefault("minio.secret", "password")
	viper.SetDefault("minio.bucketname", "testbucket")
	viper.SetDefault("minio.secure", false)

	viper.SetDefault("scaling.original.factor", "ORIGINAL")

	//load config
	confErr := viper.ReadInConfig()
	logOnError(confErr, "No configuration file loaded - using defaults")

	var scalingTargets map[string]scalingTargetConf
	// Bug fix: viper.Sub returns nil when the key is absent, so the old
	// unconditional sub.Unmarshal(...) panicked whenever config.toml (or
	// its scaling.target table) was missing — exactly the "using defaults"
	// path logged above. The Unmarshal error was also silently dropped.
	if sub := viper.Sub("scaling.target"); sub != nil {
		logOnError(sub.Unmarshal(&scalingTargets), "Could not parse scaling.target")
	}

	return imageScalerConfig{
		hostname:              viper.GetString("rabbitmq.hostname"),
		port:                  viper.GetInt("rabbitmq.port"),
		username:              viper.GetString("rabbitmq.username"),
		password:              viper.GetString("rabbitmq.password"),
		timeout:               viper.GetDuration("rabbitmq.timeout"),
		imageExchange:         viper.GetString("rabbitmq.image-exchange"),
		imageUpdateQueue:      viper.GetString("rabbitmq.image-update-queue"),
		imageUpdateRoutingKey: viper.GetString("rabbitmq.image-update-routingkey"),
		minioURL:              viper.GetString("minio.url"),
		minioExternalURL:      viper.GetString("minio.external-url"),
		minioAccessKey:        viper.GetString("minio.accesskey"),
		minioSecret:           viper.GetString("minio.secret"),
		minioBucketName:       viper.GetString("minio.bucketname"),
		minioSecure:           viper.GetBool("minio.secure"),
		originalScalingFactor: viper.GetString("scaling.original.factor"),
		scalingTarget:         scalingTargets,
	}
}
package setr

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document01700102 is the XML envelope for a setr.017.001.02
// (OrderCancellationStatusReport) message.
type Document01700102 struct {
	XMLName xml.Name                          `xml:"urn:iso:std:iso:20022:tech:xsd:setr.017.001.02 Document"`
	Message *OrderCancellationStatusReportV02 `xml:"setr.017.001.02"`
}

// AddMessage allocates the contained message and returns it for population.
func (d *Document01700102) AddMessage() *OrderCancellationStatusReportV02 {
	d.Message = new(OrderCancellationStatusReportV02)
	return d.Message
}

// Scope
// The OrderCancellationStatusReport is sent by an executing party, eg, a transfer agent, to the instructing party, eg, an investment manager or its authorised representative. There may be one or more intermediary parties between the executing party and the instructing party. The intermediary party is, for example, an intermediary or a concentrator.
// The message gives the status of an order cancellation instruction message that was previously sent by the instructing party.
// Usage
// The OrderCancellationStatusReport message is used to report the status of an order cancellation instruction message that was previously sent by the instructing party. The message can be used to report one of the following:
// - the cancellation is accepted for further processing, or
// - the cancellation is rejected, or
// - the order has been cancelled.
// When the cancellation is rejected, the reason for the rejection must be specified.
type OrderCancellationStatusReportV02 struct {

	// Reference to a linked message that was previously received.
	RelatedReference []*iso20022.AdditionalReference3 `xml:"RltdRef"`

	// Reference to a linked message sent in a proprietary way or reference of a system.
	OtherReference []*iso20022.AdditionalReference3 `xml:"OthrRef"`

	// Status report details of a bulk or multiple or switch order cancellation instruction that was previously received.
	CancellationStatusReport *iso20022.OrderStatusAndReason4 `xml:"CxlStsRpt"`
}

// AddRelatedReference appends a new related-reference entry and returns it
// for population.
func (o *OrderCancellationStatusReportV02) AddRelatedReference() *iso20022.AdditionalReference3 {
	newValue := new(iso20022.AdditionalReference3)
	o.RelatedReference = append(o.RelatedReference, newValue)
	return newValue
}

// AddOtherReference appends a new other-reference entry and returns it for
// population.
func (o *OrderCancellationStatusReportV02) AddOtherReference() *iso20022.AdditionalReference3 {
	newValue := new(iso20022.AdditionalReference3)
	o.OtherReference = append(o.OtherReference, newValue)
	return newValue
}

// AddCancellationStatusReport allocates the status-report payload and
// returns it for population.
func (o *OrderCancellationStatusReportV02) AddCancellationStatusReport() *iso20022.OrderStatusAndReason4 {
	o.CancellationStatusReport = new(iso20022.OrderStatusAndReason4)
	return o.CancellationStatusReport
}
package route import ( "github.com/kataras/iris" "github.com/kataras/iris/context" "gocherry-api-gateway/components/common_enum" "gocherry-api-gateway/proxy/filter" ) func RegisterRoutes(app *iris.Application, appConfig *common_enum.Config, servers *filter.ClientMon) { app.Any("/*", func(ctx context.Context) { proxy := new(filter.ProxyController) proxy.DoProxyHandler(ctx, appConfig, servers) }) }
package models

import (
	"encoding/json"
	"time"
)

// User is the user record exchanged over the API.
type User struct {
	UUID      string     `json:"uuid,omitempty"`
	Email     string     `json:"email,omitempty"`
	Password  string     `json:"password,omitempty"`
	Name      string     `json:"name,omitempty"`
	CreatedAt time.Time  `json:"created_at,omitempty"`
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	UpdatedAt time.Time  `json:"updated_at,omitempty"`
}

// MarshalJSON strips the password before encoding so it never leaves the
// server (the omitempty tag then drops the empty field). Note it is
// declared on *User, so marshaling a User value rather than a pointer
// bypasses it.
func (u *User) MarshalJSON() ([]byte, error) {
	// A local alias type has the same fields and tags but not User's
	// method set, so the nested Marshal call cannot recurse back here.
	type alias User
	sanitized := alias(*u)
	sanitized.Password = ""
	return json.Marshal(sanitized)
}
package intercom

import (
	"io/ioutil"
	"testing"
)

// TestJobAPISaveUser verifies that saving a user bulk job posts to
// /bulk/users with the right item payload and parses the job response.
func TestJobAPISaveUser(t *testing.T) {
	client := TestJobHTTPClient{t: t, expectedURI: "/bulk/users", fixtureFilename: "fixtures/job.json"}
	api := JobAPI{httpClient: &client}
	user := User{UserID: "1234"}
	job := JobRequest{Items: []*JobItem{NewUserJobItem(&user, JOB_POST)}, bulkType: "users"}
	client.f = func(job *JobRequest) {
		if job.Items[0].DataType != "user" {
			t.Errorf("job item was of wrong data type, expected %s, was %s", "user", job.Items[0].DataType)
		}
		if job.Items[0].Data.(requestUser).UserID != "1234" {
			t.Errorf("wrong user id sent")
		}
	}
	savedJob, _ := api.save(&job)
	if savedJob.ID != "job_5ca1ab1eca11ab1e" {
		t.Errorf("Did not respond with correct job")
	}
}

// TestJobAPISaveEvent verifies that saving an event bulk job posts to
// /bulk/events with the right item payload and parses the job response.
func TestJobAPISaveEvent(t *testing.T) {
	client := TestJobHTTPClient{t: t, expectedURI: "/bulk/events", fixtureFilename: "fixtures/job.json"}
	api := JobAPI{httpClient: &client}
	event := Event{UserID: "1234"}
	job := JobRequest{Items: []*JobItem{NewEventJobItem(&event)}, bulkType: "events"}
	client.f = func(job *JobRequest) {
		if job.Items[0].DataType != "event" {
			t.Errorf("job item was of wrong data type, expected %s, was %s", "event", job.Items[0].DataType)
		}
		if job.Items[0].Data.(*Event).UserID != "1234" {
			t.Errorf("wrong user id sent")
		}
	}
	savedJob, _ := api.save(&job)
	if savedJob.ID != "job_5ca1ab1eca11ab1e" {
		t.Errorf("Did not respond with correct job")
	}
}

// TestJobHTTPClient is an HTTPClient stub that checks the posted URI,
// optionally inspects the posted JobRequest, and replies with a fixture.
type TestJobHTTPClient struct {
	TestHTTPClient
	t               *testing.T
	f               func(job *JobRequest)
	fixtureFilename string
	expectedURI     string
}

// Post asserts the expected endpoint was hit, forwards the request to the
// inspection hook when set, and returns the canned fixture body.
func (c *TestJobHTTPClient) Post(uri string, body interface{}) ([]byte, error) {
	if c.expectedURI != uri {
		c.t.Errorf("Wrong endpoint called")
	}
	if c.f != nil {
		c.f(body.(*JobRequest))
	}
	return ioutil.ReadFile(c.fixtureFilename)
}
package repowatch

import (
	"time"

	"github.com/Cloud-Foundations/Dominator/lib/log"
)

// Config describes the repository to watch and how often to poll it.
type Config struct {
	AwsSecretId              string        `yaml:"aws_secret_id"`
	Branch                   string        `yaml:"branch"`
	CheckInterval            time.Duration `yaml:"check_interval"`
	LocalRepositoryDirectory string        `yaml:"local_repository_directory"`
	RepositoryURL            string        `yaml:"repository_url"`
}

// Watch is a convenience wrapper for callers that only need to specify the
// remote URL, the local checkout directory and a poll interval. It delegates
// to the same internal watch loop as WatchWithConfig.
func Watch(remoteURL, localDirectory string, checkInterval time.Duration,
	metricDirectory string, logger log.DebugLogger) (<-chan string, error) {
	cfg := Config{
		CheckInterval:            checkInterval,
		LocalRepositoryDirectory: localDirectory,
		RepositoryURL:            remoteURL,
	}
	return watch(cfg, metricDirectory, logger)
}

// WatchWithConfig starts watching the repository described by config,
// publishing metrics under metricDirectory, and returns the notification
// channel produced by the internal watch loop.
func WatchWithConfig(config Config, metricDirectory string,
	logger log.DebugLogger) (<-chan string, error) {
	return watch(config, metricDirectory, logger)
}
package sol // heap approach func left(i int) int { return i << 1 } func right(i int) int { return i<<1 | 0x1 } func maxHeapify(nums []int, i int) { l := left(i) r := right(i) largest := i if l < len(nums) && nums[l] > nums[i] { largest = l } if r < len(nums) && nums[r] > nums[largest] { largest = r } if largest != i { nums[i], nums[largest] = nums[largest], nums[i] maxHeapify(nums, largest) } } func findKthLargest(nums []int, k int) int { if len(nums) == 1 { return nums[0] } for i := len(nums) >> 1; i >= 0; i-- { maxHeapify(nums, i) } if k == 1 { return nums[0] } for i := len(nums) - 1; i > len(nums)-k; i-- { nums[0], nums[i] = nums[i], nums[0] maxHeapify(nums[:i], 0) } return nums[0] }
package goose

import "github.com/evelritual/goose/audio"

// NewAudioPlayer initializes a new audio player for use in allocating sounds
// and dealing with playback.
//
// It delegates to the package-level activeDriver, which is defined elsewhere
// in this package. NOTE(review): presumably a driver must have been selected
// or registered before calling this, otherwise activeDriver may be nil and
// this would panic — confirm against the driver-setup code.
func NewAudioPlayer() (audio.Player, error) {
	return activeDriver.NewAudioPlayer()
}
/*
Copyright 2018 Intel Corporation.

SPDX-License-Identifier: Apache-2.0
*/

package testlog

import (
	"os"
	"os/exec"
	"regexp"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/intel/oim/pkg/log"
)

// TestGlobal checks that SetGlobal swaps the global logger for a test logger
// and that the returned restore function puts the old logger back; restore is
// invoked twice here (once explicitly, once via defer), exercising repeated
// calls.
func TestGlobal(t *testing.T) {
	old := log.L()
	restore := SetGlobal(t)
	defer restore()
	assert.IsType(t, New(t), log.L())
	restore()
	assert.Exactly(t, old, log.L())
}

// TestOutput1 emits one log line through the test-scoped global logger; its
// output is inspected by the Silent/Verbose subprocess tests below.
func TestOutput1(t *testing.T) {
	defer SetGlobal(t)()
	log.L().Info("TestOutput1")
}

// TestOutput2 emits one log line and then fails on demand (controlled by the
// TEST_OUTPUT2_FAIL env var) so the subprocess tests can observe failure
// output.
func TestOutput2(t *testing.T) {
	defer SetGlobal(t)()
	log.L().Info("TestOutput2")
	if os.Getenv("TEST_OUTPUT2_FAIL") != "" {
		t.Fatal("was asked to fail")
	}
}

// When adding or removing lines above, update the line numbers in
// the expected output below.

// stripTimes removes elapsed-time stamps such as "0.01s" from go test output
// so the expected strings can be compared deterministically.
func stripTimes(out []byte) string {
	s := string(out)
	parts := regexp.MustCompile(`\d+\.\d+s`).Split(s, -1)
	return strings.Join(parts, "")
}

// testOutputLogMsg matches a newline plus indentation in front of a
// "file.go:NN:" log prefix in go test output.
var testOutputLogMsg = regexp.MustCompile(`\n\s+([^\n]*\.go:\d+:)`)

// normalizeGoTestOutput canonicalizes the indentation of log lines in go test
// output so expected and actual output can be compared across toolchains.
func normalizeGoTestOutput(output string) string {
	result := output
	// Indention of test output changed from using tabs to using spaces.
	// Replace with a single tab. Example:
	// testlog_test.go:40: INFO TestOutput2
	//
	// NOTE(review): the replacement is a raw string, so `\n` is a literal
	// backslash-n rather than a newline. This is self-consistent because
	// both the expected string and the actual output pass through this
	// function, but confirm it matches the "single tab" intent above.
	result = testOutputLogMsg.ReplaceAllString(result, `\n	$1`)
	return result
}

// TestOutputSilent runs the Output tests in a subprocess without -v and
// checks that only the failing test's log output appears.
//
// NOTE(review): GOCACHE=off is rejected by Go 1.12+ toolchains — confirm this
// still runs on the project's Go version. The expected output is also coupled
// to the source line numbers above (see the comment between the tests).
func TestOutputSilent(t *testing.T) {
	cmd := exec.Command(
		"go", "test",
		"-run", "Output[12]",
		"github.com/intel/oim/pkg/log/testlog",
	)
	cmd.Env = append(os.Environ(), "TEST_OUTPUT2_FAIL=1", "GOCACHE=off")
	out, err := cmd.CombinedOutput()
	assert.Error(t, err)
	assert.Equal(t, normalizeGoTestOutput(`--- FAIL: TestOutput2 ()
	testlog_test.go:40: INFO TestOutput2
	testlog_test.go:42: was asked to fail
FAIL
FAIL	github.com/intel/oim/pkg/log/testlog
`), normalizeGoTestOutput(stripTimes(out)))
}

// TestOutputVerbose runs the Output tests in a subprocess with -v and checks
// that both the passing and failing tests' log output appears.
func TestOutputVerbose(t *testing.T) {
	cmd := exec.Command(
		"go", "test",
		"-v",
		"-run", "Output[12]",
		"github.com/intel/oim/pkg/log/testlog",
	)
	cmd.Env = append(os.Environ(), "TEST_OUTPUT2_FAIL=1", "GOCACHE=off")
	out, err := cmd.CombinedOutput()
	assert.Error(t, err)
	assert.Equal(t, normalizeGoTestOutput(`=== RUN   TestOutput1
--- PASS: TestOutput1 ()
	testlog_test.go:34: INFO TestOutput1
=== RUN   TestOutput2
--- FAIL: TestOutput2 ()
	testlog_test.go:40: INFO TestOutput2
	testlog_test.go:42: was asked to fail
FAIL
FAIL	github.com/intel/oim/pkg/log/testlog
`), normalizeGoTestOutput(stripTimes(out)))
}
package secrets

import (
	"errors"

	"github.com/10gen/realm-cli/internal/cli"
	"github.com/10gen/realm-cli/internal/cli/user"
	"github.com/10gen/realm-cli/internal/cloud/realm"
	"github.com/10gen/realm-cli/internal/terminal"
	"github.com/AlecAivazis/survey/v2"
)

// deleteInputs holds the resolved command-line inputs for the
// "secrets delete" command.
type deleteInputs struct {
	cli.ProjectInputs
	// secrets lists the identifiers (matched by name or ID below) of the
	// secrets to delete; empty means prompt interactively.
	secrets []string
}

// Resolve validates the embedded project-level inputs against the user's
// working directory.
func (i *deleteInputs) Resolve(profile *user.Profile, ui terminal.UI) error {
	if err := i.ProjectInputs.Resolve(ui, profile.WorkingDirectory, false); err != nil {
		return err
	}
	return nil
}

// resolveSecrets maps the requested identifiers to the app's secrets:
// each identifier is matched first by secret name, then by secret ID. If no
// identifiers were supplied on the command line, the user is instead prompted
// to multi-select from all app secrets. Returns an error when identifiers
// were supplied but none matched.
func (i *deleteInputs) resolveSecrets(ui terminal.UI, appSecrets []realm.Secret) ([]realm.Secret, error) {
	if len(appSecrets) == 0 {
		return nil, nil
	}
	if len(i.secrets) > 0 {
		// Index the app's secrets by both ID and name for direct lookup.
		secretsByID := make(map[string]realm.Secret, len(appSecrets))
		secretsByName := make(map[string]realm.Secret, len(appSecrets))
		for _, secret := range appSecrets {
			secretsByID[secret.ID] = secret
			secretsByName[secret.Name] = secret
		}
		secrets := make([]realm.Secret, 0, len(i.secrets))
		for _, identifier := range i.secrets {
			// Name match takes precedence over ID match.
			if secret, ok := secretsByName[identifier]; ok {
				secrets = append(secrets, secret)
			} else if secret, ok := secretsByID[identifier]; ok {
				secrets = append(secrets, secret)
			}
		}
		if len(secrets) == 0 {
			return nil, errors.New("unable to find secrets")
		}
		return secrets, nil
	}

	// No identifiers given: build a prompt option per secret and let the
	// user multi-select which ones to delete.
	options := make([]string, 0, len(appSecrets))
	secretsByOption := map[string]realm.Secret{}
	for _, secret := range appSecrets {
		option := displaySecretOption(secret)
		options = append(options, option)
		secretsByOption[option] = secret
	}
	var selections []string
	if err := ui.AskOne(
		&selections,
		&survey.MultiSelect{
			Message: "Which secret(s) would you like to delete?",
			Options: options,
		},
	); err != nil {
		return nil, err
	}
	secrets := make([]realm.Secret, 0, len(selections))
	for _, selection := range selections {
		secrets = append(secrets, secretsByOption[selection])
	}
	return secrets, nil
}
// Package storage is the data storage service.
package storage

import (
	"go.core/lesson6/pkg/crawler"
)

// Storager is the interface a document store must implement.
type Storager interface {
	// Create stores the given batch of crawled documents.
	Create(docs []crawler.Document)
	// Document returns the document with the given id and whether it exists.
	Document(id int) (crawler.Document, bool)
	// Add stores a single document.
	Add(d crawler.Document)
}

// New is the constructor of the storage service. It simply returns the
// supplied implementation, acting as a dependency-injection seam.
func New(s Storager) Storager {
	return s
}
package oidc_test

import (
	"context"
	"testing"

	"github.com/golang-jwt/jwt/v5"
	"github.com/ory/fosite"
	fjwt "github.com/ory/fosite/token/jwt"
	"github.com/stretchr/testify/assert"

	"github.com/authelia/authelia/v4/internal/oidc"
)

// TestStatelessJWTValidator_IntrospectToken exercises the stateless JWT
// introspection handler with valid RFC 9068 access tokens, scope mismatches,
// non-access-token JWTs, opaque tokens and malformed tokens.
//
// Fixes over the previous version: the second "WithScopes" case had a
// duplicate subtest name (go test silently renames it to "#01", making it
// impossible to target with -run), and "ShouldRecjectInvalidToken" was a typo.
func TestStatelessJWTValidator_IntrospectToken(t *testing.T) {
	signer := &fjwt.DefaultSigner{
		GetPrivateKey: func(ctx context.Context) (any, error) {
			return keyRSA2048, nil
		},
	}

	// maketoken signs a JWT with the given claims/header, defaulting the
	// "alg" header to the signing method's algorithm.
	maketoken := func(method jwt.SigningMethod, claims jwt.MapClaims, header map[string]any) string {
		j := &jwt.Token{
			Header: header,
			Claims: claims,
			Method: method,
		}
		if _, ok := j.Header[oidc.JWTHeaderKeyAlgorithm]; !ok {
			j.Header[oidc.JWTHeaderKeyAlgorithm] = method.Alg()
		}
		token, err := j.SignedString(keyRSA2048)
		if err != nil {
			panic(err)
		}
		return token
	}

	handler := oidc.StatelessJWTValidator{
		Signer: signer,
		Config: &ScopeStrategyProvider{
			value: fosite.ExactScopeStrategy,
		},
	}

	testCases := []struct {
		name     string
		have     string
		scopes   []string
		expected fosite.TokenUse
		err      string
	}{
		{
			"ShouldHandleAccessTokenJWT",
			maketoken(jwt.SigningMethodRS256, jwt.MapClaims{}, map[string]any{oidc.JWTHeaderKeyType: oidc.JWTHeaderTypeValueAccessTokenJWT}),
			nil,
			fosite.AccessToken,
			"",
		},
		{
			"ShouldHandleAccessTokenJWTWithScopes",
			maketoken(jwt.SigningMethodRS256, jwt.MapClaims{oidc.ClaimScope: "example"}, map[string]any{oidc.JWTHeaderKeyType: oidc.JWTHeaderTypeValueAccessTokenJWT}),
			[]string{"example"},
			fosite.AccessToken,
			"",
		},
		{
			"ShouldRejectAccessTokenJWTWithMismatchedScopes",
			maketoken(jwt.SigningMethodRS256, jwt.MapClaims{oidc.ClaimScope: "example2"}, map[string]any{oidc.JWTHeaderKeyType: oidc.JWTHeaderTypeValueAccessTokenJWT}),
			[]string{"example"},
			fosite.AccessToken,
			"The requested scope is invalid, unknown, or malformed. The request scope 'example' has not been granted or is not allowed to be requested.",
		},
		{
			"ShouldRejectStandardJWT",
			maketoken(jwt.SigningMethodRS256, jwt.MapClaims{}, map[string]any{oidc.JWTHeaderKeyType: "JWT"}),
			nil,
			fosite.TokenUse(""),
			"The request could not be authorized. Check that you provided valid credentials in the right format. The provided token is not a valid RFC9068 JWT Profile Access Token as it is missing the header 'typ' value of 'at+jwt'.",
		},
		{
			"ShouldRejectNonJWT",
			"authelia_at_example",
			nil,
			fosite.TokenUse(""),
			"The handler is not responsible for this request. The provided token appears to be an opaque token not a JWT.",
		},
		{
			"ShouldRejectTokenWithOpaquePrefix",
			"authelia_at_example.another.example",
			nil,
			fosite.TokenUse(""),
			"The handler is not responsible for this request. The provided token appears to be an opaque token not a JWT.",
		},
		{
			"ShouldRejectInvalidToken",
			"example.another.example",
			nil,
			fosite.TokenUse(""),
			"Invalid token format. Check that you provided a valid token in the right format. invalid character '\\x16' looking for beginning of object key string",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ar := fosite.NewAccessRequest(oidc.NewSession())

			actual, err := handler.IntrospectToken(context.TODO(), tc.have, fosite.AccessToken, ar, tc.scopes)

			assert.Equal(t, tc.expected, actual)

			if len(tc.err) == 0 {
				assert.NoError(t, oidc.ErrorToDebugRFC6749Error(err))
			} else {
				assert.EqualError(t, oidc.ErrorToDebugRFC6749Error(err), tc.err)
			}
		})
	}
}

// ScopeStrategyProvider is a minimal config stub that supplies a fixed
// fosite.ScopeStrategy to the handler under test.
type ScopeStrategyProvider struct {
	value fosite.ScopeStrategy
}

// GetScopeStrategy returns the configured scope strategy.
func (p *ScopeStrategyProvider) GetScopeStrategy(ctx context.Context) fosite.ScopeStrategy {
	return p.value
}
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package core import ( "cmp" "context" "fmt" "math" "runtime" "slices" "strconv" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/diagnosticspb" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/lock" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/planner/util/debugtrace" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" utilhint "github.com/pingcap/tidb/util/hint" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/set" "github.com/pingcap/tidb/util/tracing" "github.com/pingcap/tipb/go-tipb" "go.uber.org/atomic" "go.uber.org/zap" ) // OptimizeAstNode optimizes the query to a physical plan directly. var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error) // AllowCartesianProduct means whether tidb allows cartesian join without equal conditions. 
var AllowCartesianProduct = atomic.NewBool(true) // IsReadOnly check whether the ast.Node is a read only statement. var IsReadOnly func(node ast.Node, vars *variable.SessionVars) bool const ( flagGcSubstitute uint64 = 1 << iota flagPrunColumns flagStabilizeResults flagBuildKeyInfo flagDecorrelate flagSemiJoinRewrite flagEliminateAgg flagSkewDistinctAgg flagEliminateProjection flagMaxMinEliminate flagPredicatePushDown flagEliminateOuterJoin flagPartitionProcessor flagCollectPredicateColumnsPoint flagPushDownAgg flagDeriveTopNFromWindow flagPredicateSimplification flagPushDownTopN flagSyncWaitStatsLoadPoint flagJoinReOrder flagPrunColumnsAgain flagPushDownSequence flagResolveExpand ) var optRuleList = []logicalOptRule{ &gcSubstituter{}, &columnPruner{}, &resultReorder{}, &buildKeySolver{}, &decorrelateSolver{}, &semiJoinRewriter{}, &aggregationEliminator{}, &skewDistinctAggRewriter{}, &projectionEliminator{}, &maxMinEliminator{}, &ppdSolver{}, &outerJoinEliminator{}, &partitionProcessor{}, &collectPredicateColumnsPoint{}, &aggregationPushDownSolver{}, &deriveTopNFromWindow{}, &predicateSimplification{}, &pushDownTopNOptimizer{}, &syncWaitStatsLoadPoint{}, &joinReOrderSolver{}, &columnPruner{}, // column pruning again at last, note it will mess up the results of buildKeySolver &pushDownSequenceSolver{}, &resolveExpand{}, } type logicalOptimizeOp struct { // tracer is goring to track optimize steps during rule optimizing tracer *tracing.LogicalOptimizeTracer } func defaultLogicalOptimizeOption() *logicalOptimizeOp { return &logicalOptimizeOp{} } func (op *logicalOptimizeOp) withEnableOptimizeTracer(tracer *tracing.LogicalOptimizeTracer) *logicalOptimizeOp { op.tracer = tracer return op } func (op *logicalOptimizeOp) appendBeforeRuleOptimize(index int, name string, before LogicalPlan) { if op == nil || op.tracer == nil { return } op.tracer.AppendRuleTracerBeforeRuleOptimize(index, name, before.BuildPlanTrace()) } func (op *logicalOptimizeOp) appendStepToCurrent(id int, 
tp string, reason, action func() string) { if op == nil || op.tracer == nil { return } op.tracer.AppendRuleTracerStepToCurrent(id, tp, reason(), action()) } func (op *logicalOptimizeOp) recordFinalLogicalPlan(final LogicalPlan) { if op == nil || op.tracer == nil { return } op.tracer.RecordFinalLogicalPlan(final.BuildPlanTrace()) } // logicalOptRule means a logical optimizing rule, which contains decorrelate, ppd, column pruning, etc. type logicalOptRule interface { optimize(context.Context, LogicalPlan, *logicalOptimizeOp) (LogicalPlan, error) name() string } // BuildLogicalPlanForTest builds a logical plan for testing purpose from ast.Node. func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node ast.Node, infoSchema infoschema.InfoSchema) (Plan, types.NameSlice, error) { sctx.GetSessionVars().PlanID.Store(0) sctx.GetSessionVars().PlanColumnID.Store(0) builder, _ := NewPlanBuilder().Init(sctx, infoSchema, &utilhint.BlockHintProcessor{}) p, err := builder.Build(ctx, node) if err != nil { return nil, nil, err } return p, p.OutputNames(), err } // CheckPrivilege checks the privilege for a user. func CheckPrivilege(activeRoles []*auth.RoleIdentity, pm privilege.Manager, vs []visitInfo) error { for _, v := range vs { if v.privilege == mysql.ExtendedPriv { if !pm.RequestDynamicVerification(activeRoles, v.dynamicPriv, v.dynamicWithGrant) { if v.err == nil { return ErrPrivilegeCheckFail.GenWithStackByArgs(v.dynamicPriv) } return v.err } } else if !pm.RequestVerification(activeRoles, v.db, v.table, v.column, v.privilege) { if v.err == nil { return ErrPrivilegeCheckFail.GenWithStackByArgs(v.privilege.String()) } return v.err } } return nil } // VisitInfo4PrivCheck generates privilege check infos because privilege check of local temporary tables is different // with normal tables. `CREATE` statement needs `CREATE TEMPORARY TABLE` privilege from the database, and subsequent // statements do not need any privileges. 
func VisitInfo4PrivCheck(is infoschema.InfoSchema, node ast.Node, vs []visitInfo) (privVisitInfo []visitInfo) { if node == nil { return vs } switch stmt := node.(type) { case *ast.CreateTableStmt: privVisitInfo = make([]visitInfo, 0, len(vs)) for _, v := range vs { if v.privilege == mysql.CreatePriv { if stmt.TemporaryKeyword == ast.TemporaryLocal { // `CREATE TEMPORARY TABLE` privilege is required from the database, not the table. newVisitInfo := v newVisitInfo.privilege = mysql.CreateTMPTablePriv newVisitInfo.table = "" privVisitInfo = append(privVisitInfo, newVisitInfo) } else { // If both the normal table and temporary table already exist, we need to check the privilege. privVisitInfo = append(privVisitInfo, v) } } else { // `CREATE TABLE LIKE tmp` or `CREATE TABLE FROM SELECT tmp` in the future. if needCheckTmpTablePriv(is, v) { privVisitInfo = append(privVisitInfo, v) } } } case *ast.DropTableStmt: // Dropping a local temporary table doesn't need any privileges. if stmt.IsView { privVisitInfo = vs } else { privVisitInfo = make([]visitInfo, 0, len(vs)) if stmt.TemporaryKeyword != ast.TemporaryLocal { for _, v := range vs { if needCheckTmpTablePriv(is, v) { privVisitInfo = append(privVisitInfo, v) } } } } case *ast.GrantStmt, *ast.DropSequenceStmt, *ast.DropPlacementPolicyStmt: // Some statements ignore local temporary tables, so they should check the privileges on normal tables. privVisitInfo = vs default: privVisitInfo = make([]visitInfo, 0, len(vs)) for _, v := range vs { if needCheckTmpTablePriv(is, v) { privVisitInfo = append(privVisitInfo, v) } } } return } func needCheckTmpTablePriv(is infoschema.InfoSchema, v visitInfo) bool { if v.db != "" && v.table != "" { // Other statements on local temporary tables except `CREATE` do not check any privileges. tb, err := is.TableByName(model.NewCIStr(v.db), model.NewCIStr(v.table)) // If the table doesn't exist, we do not report errors to avoid leaking the existence of the table. 
if err == nil && tb.Meta().TempTableType == model.TempTableLocal { return false } } return true } // CheckTableLock checks the table lock. func CheckTableLock(ctx sessionctx.Context, is infoschema.InfoSchema, vs []visitInfo) error { if !config.TableLockEnabled() { return nil } checker := lock.NewChecker(ctx, is) for i := range vs { err := checker.CheckTableLock(vs[i].db, vs[i].table, vs[i].privilege, vs[i].alterWritable) // if table with lock-write table dropped, we can access other table, such as `rename` operation if err == lock.ErrLockedTableDropped { break } if err != nil { return err } } return nil } func checkStableResultMode(sctx sessionctx.Context) bool { s := sctx.GetSessionVars() st := s.StmtCtx return s.EnableStableResultMode && (!st.InInsertStmt && !st.InUpdateStmt && !st.InDeleteStmt && !st.InLoadDataStmt) } // DoOptimizeAndLogicAsRet optimizes a logical plan to a physical plan and return the optimized logical plan. func DoOptimizeAndLogicAsRet(ctx context.Context, sctx sessionctx.Context, flag uint64, logic LogicalPlan) (LogicalPlan, PhysicalPlan, float64, error) { sessVars := sctx.GetSessionVars() // if there is something after flagPrunColumns, do flagPrunColumnsAgain if flag&flagPrunColumns > 0 && flag-flagPrunColumns > flagPrunColumns { flag |= flagPrunColumnsAgain } if checkStableResultMode(logic.SCtx()) { flag |= flagStabilizeResults } if logic.SCtx().GetSessionVars().StmtCtx.StraightJoinOrder { // When we use the straight Join Order hint, we should disable the join reorder optimization. 
flag &= ^flagJoinReOrder } flag |= flagCollectPredicateColumnsPoint flag |= flagSyncWaitStatsLoadPoint logic, err := logicalOptimize(ctx, flag, logic) if err != nil { return nil, nil, 0, err } if !AllowCartesianProduct.Load() && existsCartesianProduct(logic) { return nil, nil, 0, errors.Trace(ErrCartesianProductUnsupported) } planCounter := PlanCounterTp(sessVars.StmtCtx.StmtHints.ForceNthPlan) if planCounter == 0 { planCounter = -1 } physical, cost, err := physicalOptimize(logic, &planCounter) if err != nil { return nil, nil, 0, err } finalPlan, err := postOptimize(ctx, sctx, physical) if err != nil { return nil, nil, 0, err } if sessVars.StmtCtx.EnableOptimizerCETrace { refineCETrace(sctx) } if sessVars.StmtCtx.EnableOptimizeTrace { sessVars.StmtCtx.OptimizeTracer.RecordFinalPlan(finalPlan.BuildPlanTrace()) } return logic, finalPlan, cost, nil } // DoOptimize optimizes a logical plan to a physical plan. func DoOptimize(ctx context.Context, sctx sessionctx.Context, flag uint64, logic LogicalPlan) (PhysicalPlan, float64, error) { sessVars := sctx.GetSessionVars() if sessVars.StmtCtx.EnableOptimizerDebugTrace { debugtrace.EnterContextCommon(sctx) defer debugtrace.LeaveContextCommon(sctx) } _, finalPlan, cost, err := DoOptimizeAndLogicAsRet(ctx, sctx, flag, logic) return finalPlan, cost, err } // refineCETrace will adjust the content of CETrace. // Currently, it will (1) deduplicate trace records, (2) sort the trace records (to make it easier in the tests) and (3) fill in the table name. 
func refineCETrace(sctx sessionctx.Context) { stmtCtx := sctx.GetSessionVars().StmtCtx stmtCtx.OptimizerCETrace = tracing.DedupCETrace(stmtCtx.OptimizerCETrace) slices.SortFunc(stmtCtx.OptimizerCETrace, func(i, j *tracing.CETraceRecord) int { if i == nil && j != nil { return -1 } if i == nil || j == nil { return 1 } if c := cmp.Compare(i.TableID, j.TableID); c != 0 { return c } if c := cmp.Compare(i.Type, j.Type); c != 0 { return c } if c := cmp.Compare(i.Expr, j.Expr); c != 0 { return c } return cmp.Compare(i.RowCount, j.RowCount) }) traceRecords := stmtCtx.OptimizerCETrace is := sctx.GetDomainInfoSchema().(infoschema.InfoSchema) for _, rec := range traceRecords { tbl, ok := is.TableByID(rec.TableID) if ok { rec.TableName = tbl.Meta().Name.O continue } tbl, _, _ = is.FindTableByPartitionID(rec.TableID) if tbl != nil { rec.TableName = tbl.Meta().Name.O continue } logutil.BgLogger().Warn("Failed to find table in infoschema", zap.String("category", "OptimizerTrace"), zap.Int64("table id", rec.TableID)) } } // mergeContinuousSelections merge continuous selections which may occur after changing plans. func mergeContinuousSelections(p PhysicalPlan) { if sel, ok := p.(*PhysicalSelection); ok { for { childSel := sel.children[0] tmp, ok := childSel.(*PhysicalSelection) if !ok { break } sel.Conditions = append(sel.Conditions, tmp.Conditions...) sel.SetChild(0, tmp.children[0]) } } for _, child := range p.Children() { mergeContinuousSelections(child) } // merge continuous selections in a coprocessor task of tiflash tableReader, isTableReader := p.(*PhysicalTableReader) if isTableReader && tableReader.StoreType == kv.TiFlash { mergeContinuousSelections(tableReader.tablePlan) tableReader.TablePlans = flattenPushDownPlan(tableReader.tablePlan) } } func postOptimize(ctx context.Context, sctx sessionctx.Context, plan PhysicalPlan) (PhysicalPlan, error) { // some cases from update optimize will require avoiding projection elimination. 
// see comments ahead of call of DoOptimize in function of buildUpdate(). err := prunePhysicalColumns(sctx, plan) if err != nil { return nil, err } plan = eliminatePhysicalProjection(plan) plan = InjectExtraProjection(plan) mergeContinuousSelections(plan) plan = eliminateUnionScanAndLock(sctx, plan) plan = enableParallelApply(sctx, plan) handleFineGrainedShuffle(ctx, sctx, plan) propagateProbeParents(plan, nil) countStarRewrite(plan) disableReuseChunkIfNeeded(sctx, plan) tryEnableLateMaterialization(sctx, plan) generateRuntimeFilter(sctx, plan) return plan, nil } func generateRuntimeFilter(sctx sessionctx.Context, plan PhysicalPlan) { if !sctx.GetSessionVars().IsRuntimeFilterEnabled() || sctx.GetSessionVars().InRestrictedSQL { return } logutil.BgLogger().Debug("Start runtime filter generator") rfGenerator := &RuntimeFilterGenerator{ rfIDGenerator: &util.IDGenerator{}, columnUniqueIDToRF: map[int64][]*RuntimeFilter{}, parentPhysicalPlan: plan, } startRFGenerator := time.Now() rfGenerator.GenerateRuntimeFilter(plan) logutil.BgLogger().Debug("Finish runtime filter generator", zap.Duration("Cost", time.Since(startRFGenerator))) } // prunePhysicalColumns currently only work for MPP(HashJoin<-Exchange). // Here add projection instead of pruning columns directly for safety considerations. // And projection is cheap here for it saves the network cost and work in memory. 
func prunePhysicalColumns(sctx sessionctx.Context, plan PhysicalPlan) error { if tableReader, ok := plan.(*PhysicalTableReader); ok { if _, isExchangeSender := tableReader.tablePlan.(*PhysicalExchangeSender); isExchangeSender { err := prunePhysicalColumnsInternal(sctx, tableReader.tablePlan) if err != nil { return err } } } else { for _, child := range plan.Children() { return prunePhysicalColumns(sctx, child) } } return nil } func (p *PhysicalHashJoin) extractUsedCols(parentUsedCols []*expression.Column) (leftCols []*expression.Column, rightCols []*expression.Column) { for _, eqCond := range p.EqualConditions { parentUsedCols = append(parentUsedCols, expression.ExtractColumns(eqCond)...) } for _, neCond := range p.NAEqualConditions { parentUsedCols = append(parentUsedCols, expression.ExtractColumns(neCond)...) } for _, leftCond := range p.LeftConditions { parentUsedCols = append(parentUsedCols, expression.ExtractColumns(leftCond)...) } for _, rightCond := range p.RightConditions { parentUsedCols = append(parentUsedCols, expression.ExtractColumns(rightCond)...) } for _, otherCond := range p.OtherConditions { parentUsedCols = append(parentUsedCols, expression.ExtractColumns(otherCond)...) 
} lChild := p.children[0] rChild := p.children[1] for _, col := range parentUsedCols { if lChild.Schema().Contains(col) { leftCols = append(leftCols, col) } else if rChild.Schema().Contains(col) { rightCols = append(rightCols, col) } } return leftCols, rightCols } func prunePhysicalColumnForHashJoinChild(sctx sessionctx.Context, hashJoin *PhysicalHashJoin, joinUsedCols []*expression.Column, sender *PhysicalExchangeSender) error { var err error joinUsed := expression.GetUsedList(joinUsedCols, sender.Schema()) hashCols := make([]*expression.Column, len(sender.HashCols)) for i, mppCol := range sender.HashCols { hashCols[i] = mppCol.Col } hashUsed := expression.GetUsedList(hashCols, sender.Schema()) needPrune := false usedExprs := make([]expression.Expression, len(sender.Schema().Columns)) prunedSchema := sender.Schema().Clone() for i := len(joinUsed) - 1; i >= 0; i-- { usedExprs[i] = sender.Schema().Columns[i] if !joinUsed[i] && !hashUsed[i] { needPrune = true usedExprs = append(usedExprs[:i], usedExprs[i+1:]...) prunedSchema.Columns = append(prunedSchema.Columns[:i], prunedSchema.Columns[i+1:]...) 
} } if needPrune && len(sender.children) > 0 { ch := sender.children[0] proj := PhysicalProjection{ Exprs: usedExprs, }.Init(sctx, ch.StatsInfo(), ch.SelectBlockOffset()) proj.SetSchema(prunedSchema) proj.SetChildren(ch) sender.children[0] = proj // Resolve Indices from bottom to up err = proj.ResolveIndicesItself() if err != nil { return err } err = sender.ResolveIndicesItself() if err != nil { return err } err = hashJoin.ResolveIndicesItself() if err != nil { return err } } return err } func prunePhysicalColumnsInternal(sctx sessionctx.Context, plan PhysicalPlan) error { var err error switch x := plan.(type) { case *PhysicalHashJoin: schemaColumns := x.Schema().Clone().Columns leftCols, rightCols := x.extractUsedCols(schemaColumns) matchPattern := false for i := 0; i <= 1; i++ { // Pattern: HashJoin <- ExchangeReceiver <- ExchangeSender matchPattern = false var exchangeSender *PhysicalExchangeSender if receiver, ok := x.children[i].(*PhysicalExchangeReceiver); ok { exchangeSender, matchPattern = receiver.children[0].(*PhysicalExchangeSender) } if matchPattern { if i == 0 { err = prunePhysicalColumnForHashJoinChild(sctx, x, leftCols, exchangeSender) } else { err = prunePhysicalColumnForHashJoinChild(sctx, x, rightCols, exchangeSender) } if err != nil { return nil } } /// recursively travel the physical plan err = prunePhysicalColumnsInternal(sctx, x.children[i]) if err != nil { return nil } } default: for _, child := range x.Children() { err = prunePhysicalColumnsInternal(sctx, child) if err != nil { return err } } } return nil } // tryEnableLateMaterialization tries to push down some filter conditions to the table scan operator // @brief: push down some filter conditions to the table scan operator // @param: sctx: session context // @param: plan: the physical plan to be pruned // @note: this optimization is only applied when the TiFlash is used. 
// @note: the following conditions should be satisfied: // - Only the filter conditions with high selectivity should be pushed down. // - The filter conditions which contain heavy cost functions should not be pushed down. // - Filter conditions that apply to the same column are either pushed down or not pushed down at all. func tryEnableLateMaterialization(sctx sessionctx.Context, plan PhysicalPlan) { // check if EnableLateMaterialization is set if sctx.GetSessionVars().EnableLateMaterialization && !sctx.GetSessionVars().TiFlashFastScan { predicatePushDownToTableScan(sctx, plan) } if sctx.GetSessionVars().EnableLateMaterialization && sctx.GetSessionVars().TiFlashFastScan { sc := sctx.GetSessionVars().StmtCtx sc.AppendWarning(errors.New("FastScan is not compatible with late materialization, late materialization is disabled")) } } /* * The countStarRewriter is used to rewrite count(*) -> count(not null column) **Only for TiFlash** Attention: Since count(*) is directly translated into count(1) during grammar parsing, the rewritten pattern actually matches count(constant) Pattern: PhysicalAggregation: count(constant) | TableFullScan: TiFlash Optimize: Table <k1 bool not null, k2 int null, k3 bigint not null> Query: select count(*) from table ColumnPruningRule: datasource pick row_id countStarRewrite: datasource pick k1 instead of row_id rewrite count(*) -> count(k1) Rewritten Query: select count(k1) from table */ func countStarRewrite(plan PhysicalPlan) { countStarRewriteInternal(plan) if tableReader, ok := plan.(*PhysicalTableReader); ok { countStarRewrite(tableReader.tablePlan) } else { for _, child := range plan.Children() { countStarRewrite(child) } } } func countStarRewriteInternal(plan PhysicalPlan) { // match pattern any agg(count(constant)) -> tablefullscan(tiflash) var physicalAgg *basePhysicalAgg switch x := plan.(type) { case *PhysicalHashAgg: physicalAgg = x.getPointer() case *PhysicalStreamAgg: physicalAgg = x.getPointer() default: return } if 
len(physicalAgg.GroupByItems) > 0 || len(physicalAgg.children) != 1 { return } for _, aggFunc := range physicalAgg.AggFuncs { if aggFunc.Name != "count" || len(aggFunc.Args) != 1 || aggFunc.HasDistinct { return } if _, ok := aggFunc.Args[0].(*expression.Constant); !ok { return } } physicalTableScan, ok := physicalAgg.Children()[0].(*PhysicalTableScan) if !ok || !physicalTableScan.isFullScan() || physicalTableScan.StoreType != kv.TiFlash || len(physicalTableScan.schema.Columns) != 1 { return } // rewrite datasource and agg args rewriteTableScanAndAggArgs(physicalTableScan, physicalAgg.AggFuncs) } // rewriteTableScanAndAggArgs Pick the narrowest and not null column from table // If there is no not null column in Data Source, the row_id or pk column will be retained func rewriteTableScanAndAggArgs(physicalTableScan *PhysicalTableScan, aggFuncs []*aggregation.AggFuncDesc) { var resultColumnInfo *model.ColumnInfo var resultColumn *expression.Column resultColumnInfo = physicalTableScan.Columns[0] resultColumn = physicalTableScan.schema.Columns[0] // prefer not null column from table for _, columnInfo := range physicalTableScan.Table.Columns { if columnInfo.FieldType.IsVarLengthType() { continue } if mysql.HasNotNullFlag(columnInfo.GetFlag()) { if columnInfo.GetFlen() < resultColumnInfo.GetFlen() { resultColumnInfo = columnInfo resultColumn = &expression.Column{ UniqueID: physicalTableScan.SCtx().GetSessionVars().AllocPlanColumnID(), ID: resultColumnInfo.ID, RetType: resultColumnInfo.FieldType.Clone(), OrigName: fmt.Sprintf("%s.%s.%s", physicalTableScan.DBName.L, physicalTableScan.Table.Name.L, resultColumnInfo.Name), } } } } // table scan (row_id) -> (not null column) physicalTableScan.Columns[0] = resultColumnInfo physicalTableScan.schema.Columns[0] = resultColumn // agg arg count(1) -> count(not null column) arg := resultColumn.Clone() for _, aggFunc := range aggFuncs { constExpr, ok := aggFunc.Args[0].(*expression.Constant) if !ok { return } // count(null) shouldn't 
be rewritten if constExpr.Value.IsNull() { continue } aggFunc.Args[0] = arg } } // Only for MPP(Window<-[Sort]<-ExchangeReceiver<-ExchangeSender). // TiFlashFineGrainedShuffleStreamCount: // < 0: fine grained shuffle is disabled. // > 0: use TiFlashFineGrainedShuffleStreamCount as stream count. // == 0: use TiFlashMaxThreads as stream count when it's greater than 0. Otherwise set status as uninitialized. func handleFineGrainedShuffle(ctx context.Context, sctx sessionctx.Context, plan PhysicalPlan) { streamCount := sctx.GetSessionVars().TiFlashFineGrainedShuffleStreamCount if streamCount < 0 { return } if streamCount == 0 { if sctx.GetSessionVars().TiFlashMaxThreads > 0 { streamCount = sctx.GetSessionVars().TiFlashMaxThreads } } // use two separate cluster info to avoid grpc calls cost tiflashServerCountInfo := tiflashClusterInfo{unInitialized, 0} streamCountInfo := tiflashClusterInfo{unInitialized, 0} if streamCount != 0 { streamCountInfo.itemStatus = initialized streamCountInfo.itemValue = uint64(streamCount) } setupFineGrainedShuffle(ctx, sctx, &streamCountInfo, &tiflashServerCountInfo, plan) } func setupFineGrainedShuffle(ctx context.Context, sctx sessionctx.Context, streamCountInfo *tiflashClusterInfo, tiflashServerCountInfo *tiflashClusterInfo, plan PhysicalPlan) { if tableReader, ok := plan.(*PhysicalTableReader); ok { if _, isExchangeSender := tableReader.tablePlan.(*PhysicalExchangeSender); isExchangeSender { helper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: make([]*basePhysicalPlan, 1)} setupFineGrainedShuffleInternal(ctx, sctx, tableReader.tablePlan, &helper, streamCountInfo, tiflashServerCountInfo) } } else { for _, child := range plan.Children() { setupFineGrainedShuffle(ctx, sctx, streamCountInfo, tiflashServerCountInfo, child) } } } type shuffleTarget uint8 const ( unknown shuffleTarget = iota window joinBuild hashAgg ) type fineGrainedShuffleHelper struct { shuffleTarget shuffleTarget plans []*basePhysicalPlan joinKeysCount int } type 
tiflashClusterInfoStatus uint8 const ( unInitialized tiflashClusterInfoStatus = iota initialized failed ) type tiflashClusterInfo struct { itemStatus tiflashClusterInfoStatus itemValue uint64 } func (h *fineGrainedShuffleHelper) clear() { h.shuffleTarget = unknown h.plans = h.plans[:0] h.joinKeysCount = 0 } func (h *fineGrainedShuffleHelper) updateTarget(t shuffleTarget, p *basePhysicalPlan) { h.shuffleTarget = t h.plans = append(h.plans, p) } // calculateTiFlashStreamCountUsingMinLogicalCores uses minimal logical cpu cores among tiflash servers, and divide by 2 // return false, 0 if any err happens func calculateTiFlashStreamCountUsingMinLogicalCores(ctx context.Context, sctx sessionctx.Context, serversInfo []infoschema.ServerInfo) (bool, uint64) { failpoint.Inject("mockTiFlashStreamCountUsingMinLogicalCores", func(val failpoint.Value) { intVal, err := strconv.Atoi(val.(string)) if err == nil { failpoint.Return(true, uint64(intVal)) } else { failpoint.Return(false, 0) } }) rows, err := infoschema.FetchClusterServerInfoWithoutPrivilegeCheck(ctx, sctx, serversInfo, diagnosticspb.ServerInfoType_HardwareInfo, false) if err != nil { return false, 0 } var initialMaxCores uint64 = 10000 var minLogicalCores = initialMaxCores // set to a large enough value here for _, row := range rows { if row[4].GetString() == "cpu-logical-cores" { logicalCpus, err := strconv.Atoi(row[5].GetString()) if err == nil && logicalCpus > 0 { minLogicalCores = mathutil.Min(minLogicalCores, uint64(logicalCpus)) } } } // No need to check len(serersInfo) == serverCount here, since missing some servers' info won't affect the correctness if minLogicalCores > 1 && minLogicalCores != initialMaxCores { if runtime.GOARCH == "amd64" { // In most x86-64 platforms, `Thread(s) per core` is 2 return true, minLogicalCores / 2 } // ARM cpus don't implement Hyper-threading. 
return true, minLogicalCores // Other platforms are too rare to consider } return false, 0 } func checkFineGrainedShuffleForJoinAgg(ctx context.Context, sctx sessionctx.Context, streamCountInfo *tiflashClusterInfo, tiflashServerCountInfo *tiflashClusterInfo, exchangeColCount int, splitLimit uint64) (applyFlag bool, streamCount uint64) { switch (*streamCountInfo).itemStatus { case unInitialized: streamCount = 4 // assume 8c node in cluster as minimal, stream count is 8 / 2 = 4 case initialized: streamCount = (*streamCountInfo).itemValue case failed: return false, 0 // probably won't reach this path } var tiflashServerCount uint64 switch (*tiflashServerCountInfo).itemStatus { case unInitialized: serversInfo, err := infoschema.GetTiFlashServerInfo(sctx) if err != nil { (*tiflashServerCountInfo).itemStatus = failed (*tiflashServerCountInfo).itemValue = 0 if (*streamCountInfo).itemStatus == unInitialized { setDefaultStreamCount(streamCountInfo) } return false, 0 } tiflashServerCount = uint64(len(serversInfo)) (*tiflashServerCountInfo).itemStatus = initialized (*tiflashServerCountInfo).itemValue = tiflashServerCount case initialized: tiflashServerCount = (*tiflashServerCountInfo).itemValue case failed: return false, 0 } // if already exceeds splitLimit, no need to fetch actual logical cores if tiflashServerCount*uint64(exchangeColCount)*streamCount > splitLimit { return false, 0 } // if streamCount already initialized, and can pass splitLimit check if (*streamCountInfo).itemStatus == initialized { return true, streamCount } serversInfo, err := infoschema.GetTiFlashServerInfo(sctx) if err != nil { (*tiflashServerCountInfo).itemStatus = failed (*tiflashServerCountInfo).itemValue = 0 return false, 0 } flag, temStreamCount := calculateTiFlashStreamCountUsingMinLogicalCores(ctx, sctx, serversInfo) if !flag { setDefaultStreamCount(streamCountInfo) (*tiflashServerCountInfo).itemStatus = failed return false, 0 } streamCount = temStreamCount (*streamCountInfo).itemStatus = 
initialized (*streamCountInfo).itemValue = streamCount applyFlag = tiflashServerCount*uint64(exchangeColCount)*streamCount <= splitLimit return applyFlag, streamCount } func inferFineGrainedShuffleStreamCountForWindow(ctx context.Context, sctx sessionctx.Context, streamCountInfo *tiflashClusterInfo, tiflashServerCountInfo *tiflashClusterInfo) (streamCount uint64) { switch (*streamCountInfo).itemStatus { case unInitialized: if (*tiflashServerCountInfo).itemStatus == failed { setDefaultStreamCount(streamCountInfo) streamCount = (*streamCountInfo).itemValue break } serversInfo, err := infoschema.GetTiFlashServerInfo(sctx) if err != nil { setDefaultStreamCount(streamCountInfo) streamCount = (*streamCountInfo).itemValue (*tiflashServerCountInfo).itemStatus = failed break } if (*tiflashServerCountInfo).itemStatus == unInitialized { (*tiflashServerCountInfo).itemStatus = initialized (*tiflashServerCountInfo).itemValue = uint64(len(serversInfo)) } flag, temStreamCount := calculateTiFlashStreamCountUsingMinLogicalCores(ctx, sctx, serversInfo) if !flag { setDefaultStreamCount(streamCountInfo) streamCount = (*streamCountInfo).itemValue (*tiflashServerCountInfo).itemStatus = failed break } streamCount = temStreamCount (*streamCountInfo).itemStatus = initialized (*streamCountInfo).itemValue = streamCount case initialized: streamCount = (*streamCountInfo).itemValue case failed: setDefaultStreamCount(streamCountInfo) streamCount = (*streamCountInfo).itemValue } return streamCount } func setDefaultStreamCount(streamCountInfo *tiflashClusterInfo) { (*streamCountInfo).itemStatus = initialized (*streamCountInfo).itemValue = variable.DefStreamCountWhenMaxThreadsNotSet } func setupFineGrainedShuffleInternal(ctx context.Context, sctx sessionctx.Context, plan PhysicalPlan, helper *fineGrainedShuffleHelper, streamCountInfo *tiflashClusterInfo, tiflashServerCountInfo *tiflashClusterInfo) { switch x := plan.(type) { case *PhysicalWindow: // Do not clear the plans because window executor 
will keep the data partition. // For non hash partition window function, there will be a passthrough ExchangeSender to collect data, // which will break data partition. helper.updateTarget(window, &x.basePhysicalPlan) setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], helper, streamCountInfo, tiflashServerCountInfo) case *PhysicalSort: if x.IsPartialSort { // Partial sort will keep the data partition. helper.plans = append(helper.plans, &x.basePhysicalPlan) } else { // Global sort will break the data partition. helper.clear() } setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], helper, streamCountInfo, tiflashServerCountInfo) case *PhysicalSelection: helper.plans = append(helper.plans, &x.basePhysicalPlan) setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], helper, streamCountInfo, tiflashServerCountInfo) case *PhysicalProjection: helper.plans = append(helper.plans, &x.basePhysicalPlan) setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], helper, streamCountInfo, tiflashServerCountInfo) case *PhysicalExchangeReceiver: helper.plans = append(helper.plans, &x.basePhysicalPlan) setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], helper, streamCountInfo, tiflashServerCountInfo) case *PhysicalHashAgg: // Todo: allow hash aggregation's output still benefits from fine grained shuffle aggHelper := fineGrainedShuffleHelper{shuffleTarget: hashAgg, plans: []*basePhysicalPlan{}} aggHelper.plans = append(aggHelper.plans, &x.basePhysicalPlan) setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], &aggHelper, streamCountInfo, tiflashServerCountInfo) case *PhysicalHashJoin: child0 := x.children[0] child1 := x.children[1] buildChild := child0 probChild := child1 joinKeys := x.LeftJoinKeys if x.InnerChildIdx != 0 { // Child1 is build side. 
buildChild = child1 joinKeys = x.RightJoinKeys probChild = child0 } if len(joinKeys) > 0 { // Not cross join buildHelper := fineGrainedShuffleHelper{shuffleTarget: joinBuild, plans: []*basePhysicalPlan{}} buildHelper.plans = append(buildHelper.plans, &x.basePhysicalPlan) buildHelper.joinKeysCount = len(joinKeys) setupFineGrainedShuffleInternal(ctx, sctx, buildChild, &buildHelper, streamCountInfo, tiflashServerCountInfo) } else { buildHelper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: []*basePhysicalPlan{}} setupFineGrainedShuffleInternal(ctx, sctx, buildChild, &buildHelper, streamCountInfo, tiflashServerCountInfo) } // don't apply fine grained shuffle for probe side helper.clear() setupFineGrainedShuffleInternal(ctx, sctx, probChild, helper, streamCountInfo, tiflashServerCountInfo) case *PhysicalExchangeSender: if x.ExchangeType == tipb.ExchangeType_Hash { // Set up stream count for all plans based on shuffle target type. var exchangeColCount = x.Schema().Len() switch helper.shuffleTarget { case window: streamCount := inferFineGrainedShuffleStreamCountForWindow(ctx, sctx, streamCountInfo, tiflashServerCountInfo) x.TiFlashFineGrainedShuffleStreamCount = streamCount for _, p := range helper.plans { p.TiFlashFineGrainedShuffleStreamCount = streamCount } case hashAgg: applyFlag, streamCount := checkFineGrainedShuffleForJoinAgg(ctx, sctx, streamCountInfo, tiflashServerCountInfo, exchangeColCount, 1200) // 1200: performance test result if applyFlag { x.TiFlashFineGrainedShuffleStreamCount = streamCount for _, p := range helper.plans { p.TiFlashFineGrainedShuffleStreamCount = streamCount } } case joinBuild: // Support hashJoin only when shuffle hash keys equals to join keys due to tiflash implementations if len(x.HashCols) != helper.joinKeysCount { break } applyFlag, streamCount := checkFineGrainedShuffleForJoinAgg(ctx, sctx, streamCountInfo, tiflashServerCountInfo, exchangeColCount, 600) // 600: performance test result if applyFlag { 
x.TiFlashFineGrainedShuffleStreamCount = streamCount for _, p := range helper.plans { p.TiFlashFineGrainedShuffleStreamCount = streamCount } } } } // exchange sender will break the data partition. helper.clear() setupFineGrainedShuffleInternal(ctx, sctx, x.children[0], helper, streamCountInfo, tiflashServerCountInfo) default: for _, child := range x.Children() { childHelper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: []*basePhysicalPlan{}} setupFineGrainedShuffleInternal(ctx, sctx, child, &childHelper, streamCountInfo, tiflashServerCountInfo) } } } // propagateProbeParents doesn't affect the execution plan, it only sets the probeParents field of a PhysicalPlan. // It's for handling the inconsistency between row count in the statsInfo and the recorded actual row count. Please // see comments in PhysicalPlan for details. func propagateProbeParents(plan PhysicalPlan, probeParents []PhysicalPlan) { plan.setProbeParents(probeParents) switch x := plan.(type) { case *PhysicalApply, *PhysicalIndexJoin, *PhysicalIndexHashJoin, *PhysicalIndexMergeJoin: if join, ok := plan.(interface{ getInnerChildIdx() int }); ok { propagateProbeParents(plan.Children()[1-join.getInnerChildIdx()], probeParents) // The core logic of this method: // Record every Apply and Index Join we met, record it in a slice, and set it in their inner children. 
newParents := make([]PhysicalPlan, len(probeParents), len(probeParents)+1) copy(newParents, probeParents) newParents = append(newParents, plan) propagateProbeParents(plan.Children()[join.getInnerChildIdx()], newParents) } case *PhysicalTableReader: propagateProbeParents(x.tablePlan, probeParents) case *PhysicalIndexReader: propagateProbeParents(x.indexPlan, probeParents) case *PhysicalIndexLookUpReader: propagateProbeParents(x.indexPlan, probeParents) propagateProbeParents(x.tablePlan, probeParents) case *PhysicalIndexMergeReader: for _, pchild := range x.partialPlans { propagateProbeParents(pchild, probeParents) } propagateProbeParents(x.tablePlan, probeParents) default: for _, child := range plan.Children() { propagateProbeParents(child, probeParents) } } } func enableParallelApply(sctx sessionctx.Context, plan PhysicalPlan) PhysicalPlan { if !sctx.GetSessionVars().EnableParallelApply { return plan } // the parallel apply has three limitation: // 1. the parallel implementation now cannot keep order; // 2. the inner child has to support clone; // 3. if one Apply is in the inner side of another Apply, it cannot be parallel, for example: // The topology of 3 Apply operators are A1(A2, A3), which means A2 is the outer child of A1 // while A3 is the inner child. Then A1 and A2 can be parallel and A3 cannot. if apply, ok := plan.(*PhysicalApply); ok { outerIdx := 1 - apply.InnerChildIdx noOrder := len(apply.GetChildReqProps(outerIdx).SortItems) == 0 // limitation 1 _, err := SafeClone(apply.Children()[apply.InnerChildIdx]) supportClone := err == nil // limitation 2 if noOrder && supportClone { apply.Concurrency = sctx.GetSessionVars().ExecutorConcurrency } else { sctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("Some apply operators can not be executed in parallel")) } // because of the limitation 3, we cannot parallelize Apply operators in this Apply's inner size, // so we only invoke recursively for its outer child. 
apply.SetChild(outerIdx, enableParallelApply(sctx, apply.Children()[outerIdx])) return apply } for i, child := range plan.Children() { plan.SetChild(i, enableParallelApply(sctx, child)) } return plan } // LogicalOptimizeTest is just exported for test. func LogicalOptimizeTest(ctx context.Context, flag uint64, logic LogicalPlan) (LogicalPlan, error) { return logicalOptimize(ctx, flag, logic) } func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (LogicalPlan, error) { if logic.SCtx().GetSessionVars().StmtCtx.EnableOptimizerDebugTrace { debugtrace.EnterContextCommon(logic.SCtx()) defer debugtrace.LeaveContextCommon(logic.SCtx()) } opt := defaultLogicalOptimizeOption() vars := logic.SCtx().GetSessionVars() if vars.StmtCtx.EnableOptimizeTrace { vars.StmtCtx.OptimizeTracer = &tracing.OptimizeTracer{} tracer := &tracing.LogicalOptimizeTracer{ Steps: make([]*tracing.LogicalRuleOptimizeTracer, 0), } opt = opt.withEnableOptimizeTracer(tracer) defer func() { vars.StmtCtx.OptimizeTracer.Logical = tracer }() } var err error for i, rule := range optRuleList { // The order of flags is same as the order of optRule in the list. // We use a bitmask to record which opt rules should be used. If the i-th bit is 1, it means we should // apply i-th optimizing rule. 
if flag&(1<<uint(i)) == 0 || isLogicalRuleDisabled(rule) { continue } opt.appendBeforeRuleOptimize(i, rule.name(), logic) logic, err = rule.optimize(ctx, logic, opt) if err != nil { return nil, err } } opt.recordFinalLogicalPlan(logic) return logic, err } func isLogicalRuleDisabled(r logicalOptRule) bool { disabled := DefaultDisabledLogicalRulesList.Load().(set.StringSet).Exist(r.name()) return disabled } func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan PhysicalPlan, cost float64, err error) { if logic.SCtx().GetSessionVars().StmtCtx.EnableOptimizerDebugTrace { debugtrace.EnterContextCommon(logic.SCtx()) defer debugtrace.LeaveContextCommon(logic.SCtx()) } if _, err := logic.recursiveDeriveStats(nil); err != nil { return nil, 0, err } preparePossibleProperties(logic) prop := &property.PhysicalProperty{ TaskTp: property.RootTaskType, ExpectedCnt: math.MaxFloat64, } opt := defaultPhysicalOptimizeOption() stmtCtx := logic.SCtx().GetSessionVars().StmtCtx if stmtCtx.EnableOptimizeTrace { tracer := &tracing.PhysicalOptimizeTracer{ PhysicalPlanCostDetails: make(map[string]*tracing.PhysicalPlanCostDetail), Candidates: make(map[int]*tracing.CandidatePlanTrace), } opt = opt.withEnableOptimizeTracer(tracer) defer func() { r := recover() if r != nil { panic(r) /* pass panic to upper function to handle */ } if err == nil { tracer.RecordFinalPlanTrace(plan.BuildPlanTrace()) stmtCtx.OptimizeTracer.Physical = tracer } }() } logic.SCtx().GetSessionVars().StmtCtx.TaskMapBakTS = 0 t, _, err := logic.findBestTask(prop, planCounter, opt) if err != nil { return nil, 0, err } if *planCounter > 0 { logic.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("The parameter of nth_plan() is out of range")) } if t.invalid() { errMsg := "Can't find a proper physical plan for this query" if config.GetGlobalConfig().DisaggregatedTiFlash && !logic.SCtx().GetSessionVars().IsMPPAllowed() { errMsg += ": cop and batchCop are not allowed in disaggregated tiflash mode, 
you should turn on tidb_allow_mpp switch" } return nil, 0, ErrInternal.GenWithStackByArgs(errMsg) } if err = t.plan().ResolveIndices(); err != nil { return nil, 0, err } cost, err = getPlanCost(t.plan(), property.RootTaskType, NewDefaultPlanCostOption()) return t.plan(), cost, err } // eliminateUnionScanAndLock set lock property for PointGet and BatchPointGet and eliminates UnionScan and Lock. func eliminateUnionScanAndLock(sctx sessionctx.Context, p PhysicalPlan) PhysicalPlan { var pointGet *PointGetPlan var batchPointGet *BatchPointGetPlan var physLock *PhysicalLock var unionScan *PhysicalUnionScan iteratePhysicalPlan(p, func(p PhysicalPlan) bool { if len(p.Children()) > 1 { return false } switch x := p.(type) { case *PointGetPlan: pointGet = x case *BatchPointGetPlan: batchPointGet = x case *PhysicalLock: physLock = x case *PhysicalUnionScan: unionScan = x } return true }) if pointGet == nil && batchPointGet == nil { return p } if physLock == nil && unionScan == nil { return p } if physLock != nil { lock, waitTime := getLockWaitTime(sctx, physLock.Lock) if !lock { return p } if pointGet != nil { pointGet.Lock = lock pointGet.LockWaitTime = waitTime } else { batchPointGet.Lock = lock batchPointGet.LockWaitTime = waitTime } } return transformPhysicalPlan(p, func(p PhysicalPlan) PhysicalPlan { if p == physLock { return p.Children()[0] } if p == unionScan { return p.Children()[0] } return p }) } func iteratePhysicalPlan(p PhysicalPlan, f func(p PhysicalPlan) bool) { if !f(p) { return } for _, child := range p.Children() { iteratePhysicalPlan(child, f) } } func transformPhysicalPlan(p PhysicalPlan, f func(p PhysicalPlan) PhysicalPlan) PhysicalPlan { for i, child := range p.Children() { p.Children()[i] = transformPhysicalPlan(child, f) } return f(p) } func existsCartesianProduct(p LogicalPlan) bool { if join, ok := p.(*LogicalJoin); ok && len(join.EqualConditions) == 0 { return join.JoinType == InnerJoin || join.JoinType == LeftOuterJoin || join.JoinType == 
RightOuterJoin } for _, child := range p.Children() { if existsCartesianProduct(child) { return true } } return false } // DefaultDisabledLogicalRulesList indicates the logical rules which should be banned. var DefaultDisabledLogicalRulesList *atomic.Value func init() { expression.EvalAstExpr = evalAstExpr expression.RewriteAstExpr = rewriteAstExpr DefaultDisabledLogicalRulesList = new(atomic.Value) DefaultDisabledLogicalRulesList.Store(set.NewStringSet()) } func disableReuseChunkIfNeeded(sctx sessionctx.Context, plan PhysicalPlan) { if !sctx.GetSessionVars().IsAllocValid() { return } if checkOverlongColType(sctx, plan) { return } for _, child := range plan.Children() { disableReuseChunkIfNeeded(sctx, child) } } // checkOverlongColType Check if read field type is long field. func checkOverlongColType(sctx sessionctx.Context, plan PhysicalPlan) bool { if plan == nil { return false } switch plan.(type) { case *PhysicalTableReader, *PhysicalIndexReader, *PhysicalIndexLookUpReader, *PhysicalIndexMergeReader, *PointGetPlan: if existsOverlongType(plan.Schema()) { sctx.GetSessionVars().ClearAlloc(nil, false) return true } } return false } // existsOverlongType Check if exists long type column. func existsOverlongType(schema *expression.Schema) bool { if schema == nil { return false } for _, column := range schema.Columns { switch column.RetType.GetType() { case mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob, mysql.TypeJSON: return true case mysql.TypeVarString, mysql.TypeVarchar: // if the column is varchar and the length of // the column is defined to be more than 1000, // the column is considered a large type and // disable chunk_reuse. if column.RetType.GetFlen() > 1000 { return true } } } return false }
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package serviceaffinity import ( "context" "fmt" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) const ( // Name is the name of the plugin used in the plugin registry and configurations. Name = "ServiceAffinity" // preFilterStateKey is the key in CycleState to ServiceAffinity pre-computed data. // Using the name of the plugin will likely help us avoid collisions with other plugins. preFilterStateKey = "PreFilter" + Name // ErrReason is used for CheckServiceAffinity predicate error. ErrReason = "node(s) didn't match service affinity" ) // preFilterState computed at PreFilter and used at Filter. type preFilterState struct { matchingPodList []*v1.Pod matchingPodServices []*v1.Service } // Clone the prefilter state. func (s *preFilterState) Clone() framework.StateData { if s == nil { return nil } copy := preFilterState{} copy.matchingPodServices = append([]*v1.Service(nil), s.matchingPodServices...) copy.matchingPodList = append([]*v1.Pod(nil), s.matchingPodList...) return &copy } // New initializes a new plugin and returns it. 
// New builds the ServiceAffinity plugin from the typed plugin args and the
// framework handle, wiring in the snapshot shared lister and the service
// lister from the shared informer factory.
func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
	args, err := getArgs(plArgs)
	if err != nil {
		return nil, err
	}
	serviceLister := handle.SharedInformerFactory().Core().V1().Services().Lister()
	return &ServiceAffinity{
		sharedLister:  handle.SnapshotSharedLister(),
		serviceLister: serviceLister,
		args:          args,
	}, nil
}

// getArgs type-asserts the runtime.Object into ServiceAffinityArgs and
// returns it by value; any other concrete type is a configuration error.
func getArgs(obj runtime.Object) (config.ServiceAffinityArgs, error) {
	ptr, ok := obj.(*config.ServiceAffinityArgs)
	if !ok {
		return config.ServiceAffinityArgs{}, fmt.Errorf("want args to be of type ServiceAffinityArgs, got %T", obj)
	}
	return *ptr, nil
}

// ServiceAffinity is a plugin that checks service affinity.
type ServiceAffinity struct {
	args          config.ServiceAffinityArgs
	sharedLister  framework.SharedLister
	serviceLister corelisters.ServiceLister
}

var _ framework.PreFilterPlugin = &ServiceAffinity{}
var _ framework.FilterPlugin = &ServiceAffinity{}
var _ framework.ScorePlugin = &ServiceAffinity{}

// Name returns name of the plugin. It is used in logs, etc.
func (pl *ServiceAffinity) Name() string {
	return Name
}

// createPreFilterState lists the services matching the pod and the pods (in
// the pod's namespace) matching the pod's labels, to be cached in CycleState.
func (pl *ServiceAffinity) createPreFilterState(pod *v1.Pod) (*preFilterState, error) {
	if pod == nil {
		return nil, fmt.Errorf("a pod is required to calculate service affinity preFilterState")
	}
	// Store services which match the pod.
	matchingPodServices, err := helper.GetPodServices(pl.serviceLister, pod)
	if err != nil {
		return nil, fmt.Errorf("listing pod services: %v", err.Error())
	}
	selector := createSelectorFromLabels(pod.Labels)

	// consider only the pods that belong to the same namespace
	// NOTE(review): the namespace restriction happens inside filterPods,
	// which receives pod.Namespace — confirm against its definition.
	nodeInfos, err := pl.sharedLister.NodeInfos().List()
	if err != nil {
		return nil, fmt.Errorf("listing nodeInfos: %v", err.Error())
	}
	matchingPodList := filterPods(nodeInfos, selector, pod.Namespace)

	return &preFilterState{
		matchingPodList:     matchingPodList,
		matchingPodServices: matchingPodServices,
	}, nil
}

// PreFilter invoked at the prefilter extension point.
func (pl *ServiceAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status { s, err := pl.createPreFilterState(pod) if err != nil { return framework.NewStatus(framework.Error, fmt.Sprintf("could not create preFilterState: %v", err)) } cycleState.Write(preFilterStateKey, s) return nil } // PreFilterExtensions returns prefilter extensions, pod add and remove. func (pl *ServiceAffinity) PreFilterExtensions() framework.PreFilterExtensions { return pl } // AddPod from pre-computed data in cycleState. func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) } // If addedPod is in the same namespace as the pod, update the list // of matching pods if applicable. if podToAdd.Namespace != podToSchedule.Namespace { return nil } selector := createSelectorFromLabels(podToSchedule.Labels) if selector.Matches(labels.Set(podToAdd.Labels)) { s.matchingPodList = append(s.matchingPodList, podToAdd) } return nil } // RemovePod from pre-computed data in cycleState. func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) } if len(s.matchingPodList) == 0 || podToRemove.Namespace != s.matchingPodList[0].Namespace { return nil } for i, pod := range s.matchingPodList { if pod.Name == podToRemove.Name && pod.Namespace == podToRemove.Namespace { s.matchingPodList = append(s.matchingPodList[:i], s.matchingPodList[i+1:]...) 
break } } return nil } func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { c, err := cycleState.Read(preFilterStateKey) if err != nil { // preFilterState doesn't exist, likely PreFilter wasn't invoked. return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err) } if c == nil { return nil, nil } s, ok := c.(*preFilterState) if !ok { return nil, fmt.Errorf("%+v convert to interpodaffinity.state error", c) } return s, nil } // Filter matches nodes in such a way to force that // ServiceAffinity.labels are homogeneous for pods that are scheduled to a node. // (i.e. it returns true IFF this pod can be added to this node such that all other pods in // the same service are running on nodes with the exact same ServiceAffinity.label values). // // For example: // If the first pod of a service was scheduled to a node with label "region=foo", // all the other subsequent pods belong to the same service will be schedule on // nodes with the same "region=foo" label. // // Details: // // If (the svc affinity labels are not a subset of pod's label selectors ) // The pod has all information necessary to check affinity, the pod's label selector is sufficient to calculate // the match. // Otherwise: // Create an "implicit selector" which guarantees pods will land on nodes with similar values // for the affinity labels. // // To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace. // These backfilled labels in the selector "L" are defined like so: // - L is a label that the ServiceAffinity object needs as a matching constraint. // - L is not defined in the pod itself already. // - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value. 
func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if len(pl.args.AffinityLabels) == 0 { return nil } node := nodeInfo.Node() if node == nil { return framework.NewStatus(framework.Error, "node not found") } s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) } pods, services := s.matchingPodList, s.matchingPodServices filteredPods := nodeInfo.FilterOutPods(pods) // check if the pod being scheduled has the affinity labels specified in its NodeSelector affinityLabels := findLabelsInSet(pl.args.AffinityLabels, labels.Set(pod.Spec.NodeSelector)) // Step 1: If we don't have all constraints, introspect nodes to find the missing constraints. if len(pl.args.AffinityLabels) > len(affinityLabels) { if len(services) > 0 { if len(filteredPods) > 0 { nodeWithAffinityLabels, err := pl.sharedLister.NodeInfos().Get(filteredPods[0].Spec.NodeName) if err != nil { return framework.NewStatus(framework.Error, "node not found") } addUnsetLabelsToMap(affinityLabels, pl.args.AffinityLabels, labels.Set(nodeWithAffinityLabels.Node().Labels)) } } } // Step 2: Finally complete the affinity predicate based on whatever set of predicates we were able to find. if createSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) { return nil } return framework.NewStatus(framework.Unschedulable, ErrReason) } // Score invoked at the Score extension point. 
func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { nodeInfo, err := pl.sharedLister.NodeInfos().Get(nodeName) if err != nil { return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) } node := nodeInfo.Node() if node == nil { return 0, framework.NewStatus(framework.Error, fmt.Sprintf("node not found")) } // Pods matched namespace,selector on current node. var selector labels.Selector if services, err := helper.GetPodServices(pl.serviceLister, pod); err == nil && len(services) > 0 { selector = labels.SelectorFromSet(services[0].Spec.Selector) } else { selector = labels.NewSelector() } if len(nodeInfo.Pods) == 0 || selector.Empty() { return 0, nil } var score int64 for _, existingPod := range nodeInfo.Pods { // Ignore pods being deleted for spreading purposes // Similar to how it is done for SelectorSpreadPriority if pod.Namespace == existingPod.Pod.Namespace && existingPod.Pod.DeletionTimestamp == nil { if selector.Matches(labels.Set(existingPod.Pod.Labels)) { score++ } } } return score, nil } // NormalizeScore invoked after scoring all nodes. func (pl *ServiceAffinity) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { reduceResult := make([]float64, len(scores)) for _, label := range pl.args.AntiAffinityLabelsPreference { if err := pl.updateNodeScoresForLabel(pl.sharedLister, scores, reduceResult, label); err != nil { return framework.NewStatus(framework.Error, err.Error()) } } // Update the result after all labels have been evaluated. for i, nodeScore := range reduceResult { scores[i].Score = int64(nodeScore) } return nil } // updateNodeScoresForLabel updates the node scores for a single label. 
Note it does not update the // original result from the map phase directly, but instead updates the reduceResult, which is used // to update the original result finally. This makes sure that each call to updateNodeScoresForLabel // receives the same mapResult to work with. // Why are doing this? This is a workaround for the migration from priorities to score plugins. // Historically the priority is designed to handle only one label, and multiple priorities are configured // to work with multiple labels. Using multiple plugins is not allowed in the new framework. Therefore // we need to modify the old priority to be able to handle multiple labels so that it can be mapped // to a single plugin. // TODO: This will be deprecated soon. func (pl *ServiceAffinity) updateNodeScoresForLabel(sharedLister framework.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error { var numServicePods int64 var labelValue string podCounts := map[string]int64{} labelNodesStatus := map[string]string{} maxPriorityFloat64 := float64(framework.MaxNodeScore) for _, nodePriority := range mapResult { numServicePods += nodePriority.Score nodeInfo, err := sharedLister.NodeInfos().Get(nodePriority.Name) if err != nil { return err } if !labels.Set(nodeInfo.Node().Labels).Has(label) { continue } labelValue = labels.Set(nodeInfo.Node().Labels).Get(label) labelNodesStatus[nodePriority.Name] = labelValue podCounts[labelValue] += nodePriority.Score } //score int - scale of 0-maxPriority // 0 being the lowest priority and maxPriority being the highest for i, nodePriority := range mapResult { labelValue, ok := labelNodesStatus[nodePriority.Name] if !ok { continue } // initializing to the default/max node score of maxPriority fScore := maxPriorityFloat64 if numServicePods > 0 { fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[labelValue]) / float64(numServicePods)) } // The score of current label only accounts for 1/len(s.labels) of the total score. 
// The policy API definition only allows a single label to be configured, associated with a weight. // This is compensated by the fact that the total weight is the sum of all weights configured // in each policy config. reduceResult[i] += fScore / float64(len(pl.args.AntiAffinityLabelsPreference)) } return nil } // ScoreExtensions of the Score plugin. func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions { return pl } // addUnsetLabelsToMap backfills missing values with values we find in a map. func addUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) { for _, l := range labelsToAdd { // if the label is already there, dont overwrite it. if _, exists := aL[l]; exists { continue } // otherwise, backfill this label. if labelSet.Has(l) { aL[l] = labelSet.Get(l) } } } // createSelectorFromLabels is used to define a selector that corresponds to the keys in a map. func createSelectorFromLabels(aL map[string]string) labels.Selector { if len(aL) == 0 { return labels.Everything() } return labels.Set(aL).AsSelector() } // filterPods filters pods outside a namespace from the given list. func filterPods(nodeInfos []*framework.NodeInfo, selector labels.Selector, ns string) []*v1.Pod { maxSize := 0 for _, n := range nodeInfos { maxSize += len(n.Pods) } pods := make([]*v1.Pod, 0, maxSize) for _, n := range nodeInfos { for _, p := range n.Pods { if p.Pod.Namespace == ns && selector.Matches(labels.Set(p.Pod.Labels)) { pods = append(pods, p.Pod) } } } return pods } // findLabelsInSet gets as many key/value pairs as possible out of a label set. func findLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string { aL := make(map[string]string) for _, l := range labelsToKeep { if selector.Has(l) { aL[l] = selector.Get(l) } } return aL }
// Package lambdas provides helper generic functions. // // These functions are especially helpful in combination with other `genesis` packages. package lambdas
//go:generate protoc --go_out=. messages/proxy.proto package proxy
package getter import ( "testing" "github.com/stretchr/testify/assert" "github.com/quilt/quilt/api" "github.com/quilt/quilt/api/client" "github.com/quilt/quilt/api/client/mocks" "github.com/quilt/quilt/db" ) func TestGetLeaderClient(t *testing.T) { t.Parallel() passedClient := &mocks.Client{} mockGetter := mockAddrClientGetter{ func(host string) (client.Client, error) { switch host { // One machine doesn't know the LeaderIP case api.RemoteAddress("8.8.8.8"): return &mocks.Client{ EtcdReturn: []db.Etcd{ { LeaderIP: "", }, }, }, nil // The other machine knows the LeaderIP case api.RemoteAddress("9.9.9.9"): return &mocks.Client{ EtcdReturn: []db.Etcd{ { LeaderIP: "leader-priv", }, }, }, nil case api.RemoteAddress("leader"): return passedClient, nil default: t.Fatalf("Unexpected call to getClient with host %s", host) } panic("unreached") }, } localClient := &mocks.Client{ MachineReturn: []db.Machine{ { PublicIP: "8.8.8.8", }, { PublicIP: "9.9.9.9", }, { PublicIP: "leader", PrivateIP: "leader-priv", }, }, } res, err := clientGetterImpl{mockGetter}.LeaderClient(localClient) assert.Nil(t, err) assert.Equal(t, passedClient, res) } func TestNoLeader(t *testing.T) { t.Parallel() mockGetter := mockAddrClientGetter{ func(host string) (client.Client, error) { // No client knows the leader IP. 
return &mocks.Client{ EtcdReturn: []db.Etcd{ { LeaderIP: "", }, }, }, nil }, } localClient := &mocks.Client{ MachineReturn: []db.Machine{ { PublicIP: "8.8.8.8", }, { PublicIP: "9.9.9.9", }, }, } _, err := clientGetterImpl{mockGetter}.LeaderClient(localClient) assert.EqualError(t, err, "no leader found") } func TestGetContainerClient(t *testing.T) { t.Parallel() targetContainer := "1" workerHost := "worker" leaderHost := "leader" passedClient := &mocks.Client{} mockGetter := mockAddrClientGetter{ func(host string) (client.Client, error) { switch host { case api.RemoteAddress(leaderHost): return &mocks.Client{ ContainerReturn: []db.Container{ { StitchID: targetContainer, Minion: workerHost, }, { StitchID: "5", Minion: "bad", }, }, EtcdReturn: []db.Etcd{ { LeaderIP: leaderHost, }, }, }, nil case api.RemoteAddress(workerHost): return passedClient, nil default: t.Fatalf("Unexpected call to getClient with host %s", host) } panic("unreached") }, } localClient := &mocks.Client{ MachineReturn: []db.Machine{ { PublicIP: leaderHost, PrivateIP: leaderHost, }, { PrivateIP: workerHost, PublicIP: workerHost, }, }, } res, err := clientGetterImpl{mockGetter}.ContainerClient( localClient, targetContainer) assert.Nil(t, err) assert.Equal(t, passedClient, res) } type mockAddrClientGetter struct { getter func(host string) (client.Client, error) } func (mcg mockAddrClientGetter) Client(host string) (client.Client, error) { return mcg.getter(host) }
package sdkc // ErrorResponseWriter writes a error response failure type ErrorResponseWriter struct { } // NewErrorResponseWriter creates a new error response writer. func NewErrorResponseWriter() *ErrorResponseWriter { return &ErrorResponseWriter{} } func (erw *ErrorResponseWriter) Write(p []byte) (n int, err error) { if err = lambdaWriteError(string(p)); err == nil { n = len(p) } return } // ResponseWriter is writes a success response payload type ResponseWriter struct { } // NewResponseWriter creates a new response writer. func NewResponseWriter() *ResponseWriter { return &ResponseWriter{} } func (rw *ResponseWriter) Write(p []byte) (n int, err error) { if err = lambdaWriteResponse(p); err == nil { n = len(p) } return }
package reader import ( "bufio" "bytes" "encoding/binary" "errors" "fmt" "io/ioutil" "os" "os/user" "reflect" "strings" "syscall" ) const ( inputs = "/sys/class/input/event%d/device/uevent" deviceFile = "/dev/input/event%d" maxFiles = 255 ) // event types const ( EvSYN = 0x00 EvKEY = 0x01 EvREL = 0x02 EvABS = 0x03 EvMSC = 0x04 EvSW = 0x05 EvLED = 0x11 EvSND = 0x12 EvREP = 0x14 EvFF = 0x15 EvPWR = 0x16 EvFFStatus = 0x17 EvMAX = 0x1f ) var eventsize = int(reflect.TypeOf(InputEvent{}).Size()) // Reader represents a Vending Machine compatible reader type Reader struct { dev *InputDevice } type InputDevice struct { Id int Name string } type InputEvent struct { Time syscall.Timeval Type uint16 Code uint16 Value int32 } // GetReader returns a detected reader or appropriate error func GetReader(readerName string) (*Reader, error) { var devices []*InputDevice if err := checkRoot(); err != nil { return nil, err } for i := 0; i < maxFiles; i++ { buff, err := ioutil.ReadFile(fmt.Sprintf(inputs, i)) if err != nil { break } devices = append(devices, newInputDeviceReader(buff, i)) } for _, d := range devices { if d.Name == readerName { return &Reader{ dev: d, }, nil } } return nil, errors.New("no suitable reader found") } func checkRoot() error { u, err := user.Current() if err != nil { return err } if u.Uid != "0" { return fmt.Errorf("cannot read device files. 
Are you running as root?") } return nil } func newInputDeviceReader(buff []byte, id int) *InputDevice { rd := bufio.NewReader(bytes.NewReader(buff)) rd.ReadLine() dev, _, _ := rd.ReadLine() split := strings.Split(string(dev), "=") return &InputDevice{ Id: id, Name: strings.Trim(split[1], "\""), } } // GetCardChannel returns channel that spits out card id everytime card is swiped func (r *Reader) GetCardChannel() (<-chan string, error) { ret := make(chan string, 512) if err := checkRoot(); err != nil { close(ret) return ret, err } fd, err := os.Open(fmt.Sprintf(deviceFile, r.dev.Id)) if err != nil { close(ret) return ret, err } go func() { tmp := make([]byte, eventsize) event := InputEvent{} id := "" for { _, err := fd.Read(tmp) if err != nil { close(ret) break } if err := binary.Read(bytes.NewBuffer(tmp), binary.LittleEndian, &event); err != nil { panic(err) } if event.Type == EvKEY && event.Value == 1 { // We only read Key Events and we only read Key pressed (1) if event.KeyString() == "ENTER" { // If we get Enter we have all the read chars and return the string. ret <- id id = "" } else { id += event.KeyString() // Concatenate to get full ID } } } }() return ret, nil } // Keystring Returns the char of a key event func (i *InputEvent) KeyString() string { return keyCodeMap[i.Code] }
package main import ( "testing" "github.com/Jeffail/gabs/v2" ) func TestAddObjectSimple(t *testing.T) { input := []byte(`{}`) correctResult := `{"foo":"bar"}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addObjectOptions{ json: j, path: "foo", delimiter: ".", values: []string{"bar"}, } result, err := addObject(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddObjectWithObject(t *testing.T) { input := []byte(`{"foo":{}}`) correctResult := `{"foo":{"bar":"baz"}}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addObjectOptions{ json: j, path: "foo", delimiter: ".", keys: []string{"bar"}, values: []string{"baz"}, } result, err := addObject(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddObjectExisting(t *testing.T) { input := []byte(`{"foo":"bar"}`) correctResult := `{"baz":"qux","foo":"bar"}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addObjectOptions{ json: j, path: "baz", delimiter: ".", values: []string{"qux"}, } result, err := addObject(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddObjectNested(t *testing.T) { input := []byte(`{"foo":{"bar":"qux"}}`) correctResult := `{"foo":{"bar":"qux","omg":"wtf"}}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addObjectOptions{ json: j, path: "foo.omg", delimiter: ".", values: []string{"wtf"}, } result, err := addObject(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddObjectNumber(t *testing.T) { input := []byte(`{"foo":"bar"}`) correctResult := `{"foo":1}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options 
:= addObjectOptions{ json: j, path: "foo", delimiter: ".", values: []string{"1"}, } result, err := addObject(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddObjectsAndValues(t *testing.T) { input := []byte(`{}`) correctResult := `{"baz":"qux","foo":"bar"}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addObjectOptions{ json: j, delimiter: ".", keys: []string{"foo", "baz"}, values: []string{"bar", "qux"}, } result, err := addObject(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArraySimple(t *testing.T) { input := []byte(`{}`) correctResult := `{"foo":["a","b","c"]}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayOptions{ json: j, path: "foo", delimiter: ".", values: []string{"a", "b", "c"}, } result, err := addArray(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArrayExisting(t *testing.T) { input := []byte(`{"foo":"bar"}`) correctResult := `{"baz":["a","b","c"],"foo":"bar"}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayOptions{ json: j, path: "baz", delimiter: ".", values: []string{"a", "b", "c"}, } result, err := addArray(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArrayNested(t *testing.T) { input := []byte(`{"foo":{"bar":"qux"}}`) correctResult := `{"foo":{"bar":"qux","omg":["a","b","c"]}}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayOptions{ json: j, path: "foo.omg", delimiter: ".", values: []string{"a", "b", "c"}, } result, err := addArray(options) if err != nil { t.Error(err) } if result.String() != correctResult { 
t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArrayNumber(t *testing.T) { input := []byte(`{"foo":"bar"}`) correctResult := `{"foo":[1,2,3]}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayOptions{ json: j, path: "foo", delimiter: ".", values: []string{"1", "2", "3"}, } result, err := addArray(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArrayElementSimple(t *testing.T) { input := []byte(`{"foo":[1,2,3]}`) correctResult := `{"foo":[1,2,3,4]}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayElementOptions{ json: j, path: "foo", delimiter: ".", value: "4", exists: false, } result, err := addArrayElement(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArrayElementPosition(t *testing.T) { input := []byte(`{"foo":[1,2,3]}`) correctResult := `{"foo":[1,4,3]}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayElementOptions{ json: j, path: "foo.1", delimiter: ".", value: "4", exists: false, } result, err := addArrayElement(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } correctResult = `{"foo":[1,"a",3]}` options = addArrayElementOptions{ json: j, path: "foo.1", delimiter: ".", value: "a", exists: false, } result, err = addArrayElement(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } } func TestAddArrayElementContains(t *testing.T) { input := []byte(`{"foo":[1,2,3]}`) correctResult := `{"foo":[1,2,3]}` j, err := gabs.ParseJSON(input) if err != nil { t.Error(err) } options := addArrayElementOptions{ json: j, path: "foo", delimiter: ".", value: "3", exists: true, } result, err := 
addArrayElement(options) if err != nil { t.Error(err) } if result.String() != correctResult { t.Errorf("Wanted %s\nGot %s", correctResult, result) } }
package main

import "fmt"

// main builds a small name→age map and prints it.
func main() {
	// BUG FIX: the original declared `var studentsAge map[string]int`
	// (a nil map) and then assigned into it, which panics at runtime
	// with "assignment to entry in nil map". A map must be initialized
	// with make (or a literal) before writing to it.
	studentsAge := make(map[string]int, 2)
	studentsAge["john"] = 32
	studentsAge["bob"] = 31
	fmt.Println(studentsAge)
}
package server import ( "errors" "net/http" ) func (h *handler) needAuth(w http.ResponseWriter, r *http.Request) { w.Header().Set("WWW-Authenticate", "Basic realm=\""+r.URL.Path+"\"") } func (h *handler) verifyAuth(r *http.Request) (username string, success bool, err error) { var password string var hasAuthReq bool username, password, hasAuthReq = r.BasicAuth() if hasAuthReq { success = h.users.Auth(username, password) if !success { err = errors.New(r.RemoteAddr + " auth failed") } } else { err = errors.New(r.RemoteAddr + " missing auth info") } return } func (h *handler) authFailed(w http.ResponseWriter) { w.WriteHeader(http.StatusUnauthorized) }
package get import ( "context" "fmt" "os" "strings" "github.com/devopstoday11/tarian/pkg/logger" "github.com/devopstoday11/tarian/pkg/tarianctl/client" "github.com/devopstoday11/tarian/pkg/tarianctl/util" "github.com/devopstoday11/tarian/pkg/tarianpb" "github.com/olekukonko/tablewriter" cli "github.com/urfave/cli/v2" "gopkg.in/yaml.v3" ) func NewGetConstraintsCommand() *cli.Command { return &cli.Command{ Name: "constraints", Usage: "Get constraints from the Tarian Server.", Flags: []cli.Flag{&cli.StringFlag{ Name: "output", Aliases: []string{"o"}, Usage: "Output format. Valid values: yaml", Value: "", }}, Action: func(c *cli.Context) error { logger := logger.GetLogger(c.String("log-level"), c.String("log-encoding")) util.SetLogger(logger) opts := util.ClientOptionsFromCliContext(c) client, _ := client.NewConfigClient(c.String("server-address"), opts...) response, err := client.GetConstraints(context.Background(), &tarianpb.GetConstraintsRequest{}) if err != nil { logger.Fatal(err) } outputFormat := c.String("output") if outputFormat == "" { table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Namespace", "Constraint Name", "Selector", "Allowed Processes", "Allowed Files"}) table.SetColumnSeparator(" ") table.SetCenterSeparator("-") table.SetAlignment(tablewriter.ALIGN_LEFT) for _, c := range response.GetConstraints() { table.Append([]string{c.GetNamespace(), c.GetName(), matchLabelsToString(c.GetSelector().GetMatchLabels()), allowedProcessesToString(c.GetAllowedProcesses()), allowedFilesToString(c.GetAllowedFiles())}) } table.Render() } else if outputFormat == "yaml" { for _, c := range response.GetConstraints() { d, err := yaml.Marshal(c) if err != nil { return err } fmt.Print(string(d)) fmt.Println("---") } } return nil }, } } func matchLabelsToString(labels []*tarianpb.MatchLabel) string { if len(labels) == 0 { return "" } str := strings.Builder{} str.WriteString("matchLabels:") for i, l := range labels { str.WriteString(l.GetKey()) 
str.WriteString("=") str.WriteString(l.GetValue()) if i < len(labels)-1 { str.WriteString(",") } } return str.String() } func allowedProcessesToString(rules []*tarianpb.AllowedProcessRule) string { str := strings.Builder{} for i, r := range rules { str.WriteString("regex:") str.WriteString(r.GetRegex()) if i < len(rules)-1 { str.WriteString(",") } } return str.String() } func allowedFilesToString(rules []*tarianpb.AllowedFileRule) string { str := strings.Builder{} for i, r := range rules { str.WriteString(r.GetName()) str.WriteString(":") str.WriteString(r.GetSha256Sum()) if i < len(rules)-1 { str.WriteString(",") } } return str.String() }
// Copyright (c) 2021 Zededa, Inc. // SPDX-License-Identifier: Apache-2.0 // Network Instance/underlay network IP Address Management, // App Number management Module // Allocate a small integer for each application UUID. // The number can not exceed 255 since we use the as IPv4 subnet numbers. // Persist the numbers across reboots/activation using uuidpairtonum package // When there are no free numbers then reuse the unused numbers. // We try to give the application with IsZedmanager=true appnum zero. package zedrouter import ( "fmt" "github.com/lf-edge/eve/pkg/pillar/types" "github.com/lf-edge/eve/pkg/pillar/uuidpairtonum" "github.com/satori/go.uuid" ) // mapped on base UUID var appNumBase map[string]*types.Bitmap const ( appNumOnUNetType = "appNumOnUnet" ) // Read the existing appNums out of what we published/checkpointed. // Also read what we have persisted before a reboot // Store in reserved map since we will be asked to allocate them later. // Set bit in bitmap. func appNumOnUNetInit(ctx *zedrouterContext) { // initialize the base appNumBase = make(map[string]*types.Bitmap) pubAppNetworkStatus := ctx.pubAppNetworkStatus pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType items := pub.GetAll() for _, item := range items { appNumMap := item.(types.UUIDPairAndIfIdxToNum) if appNumMap.NumType != numType { continue } log.Functionf("appNumOnUNetInit found %v", appNumMap) appNum := appNumMap.Number baseID := appNumMap.BaseID appID := appNumMap.AppID ifIdx := appNumMap.IfIdx // If we have a config for the UUID Pair, we should mark it as // allocated; otherwise mark it as reserved. // XXX however, on startup we are not likely to have any // config yet. 
baseMap := appNumOnUNetBaseCreate(baseID) if baseMap.IsSet(appNum) { log.Errorf("Bitmap is already set for %s num %d", types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx), appNum) continue } log.Functionf("Reserving appNum %d for %s", appNum, types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) baseMap.Set(appNum) // Clear InUse uuidpairtonum.NumFree(log, pub, baseID, appID, ifIdx) } // In case zedrouter process restarted we fill in InUse from // AppNetworkStatus, underlay network entries items = pubAppNetworkStatus.GetAll() for _, item := range items { status := item.(types.AppNetworkStatus) appID := status.UUIDandVersion.UUID // If we have a config for the UUID we should mark it as // allocated; otherwise mark it as reserved. // XXX however, on startup we are not likely to have any // config yet. for i := range status.UnderlayNetworkList { ulStatus := &status.UnderlayNetworkList[i] baseID := ulStatus.Network baseMap := appNumOnUNetBaseGet(baseID) if baseMap == nil { continue } appNum, err := uuidpairtonum.NumGet(log, pub, baseID, appID, numType, ulStatus.IfIdx) if err != nil { continue } if !baseMap.IsSet(appNum) { log.Fatalf("Bitmap is not set for %s num %d", types.UUIDPairAndIfIdxToNumKey(baseID, appID, ulStatus.IfIdx), appNum) } log.Functionf("Marking InUse for appNum %d", appNum) // Set InUse uuidpairtonum.NumAllocate(log, pub, baseID, appID, appNum, false, numType, ulStatus.IfIdx) } } } // If an entry is not inUse and and its CreateTime were // before the agent started, then we free it up. 
func appNumMapOnUNetGC(ctx *zedrouterContext) { pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType log.Functionf("appNumOnUNetMapGC") freedCount := 0 items := pub.GetAll() for _, item := range items { appNumMap := item.(types.UUIDPairAndIfIdxToNum) if appNumMap.NumType != numType { continue } if appNumMap.InUse { continue } if appNumMap.CreateTime.After(ctx.agentStartTime) { continue } log.Functionf("appNumMapOnUNetGC: freeing %+v", appNumMap) appNumOnUNetFree(ctx, appNumMap.BaseID, appNumMap.AppID, appNumMap.IfIdx, false) freedCount++ } log.Functionf("appNumMapOnUNetGC freed %d", freedCount) } func appNumOnUNetAllocate(ctx *zedrouterContext, baseID uuid.UUID, appID uuid.UUID, isStatic bool, ifIdx uint32, isZedmanager bool) (int, error) { pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType baseMap := appNumOnUNetBaseCreate(baseID) // Do we already have a number? appNum, err := uuidpairtonum.NumGet(log, pub, baseID, appID, numType, ifIdx) if err == nil { log.Functionf("Found allocated appNum %d for %s", appNum, types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) if !baseMap.IsSet(appNum) { log.Fatalf("Bitmap value(%d) is not set", appNum) } // Set InUse and update time uuidpairtonum.NumAllocate(log, pub, baseID, appID, appNum, false, numType, ifIdx) return appNum, nil } // Find a free number in bitmap; look for zero if isZedmanager if isZedmanager && !baseMap.IsSet(0) { appNum = 0 log.Functionf("Allocating appNum %d for %s isZedmanager", appNum, types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) } else { // XXX could look for non-0xFF bytes first for efficiency appNum = -1 // for static we pick the topmost numbers so avoid consuming // dynamic IP address from a smallish DHCP range. 
if isStatic { for i := types.BitMapMax; i >= 0; i-- { if !baseMap.IsSet(i) { appNum = i log.Functionf("Allocating appNum %d for %s", appNum, types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) break } } } else { for i := 0; i <= types.BitMapMax; i++ { if !baseMap.IsSet(i) { appNum = i log.Functionf("Allocating appNum %d for %s", appNum, types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) break } } } if appNum == -1 { log.Functionf("Failed to find free appNum for %s. Reusing!", types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) oldAppID, oldAppNum, err := uuidpairtonum.NumGetOldestUnused(log, pub, baseID, numType) if err != nil { return appNum, fmt.Errorf("no free appNum") } log.Functionf("Reuse found appNum %d for %s. Reusing!", oldAppNum, types.UUIDPairAndIfIdxToNumKey(baseID, oldAppID, ifIdx)) uuidpairtonum.NumDelete(log, pub, baseID, oldAppID, ifIdx) baseMap.Clear(oldAppNum) appNum = oldAppNum } } if baseMap.IsSet(appNum) { log.Fatalf("Bitmap is already set for %d", appNum) } baseMap.Set(appNum) uuidpairtonum.NumAllocate(log, pub, baseID, appID, appNum, true, numType, ifIdx) return appNum, nil } func appNumOnUNetFree(ctx *zedrouterContext, baseID uuid.UUID, appID uuid.UUID, ifIdx uint32, fatal bool) { pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType appNum, err := uuidpairtonum.NumGet(log, pub, baseID, appID, numType, ifIdx) if err != nil { if fatal { log.Fatalf("num not found for %s", types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) } else { log.Warnf("num not found for %s", types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) } return } baseMap := appNumOnUNetBaseGet(baseID) if baseMap == nil { uuidpairtonum.NumDelete(log, pub, baseID, appID, ifIdx) return } // Check that number exists in the allocated numbers if !baseMap.IsSet(appNum) { if fatal { log.Fatalf("Bitmap is not set for %d", appNum) } else { log.Warnf("Bitmap is not set for %d", appNum) } } else { baseMap.Clear(appNum) } uuidpairtonum.NumDelete(log, pub, baseID, appID, 
ifIdx) } func appNumOnUNetClean(ctx *zedrouterContext, baseID uuid.UUID, appID uuid.UUID) { pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType pairs, err := uuidpairtonum.NumGetAll(log, pub, baseID, appID, numType) if err != nil { log.Fatalf("error getting NumGetAll for %s %s: %s", baseID, appID, err) } for _, el := range pairs { ifIdx := el.IfIdx appNum, err := uuidpairtonum.NumGet(log, pub, baseID, appID, numType, ifIdx) if err != nil { log.Fatalf("num not found for %s", types.UUIDPairAndIfIdxToNumKey(baseID, appID, ifIdx)) } baseMap := appNumOnUNetBaseGet(baseID) if baseMap == nil { uuidpairtonum.NumDelete(log, pub, baseID, appID, ifIdx) return } // Check that number exists in the allocated numbers if !baseMap.IsSet(appNum) { log.Fatalf("Bitmap is not set for %d", appNum) } baseMap.Clear(appNum) uuidpairtonum.NumDelete(log, pub, baseID, appID, ifIdx) } } func appNumOnUNetGet(ctx *zedrouterContext, baseID uuid.UUID, appID uuid.UUID, ifIdx uint32) (int, error) { pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType return uuidpairtonum.NumGet(log, pub, baseID, appID, numType, ifIdx) } // returns base bitMap for a given UUID func appNumOnUNetBaseGet(baseID uuid.UUID) *types.Bitmap { if baseMap, exist := appNumBase[baseID.String()]; exist { return baseMap } return nil } // Create application number Base for a given UUID func appNumOnUNetBaseCreate(baseID uuid.UUID) *types.Bitmap { if appNumOnUNetBaseGet(baseID) == nil { log.Functionf("appNumOnUNetBaseCreate (%s)", baseID.String()) appNumBase[baseID.String()] = new(types.Bitmap) } return appNumOnUNetBaseGet(baseID) } // Delete the application number Base for a given UUID func appNumOnUNetBaseDelete(ctx *zedrouterContext, baseID uuid.UUID) { appNumMap := appNumOnUNetBaseGet(baseID) if appNumMap == nil { log.Fatalf("appNumOnUNetBaseDelete: non-existent") } // check whether there are still some apps on // this network pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType items := 
pub.GetAll() for _, item := range items { appNumMap := item.(types.UUIDPairAndIfIdxToNum) if appNumMap.NumType != numType { continue } if appNumMap.BaseID == baseID { log.Fatalf("appNumOnUNetBaseDelete(%s): remaining: %v", baseID, appNumMap) } } log.Functionf("appNumOnUNetBaseDelete (%s)", baseID.String()) delete(appNumBase, baseID.String()) } // appNumOnUNetRefCount returns the number of (remaining) references to the network func appNumOnUNetRefCount(ctx *zedrouterContext, networkID uuid.UUID) int { appNumMap := appNumOnUNetBaseGet(networkID) if appNumMap == nil { log.Fatalf("appNumOnUNetRefCount: non map") } pub := ctx.pubUUIDPairAndIfIdxToNum numType := appNumOnUNetType items := pub.GetAll() count := 0 for _, item := range items { appNumMap := item.(types.UUIDPairAndIfIdxToNum) if appNumMap.NumType != numType { continue } if appNumMap.BaseID == networkID { log.Functionf("appNumOnUNetRefCount(%s): found: %v", networkID, appNumMap) count++ } } log.Functionf("appNumOnUNetRefCount(%s) found %d", networkID, count) return count }
package main

import (
	"io/ioutil"
	"log"

	"github.com/gin-gonic/gin"
	"gopkg.in/yaml.v2"
)

// check panics on any non-nil error; configuration loading is fatal at
// startup, so there is nothing sensible to recover to.
func check(e error) {
	if e != nil {
		panic(e)
	}
}

// Config mirrors /config/default.yml.
type Config struct {
	Log struct {
		Level string `yaml:"level" json:"level"`
	} `yaml:"log" json:"log"`
	Mongo struct {
		Endpoint string `yaml:"endpoint" json:"endpoint"`
	} `yaml:"mongodb" json:"mongo"`
	Redis struct {
		Endpoint string `yaml:"endpoint" json:"endpoint"`
	} `yaml:"redis" json:"redis"`
}

// main loads the YAML config and serves it back on GET /ping as JSON.
func main() {
	dat, err := ioutil.ReadFile("/config/default.yml")
	check(err)

	var data Config
	// BUG FIX: the Unmarshal error was previously discarded, so a malformed
	// YAML file silently produced a zero-value config.
	check(yaml.Unmarshal(dat, &data))

	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"config": data,
		})
	})
	log.Fatal(r.Run())
}
/* Copyright (C) 2016 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Use to parse oc cluster up -h options. package util import ( "regexp" "strings" ) func ParseOcHelpCommand(cmdOut []byte) []string { ocOptions := []string{} ocOptionRegex := regexp.MustCompile(`(?s)Options(.*)OpenShift images`) matches := ocOptionRegex.FindSubmatch(cmdOut) if matches != nil { tmpOptionsList := string(matches[0]) for _, value := range strings.Split(tmpOptionsList, "\n")[1:] { tmpOption := strings.Split(strings.Split(strings.TrimSpace(value), "=")[0], "--") if len(tmpOption) > 1 { ocOptions = append(ocOptions, tmpOption[1]) } } } else { return nil } return ocOptions } func FlagExist(ocCommandOptions []string, flag string) bool { for _, v := range ocCommandOptions { if v == flag { return true } } return false }
package main

import (
	"fmt"

	"github.com/jlb0906/micro-movie/aria2-srv/handler"
	aria2 "github.com/jlb0906/micro-movie/aria2-srv/proto/aria2"
	"github.com/jlb0906/micro-movie/aria2-srv/service"
	"github.com/jlb0906/micro-movie/basic"
	"github.com/jlb0906/micro-movie/basic/common"
	"github.com/jlb0906/micro-movie/basic/config"
	"github.com/micro/cli/v2"
	"github.com/micro/go-micro/v2"
	"github.com/micro/go-micro/v2/logger"
	"github.com/micro/go-micro/v2/registry"
	"github.com/micro/go-micro/v2/registry/etcd"
	grpc "github.com/micro/go-plugins/config/source/grpc/v2"
	microzap "github.com/micro/go-plugins/logger/zap/v2"
)

var (
	// appName is the key under which this service's config lives in the
	// remote config source.
	appName = "aria2_srv"
	// cfg is populated from the remote config in initCfg before the
	// service is constructed.
	cfg = &ariaCfg{}
)

// ariaCfg is the service configuration; it only embeds the shared AppCfg
// (name, version, etc.).
type ariaCfg struct {
	common.AppCfg
}

// main wires up logging, remote configuration, etcd registration, and the
// aria2 RPC handler, then runs the service until it exits.
func main() {
	logger.DefaultLogger, _ = microzap.NewLogger()

	// Initialize configuration, database, and related state.
	// NOTE: must happen before NewService, since cfg.Name/cfg.Version are
	// read below.
	initCfg()

	// Register with etcd.
	micReg := etcd.NewRegistry(registryOptions)

	// New Service
	srv := micro.NewService(
		micro.Name(cfg.Name),
		micro.Version(cfg.Version),
		micro.Registry(micReg),
	)

	// Initialise service; the Action runs once CLI flags are parsed.
	srv.Init(
		micro.Action(func(context *cli.Context) error {
			service.Init()
			handler.Init()
			return nil
		}),
	)

	// Register Handler
	aria2.RegisterAria2Handler(srv.Server(), new(handler.Aria2))

	// Run service
	if err := srv.Run(); err != nil {
		logger.Fatal(err.Error())
	}
}

// registryOptions points the registry at the etcd endpoint taken from the
// shared "etcd" config section. Panics if that section cannot be loaded.
func registryOptions(ops *registry.Options) {
	etcdCfg := &common.Etcd{}
	err := config.C().App("etcd", etcdCfg)
	if err != nil {
		panic(err)
	}
	ops.Addrs = []string{fmt.Sprintf("%s:%d", etcdCfg.Host, etcdCfg.Port)}
}

// initCfg loads this service's config section from the central gRPC config
// source (hard-coded 127.0.0.1:8600, path /micro). Panics on failure since
// the service cannot start without it.
func initCfg() {
	source := grpc.NewSource(
		grpc.WithAddress("127.0.0.1:8600"),
		grpc.WithPath("/micro"),
	)
	basic.Init(
		config.WithSource(source),
		config.WithApp(appName))
	err := config.C().App(appName, cfg)
	if err != nil {
		panic(err)
	}
	logger.Infof("[initCfg] 配置 %+v", cfg)
}
package webrpc

import (
	"log"
	"net/http"
	"sync"

	"github.com/gorilla/websocket"
)

// Server implements an RPC server.
type Server struct {
	// mu guards chans. ServeHTTP is invoked concurrently (one goroutine
	// per HTTP request), and Broadcast may run from arbitrary goroutines,
	// so unsynchronized map access was a data race.
	mu        sync.Mutex
	chans     map[string]*channel
	onConnect func(c *Conn)
	upgrader  websocket.Upgrader
}

// Config specifies a configuration for the server.
type Config struct {
	ReadBufferSize, WriteBufferSize int
	EnableCompression               bool
}

// NewServer creates a new server instance with default buffer sizes and
// compression enabled.
func NewServer() *Server {
	return NewServerWithConfig(Config{
		ReadBufferSize:    4096,
		WriteBufferSize:   4096,
		EnableCompression: true,
	})
}

// NewServerWithConfig creates a new server with config.
// Note: CheckOrigin accepts every origin; tighten it if cross-site
// connections must be rejected.
func NewServerWithConfig(c Config) *Server {
	return &Server{
		chans: map[string]*channel{},
		upgrader: websocket.Upgrader{
			ReadBufferSize:    c.ReadBufferSize,
			WriteBufferSize:   c.WriteBufferSize,
			EnableCompression: c.EnableCompression,
			CheckOrigin:       func(r *http.Request) bool { return true },
		},
	}
}

// getChannel returns the named channel, creating it on first use.
// Safe for concurrent use.
func (s *Server) getChannel(name string) *channel {
	s.mu.Lock()
	defer s.mu.Unlock()
	ch, ok := s.chans[name]
	if !ok {
		ch = newChannel(name)
		s.chans[name] = ch
	}
	return ch
}

// ServeHTTP upgrades a GET request to a websocket connection and pumps it
// until the client disconnects (readLoop returns).
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", 405)
		return
	}
	ws, err := s.upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	c := newConn(s, ws)
	go c.writeLoop()
	if s.onConnect != nil {
		s.onConnect(c)
	}
	c.readLoop()
}

// OnConnect sets the connection handler for this server.
// Must be called before the server starts accepting connections.
func (s *Server) OnConnect(handler func(c *Conn)) {
	s.onConnect = handler
}

// Broadcast sends a message on a given channel.
func (s *Server) Broadcast(chname, name string, args ...interface{}) error {
	msg, err := NewEvent(name, args...)
	if err != nil {
		return err
	}
	s.getChannel(chname).broadcast(msg, nil)
	return nil
}
//go:build !partners
// +build !partners

package service_test

import (
	"fmt"
	"github.com/APTrust/exchange/service"
	"github.com/APTrust/exchange/util/logger"
	"github.com/APTrust/exchange/util/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"io/ioutil"
	"net/http"
	"net/url"
	"testing"
	"time"
)

// Shared across all tests: one volume service instance listening on a
// fixed local port, started lazily by runService.
var port = 8818
var serviceUrl = fmt.Sprintf("http://127.0.0.1:%d", port)
var volumeService *service.VolumeService

// runService starts the shared volume service once and sleeps briefly so
// the listener is up before the first request.
// NOTE(review): the fixed port and sleep make this racy if the port is
// taken or startup is slow — confirm acceptable for this test suite.
func runService(t *testing.T) {
	if volumeService == nil {
		log := logger.DiscardLogger("test_volume_service")
		volumeService = service.NewVolumeService(port, log)
		require.NotNil(t, volumeService)
		go volumeService.Serve()
		time.Sleep(800 * time.Millisecond)
	}
}

// TestNewVolumeService just verifies the service can be constructed and
// started (via runService's require.NotNil).
func TestNewVolumeService(t *testing.T) {
	if testutil.RunningInCI() {
		t.Skip("Skipping volume service test because it looks like we're in the CI environment.")
	}
	runService(t)
}

// TestReserve exercises POST /reserve/ with a valid request, a request
// missing 'path', and a request missing 'bytes'.
func TestReserve(t *testing.T) {
	if testutil.RunningInCI() {
		t.Skip("Skipping volume service test because it looks like we're in the CI environment.")
	}
	runService(t)
	reserveUrl := fmt.Sprintf("%s/reserve/", serviceUrl)

	// Start with a good request
	params := url.Values{
		"path":  {"/tmp/some_file"},
		"bytes": {"8000"},
	}
	resp, err := http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err := ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected := `{"Succeeded":true,"ErrorMessage":"","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusOK, resp.StatusCode)

	// Bad request: no path
	params = url.Values{
		"bytes": {"8000"},
	}
	resp, err = http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err = ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected = `{"Succeeded":false,"ErrorMessage":"Param 'path' is required.","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)

	// Bad request: no value for bytes
	params = url.Values{
		"path": {"/tmp/some_file"},
	}
	resp, err = http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err = ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected = `{"Succeeded":false,"ErrorMessage":"Param 'bytes' must be an integer greater than zero.","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}

// TestRelease exercises POST /release/ with and without the required
// 'path' parameter.
func TestRelease(t *testing.T) {
	if testutil.RunningInCI() {
		t.Skip("Skipping volume service test because it looks like we're in the CI environment.")
	}
	runService(t)
	reserveUrl := fmt.Sprintf("%s/release/", serviceUrl)

	// Good request
	params := url.Values{
		"path": {"/tmp/some_file"},
	}
	resp, err := http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err := ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected := `{"Succeeded":true,"ErrorMessage":"","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusOK, resp.StatusCode)

	// Bad request - no path
	params = url.Values{
		"useless_param": {"/tmp/some_file"},
	}
	resp, err = http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err = ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected = `{"Succeeded":false,"ErrorMessage":"Param 'path' is required.","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}

// TestReport reserves two chunks, then checks that GET /report/ requires a
// 'path' parameter and that the report lists both reservations.
// NOTE(review): depends on the reservations made here (and possibly by
// earlier tests) — tests share one service instance, so ordering matters.
func TestReport(t *testing.T) {
	if testutil.RunningInCI() {
		t.Skip("Skipping volume service test because it looks like we're in the CI environment.")
	}
	runService(t)

	// Reserve a chunk of space with 8000 bytes
	reserveUrl := fmt.Sprintf("%s/reserve/", serviceUrl)
	params := url.Values{
		"path":  {"/tmp/some_file"},
		"bytes": {"8000"},
	}
	resp, err := http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err := ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)

	// Reserve another chunk with 24000 bytes
	params = url.Values{
		"path":  {"/tmp/some_other_file"},
		"bytes": {"24000"},
	}
	resp, err = http.PostForm(reserveUrl, params)
	require.Nil(t, err)
	data, err = ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)

	reportUrl := fmt.Sprintf("%s/report/", serviceUrl)
	resp, err = http.Get(reportUrl)
	require.Nil(t, err)
	data, err = ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected := `{"Succeeded":false,"ErrorMessage":"Param 'path' is required.","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)

	reportUrl = fmt.Sprintf("%s/report/?path=/", serviceUrl)
	resp, err = http.Get(reportUrl)
	require.Nil(t, err)
	data, err = ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected = `{"Succeeded":true,"ErrorMessage":"","Data":{"/tmp/some_file":8000,"/tmp/some_other_file":24000}}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}

// TestPing verifies the health-check endpoint.
func TestPing(t *testing.T) {
	if testutil.RunningInCI() {
		t.Skip("Skipping volume service test because it looks like we're in the CI environment.")
	}
	runService(t)
	pingUrl := fmt.Sprintf("%s/ping/", serviceUrl)
	resp, err := http.Get(pingUrl)
	require.Nil(t, err)
	data, err := ioutil.ReadAll(resp.Body)
	assert.Nil(t, err)
	resp.Body.Close()
	expected := `{"Succeeded":true,"ErrorMessage":"","Data":null}`
	assert.Equal(t, expected, string(data))
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}
package alipay

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
)

// AlipayConfig holds the merchant credentials and endpoint settings for one
// Alipay integration (web, mobile, or WAP).
type AlipayConfig struct {
	// ---------- basic merchant information ----------
	// Partner is the partner ID: 16 digits starting with 2088.
	Partner string
	// Key is the 32-character alphanumeric security check code.
	Key string
	// Private_key_path holds the merchant's PEM-encoded RSA private key.
	// Only used when the sign type requires RSA signing.
	Private_key_path []byte
	// Ali_public_key_path holds Alipay's PEM-encoded public key.
	// Only used when the sign type requires RSA verification.
	Ali_public_key_path []byte
	// ---------- end of basic merchant information ----------

	// Sign_type is the signature algorithm ("MD5" or "RSA"); do not modify.
	Sign_type string
	// Input_charset is the character encoding: "gbk" or "utf-8".
	Input_charset string
	// Cacert is the CA certificate path used for SSL verification.
	Cacert string
	// Transport selects "http" or "https" depending on server SSL support.
	Transport string
	// Private_key is the parsed merchant private key (set by InitKeys).
	Private_key *rsa.PrivateKey
	// Public_key is the parsed Alipay public key used for signature
	// verification (set by InitKeys).
	Public_key *rsa.PublicKey
	// Service is the Alipay service name (e.g. mobile payment).
	Service string
	// Seller_id is the seller's Alipay account.
	Seller_id string
	// Notify_url receives asynchronous notifications.
	Notify_url string
	// Return_url receives the synchronous redirect.
	Return_url string
	// Payment_type is the payment type, always "1".
	Payment_type string
	// Show_order_url is the order status page.
	Show_order_url string
	// Wap_merchant_url is the return address when the WAP flow is aborted.
	Wap_merchant_url string
	// Wap_callback_url is the synchronous notification page for WAP.
	Wap_callback_url string
	// Wap_Service is the WAP trade-creation service name.
	Wap_Service string
}

// AWebConfig is the desktop-web (MD5-signed) configuration.
var AWebConfig = &AlipayConfig{
	Partner:        "xxxxxxxxxxx",
	Key:            "xxxxxxxxxxx",
	Sign_type:      "MD5",
	Input_charset:  "utf-8",
	Cacert:         "Cacert",
	Transport:      "http",
	Service:        "create_direct_pay_by_user",
	Seller_id:      "xxxxxxxxxxx@gmail.com",
	Notify_url:     "/api/pub/alipayWeb/notify",
	Return_url:     "/api/pub/alipayWeb/return",
	Payment_type:   "1",
	Show_order_url: "/paymentStatus.html",
}

// AMobileConfig is the mobile-app (RSA-signed) configuration; it requires
// InitKeys to be called before use so the PEM blobs below are parsed.
var AMobileConfig = &AlipayConfig{
	Partner:   "xxxxxxxxxxx",
	Key:       "xxxxxxxxxxx",
	Sign_type: "RSA",
	Private_key_path: []byte(`
-----BEGIN RSA PRIVATE KEY-----
xxxxxxxxxxx
-----END RSA PRIVATE KEY-----
`),
	Ali_public_key_path: []byte(`
-----BEGIN PUBLIC KEY-----
xxxxxxxxxxx
-----END PUBLIC KEY-----
`),
	Input_charset: "UTF-8",
	Cacert:        "Cacert",
	Transport:     "http",
	Service:       "mobile.securitypay.pay",
	Seller_id:     "xxxxxxxxxxx@gmail.com",
	Notify_url:    "/alipayMobile/notify",
	Payment_type:  "1",
}

// AWapConfig is the WAP (mobile-browser) configuration.
var AWapConfig = &AlipayConfig{
	Partner:   "xxxxxxxxxxx",
	Key:       "xxxxxxxxxxx",
	Sign_type: "MD5",
	Private_key_path: []byte(`
-----BEGIN RSA PRIVATE KEY-----
xxxxxxxxxxx
-----END RSA PRIVATE KEY-----
`),
	Ali_public_key_path: []byte(`
-----BEGIN PUBLIC KEY-----
xxxxxxxxxxx
-----END PUBLIC KEY-----
`),
	Input_charset: "utf-8",
	// Cacert:       "Cacert",
	Transport:   "http",
	Service:     "alipay.wap.auth.authAndExecute",
	Wap_Service: "alipay.wap.trade.create.direct",
	Seller_id:   "xxxxxxxxxxx@gmail.com",
	Notify_url:  "/api/pub/alipayWap/notify",
	// Payment_type: "1",
	Wap_merchant_url: "/api/pub/alipayWap/merchant",
	// synchronous notification page for the WAP flow
	Wap_callback_url: "/api/pub/alipayWap/callback",
	Show_order_url:   "/paymentStatus.html",
}

// InitKeys parses the PEM-encoded private and public keys configured on
// alipayConfig and stores the parsed keys back on the config.
// Returns a non-nil error on any decode/parse failure.
func InitKeys(alipayConfig *AlipayConfig) error {
	log.Println("init rsakeys begin")

	block, _ := pem.Decode(alipayConfig.Private_key_path)
	if block == nil {
		log.Println("rsaSign private_key error")
		return fmt.Errorf("rsaSign pem.Decode error")
	}
	var err error
	alipayConfig.Private_key, err = x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		// BUG FIX: was log.Println with a %v verb (printed the verb
		// literally) and named the wrong function.
		log.Printf("rsaSign ParsePKCS1PrivateKey error : %v\n", err)
		return err
	}

	block, _ = pem.Decode(alipayConfig.Ali_public_key_path)
	if block == nil {
		log.Println("public key error")
		// BUG FIX: previously returned err, which is nil at this point,
		// so a bad public key reported success and left Public_key nil.
		return fmt.Errorf("rsaSign pem.Decode public key error")
	}
	pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		// BUG FIX: was log.Println with a %v verb.
		log.Printf("rsaSign ParsePKIXPublicKey error : %v\n", err)
		return err
	}
	// BUG FIX: guard the type assertion instead of panicking on a
	// non-RSA public key.
	pubKey, ok := pubInterface.(*rsa.PublicKey)
	if !ok {
		log.Println("public key is not an RSA key")
		return fmt.Errorf("rsaSign public key is not RSA")
	}
	alipayConfig.Public_key = pubKey

	log.Println("init rsakeys success ")
	return nil
}
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// region posts the given IP addresses to the local iplocations service and
// prints the response body.
//
// NOTE(review): the result is only printed, never returned — the function
// always returns ""; confirm whether callers are expected to use the
// return value.
func region(ip ...string) string {
	ipStr, err := json.Marshal(ip)
	if err != nil {
		// BUG FIX: the Marshal error was previously discarded.
		fmt.Println("Marshal ip list failed:", err)
		return ""
	}
	payload := strings.NewReader(string(ipStr))

	client := &http.Client{}
	req, err := http.NewRequest("POST", "http://localhost:8081/iplocations", payload)
	if err != nil {
		// BUG FIX: previously only printed the error and then went on to
		// use the nil request.
		fmt.Println(err)
		return ""
	}
	req.Header.Add("Content-Type", "application/json")

	res, err := client.Do(req)
	if err != nil {
		// BUG FIX: previously fell through to res.Body.Close() with a nil
		// response, which panics on any transport error.
		fmt.Println("Query ip location failed:", err)
		return ""
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("Query ip read failed:", err)
		return ""
	}
	fmt.Println(string(body))
	return ""
}
package hooks

import (
	"fmt"
	"os"

	"github.com/cloudfoundry/libbuildpack"
)

// hooks1 logs before compilation; hooks2 logs after it. Both are no-ops
// beyond the debug print and are only registered when BP_DEBUG is set.
type hooks1 struct {
	libbuildpack.DefaultHook
}

type hooks2 struct {
	libbuildpack.DefaultHook
}

// init registers the debug hooks, but only in debug mode.
func init() {
	if os.Getenv("BP_DEBUG") == "" {
		return
	}
	libbuildpack.AddHook(hooks1{})
	libbuildpack.AddHook(hooks2{})
}

// BeforeCompile announces the pre-compile phase.
func (h hooks1) BeforeCompile(compiler *libbuildpack.Stager) error {
	fmt.Println("HOOKS 1: BeforeCompile")
	return nil
}

// AfterCompile announces the post-compile phase.
func (h hooks2) AfterCompile(compiler *libbuildpack.Stager) error {
	fmt.Println("HOOKS 2: AfterCompile")
	return nil
}
package devops

import "testing"

// TestBumpVersion verifies that BumpVersion succeeds for both supported
// environments.
func TestBumpVersion(t *testing.T) {
	for _, env := range []string{"dev", "staging"} {
		if err := BumpVersion(env); err != nil {
			t.Errorf("E! %v", err)
		}
	}
}
// Constantes package main import "fmt" import "math" const s string = "gopher" func main() { fmt.Println("s = ", s) const n = 500 const d = 3e20 / n fmt.Println("d = ", d) fmt.Println("int64(d) = ", int64(d)) fmt.Println("math.Sin(n) = ", math.Sin(n)) }
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright 2019-present PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package rocksdb import ( "encoding/binary" "hash" "hash/crc32" ) var ( rocksEndian = binary.LittleEndian rocksCrcTable = crc32.MakeTable(crc32.Castagnoli) ) const cacheLineSize = 64 func differenceOffset(lhs, rhs []byte) int { l := len(lhs) if len(rhs) < len(lhs) { l = len(rhs) } off := 0 for ; off < l; off++ { if lhs[off] != rhs[off] { break } } return off } func encodeVarint32(buf []byte, v uint32) []byte { _ = buf[4] const B = 128 if v < (1 << 7) { buf[0] = byte(v) return buf[:1] } else if v < (1 << 14) { buf[0] = byte(v | B) buf[1] = byte(v >> 7) return buf[:2] } else if v < (1 << 21) { buf[0] = byte(v | B) buf[1] = byte((v >> 7) | B) buf[2] = byte(v >> 14) return buf[:3] } else if v < (1 << 28) { buf[0] = byte(v | B) buf[1] = byte((v >> 7) | B) buf[2] = byte((v >> 14) | B) buf[3] = byte(v >> 21) return buf[:4] } else { buf[0] = byte(v | B) buf[1] = byte((v >> 7) | B) buf[2] = byte((v >> 14) | B) buf[3] = byte((v >> 21) | B) buf[4] = byte(v >> 28) return buf[:5] } } func 
decodeVarint32(buf []byte) (uint32, int) { result := buf[0] if (result & 128) == 0 { return uint32(result), 1 } return decodeVarint32Slow(buf) } func decodeVarint32Slow(buf []byte) (uint32, int) { var result, shift uint32 var i int for shift <= 28 && i < len(buf) { b := buf[i] i++ if b&128 != 0 { result |= uint32(b&127) << shift } else { result |= uint32(b) << shift return result, i } shift += 7 } return 0, 0 } func encodeVarint64(buf []byte, v uint64) []byte { n := binary.PutUvarint(buf, v) return buf[:n] } func decodeVarint64(buf []byte) (uint64, int) { return binary.Uvarint(buf) } func appendVarint32(buf []byte, v uint32) []byte { var e [5]byte result := encodeVarint32(e[:], v) return append(buf, result...) } const crc32MaskDelta = 0xa282ead8 func maskCrc32(sum uint32) uint32 { return ((sum >> 15) | (sum << 17)) + crc32MaskDelta } func unmaskCrc32(sum uint32) uint32 { rot := sum - crc32MaskDelta return (rot >> 17) | (rot << 15) } func newCrc32() hash.Hash32 { return crc32.New(rocksCrcTable) } func extractUserKey(key []byte) []byte { return key[:len(key)-8] } func rocksHash(data []byte, seed uint32) uint32 { const m = 0xc6a4a793 const r = 24 h := seed ^ uint32(len(data)*m) pos := 0 for ; pos+4 < len(data); pos += 4 { w := rocksEndian.Uint32(data[pos : pos+4]) h += w h *= m h ^= h >> 16 } // Pick up remaining bytes remain := len(data) - pos if remain == 3 { h += uint32(int8(data[2])) << 16 } if remain >= 2 { h += uint32(int8(data[1])) << 8 } if remain >= 1 { h += uint32(int8(data[0])) h *= m h ^= h >> r } return h }
package media

import (
	"time"

	uuid "github.com/satori/go.uuid"
)

// FileUpload is the GORM model for an uploaded media file.
type FileUpload struct {
	// ID is a UUID stored as char(36).
	ID uuid.UUID `gorm:"type:char(36); primary_key"`
	// Name is the original file name.
	Name string `gorm:"type:text; not null"`
	// Size is the file size in bytes.
	Size int64 `gorm:"type:int; not null"`
	// Link is the URL/path where the file can be retrieved.
	// BUG FIX: the tag was `gorm:"text; not null"` — missing the `type:`
	// key, so GORM ignored it and the column lost its type/constraint;
	// now consistent with the other fields.
	Link string `gorm:"type:text; not null"`
	// CreatedAt is maintained automatically by GORM.
	CreatedAt time.Time
}
package systemed // 查看启动时的默认 Target func GetDefault(name string) ([]byte, error) { sys := NewSystemed("systemctl") sys.SetArgs("get-default").SetArgs(name) rs, err := sys.Exec() return rs, err } // 设置启动时的默认 Target func SetDefault(name string) ([]byte, error) { sys := NewSystemed("systemctl") sys.SetArgs("set-default").SetArgs(name) rs, err := sys.Exec() return rs, err } // # 切换 Target 时,默认不关闭前一个 Target 启动的进程, // # systemctl isolate 命令改变这种行为, // # 关闭前一个 Target 里面所有不属于后一个 Target 的进程 func Isolate(name string) ([]byte, error) { sys := NewSystemed("systemctl") sys.SetArgs("isolate").SetArgs(name) rs, err := sys.Exec() return rs, err }
package goWeb3

import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/radar-bear/goWeb3/helper"
	"github.com/sirupsen/logrus"
	"math/big"
	"os"
	"strings"
)

// ============= Web3 =============

// Web3 wraps an Ethereum JSON-RPC endpoint plus a set of locally managed
// accounts (address -> hex private key) used to sign transactions.
type Web3 struct {
	Rpc           *helper.EthRPC
	Accounts      []string
	privateKeyMap map[string]string // address -> privateKey
}

// NewWeb3 builds a Web3 bound to the given node URL.
// NOTE(review): the leading GetChainId() call is for its side effect only —
// it logrus.Fatalf's the process when the NETWORK env var is unset/unknown.
// Confirm a constructor is the right place for that check.
func NewWeb3(ethereumNodeUrl string) *Web3 {
	GetChainId()
	rpc := helper.NewEthRPC(ethereumNodeUrl)
	return &Web3{rpc, []string{}, map[string]string{}}
}

// ============= Account =============

// AddAccount derives the address from the hex private key, registers the
// pair on this Web3 instance, and returns the (lower-cased) address.
func (w *Web3) AddAccount(privateKey string) (accountAddress string, err error) {
	pk, err := helper.NewPrivateKeyByHex(privateKey)
	if err != nil {
		return
	}
	accountAddress = strings.ToLower(helper.PubKey2Address(pk.PublicKey))
	w.Accounts = append(w.Accounts, accountAddress)
	w.privateKeyMap[accountAddress] = strings.ToLower(privateKey)
	return
}

// BalanceOf returns the latest-block ETH balance (in wei) of address.
func (w *Web3) BalanceOf(address string) (balance big.Int, err error) {
	return w.Rpc.EthGetBalance(address, "latest")
}

// NonceOf returns the latest-block transaction count of address.
func (w *Web3) NonceOf(address string) (nonce int, err error) {
	return w.Rpc.EthGetTransactionCount(address, "latest")
}

// todo: new block channel

// ============= Contract =============

// Contract couples a parsed ABI and a deployed address with the Web3
// instance used to reach the node.
type Contract struct {
	web3    *Web3
	abi     *abi.ABI
	address *common.Address
}

// NewContract parses abiStr and binds it to the contract at address.
func (w *Web3) NewContract(abiStr string, address string) (contract *Contract, err error) {
	// Note: the local `abi` shadows the imported abi package below.
	abi, err := abi.JSON(strings.NewReader(abiStr))
	if err != nil {
		return
	}
	commonAddress := common.HexToAddress(address)
	contract = &Contract{
		w,
		&abi,
		&commonAddress,
	}
	return
}

// ============= Interactions =============

// SendTxParams carries the per-transaction signing parameters.
type SendTxParams struct {
	FromAddress string
	GasLimit    *big.Int
	GasPrice    *big.Int
	Nonce       uint64
}

// Call performs a read-only contract call against the latest block.
func (c *Contract) Call(functionName string, args ...interface{}) (resp string, err error) {
	return c.HistoryCall("latest", functionName, args...)
}

// HistoryCall performs a read-only call at the given block number
// ("latest" or a historic block). With no args it sends just the method
// selector.
func (c *Contract) HistoryCall(blockNum string, functionName string, args ...interface{}) (resp string, err error) {
	var dataByte []byte
	if args != nil {
		dataByte, err = c.abi.Pack(functionName, args...)
	} else {
		dataByte = c.abi.Methods[functionName].ID()
	}
	if err != nil {
		return
	}
	return c.web3.Rpc.EthCall(helper.T{
		To:   c.address.String(),
		From: "0x0000000000000000000000000000000000000000",
		Data: fmt.Sprintf("0x%x", dataByte)},
		blockNum,
	)
}

// Send signs and broadcasts a state-changing contract call from
// params.FromAddress (which must have been added via AddAccount).
func (c *Contract) Send(params *SendTxParams, value *big.Int, functionName string, args ...interface{}) (resp string, err error) {
	if _, ok := c.web3.privateKeyMap[strings.ToLower(params.FromAddress)]; !ok {
		err = errors.New("ACCOUNT_NOT_VALID")
		return
	}
	data, err := c.abi.Pack(functionName, args...)
	if err != nil {
		return
	}
	tx := types.NewTransaction(
		params.Nonce,
		*c.address,
		value,
		params.GasLimit.Uint64(),
		params.GasPrice,
		data,
	)
	// NOTE(review): the SignTx error is discarded; a signing failure would
	// broadcast empty raw data. Consider propagating it.
	rawData, _ := helper.SignTx(c.web3.privateKeyMap[strings.ToLower(params.FromAddress)], GetChainId(), tx)
	return c.web3.Rpc.EthSendRawTransaction(rawData)
}

// TransferEth signs and broadcasts a plain ETH transfer of value wei to
// the `to` address.
func (w *Web3) TransferEth(params *SendTxParams, to string, value *big.Int) (resp string, err error) {
	tx := types.NewTransaction(
		params.Nonce,
		common.HexToAddress(to),
		value,
		params.GasLimit.Uint64(),
		params.GasPrice,
		[]byte{},
	)
	// NOTE(review): SignTx error discarded here as well.
	rawData, _ := helper.SignTx(w.privateKeyMap[strings.ToLower(params.FromAddress)], GetChainId(), tx)
	return w.Rpc.EthSendRawTransaction(rawData)
}

// GetRecipt fetches the receipt for a broadcast transaction hash.
// (Name kept as-is for compatibility; "Receipt" is the usual spelling.)
func (w *Web3) GetRecipt(txHash string) (receipt *helper.TransactionReceipt, err error) {
	return w.Rpc.EthGetTransactionReceipt(txHash)
}

// ============= Other Functions =============

// GetGasPriceGwei fetches the current "fast" gas price from ethgasstation.
// The API reports tenths of gwei, hence the divide by 10.
// NOTE(review): the json.Unmarshal error is ignored — a malformed response
// yields 0 gwei silently.
func GetGasPriceGwei() (gasPriceInGwei int64, err error) {
	resp, err := helper.Get("https://ethgasstation.info/json/ethgasAPI.json", "", helper.EmptyKeyPairList, helper.EmptyKeyPairList)
	if err != nil {
		return
	}
	var dataContainer struct {
		Fast    float64 `json:"fast"`
		Fastest float64 `json:"fastest"`
		SafeLow float64 `json:"safeLow"`
		Average float64 `json:"average"`
	}
	json.Unmarshal([]byte(resp), &dataContainer)
	gasPriceInGwei = int64(dataContainer.Fast / 10)
	return
}

// GetChainId maps the NETWORK env var to a chain ID string.
// Any other value is fatal (the trailing return "0" is unreachable).
func GetChainId() (chainId string) {
	network := os.Getenv("NETWORK")
	switch network {
	case "mainnet":
		return "1"
	case "kovan":
		return "42"
	default:
		logrus.Fatalf("%s network not support", network)
	}
	return "0"
}

// HexToAddress is a thin convenience wrapper over common.HexToAddress.
func HexToAddress(hexString string) common.Address {
	return common.HexToAddress(hexString)
}
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package router

import (
	"github.com/tailscale/wireguard-go/device"
	"github.com/tailscale/wireguard-go/tun"

	"tailscale.com/types/logger"
)

// NewFake returns a Router that does nothing when called and
// always returns nil errors. The device and tun parameters are accepted
// only to satisfy the Router constructor signature and are ignored.
func NewFake(logf logger.Logf, _ *device.Device, _ tun.Device) (Router, error) {
	return fakeRouter{logf: logf}, nil
}

// fakeRouter is a stub Router; every method logs a warning and succeeds.
type fakeRouter struct {
	logf logger.Logf
}

func (r fakeRouter) Up() error {
	r.logf("Warning: fakeRouter.Up: not implemented.")
	return nil
}

func (r fakeRouter) Set(cfg *Config) error {
	r.logf("Warning: fakeRouter.Set: not implemented.")
	return nil
}

func (r fakeRouter) Close() error {
	r.logf("Warning: fakeRouter.Close: not implemented.")
	return nil
}
/* The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... Let us list the factors of the first seven triangle numbers: 1: 1 3: 1,3 6: 1,2,3,6 10: 1,2,5,10 15: 1,3,5,15 21: 1,3,7,21 28: 1,2,4,7,14,28 We can see that 28 is the first triangle number to have over five divisors. What is the value of the first triangle number to have over five hundred divisors? */ package main import ( "fmt" "utils" ) var primes []uint64 func init() { primes = utils.PrimesUpTo(15000) } func main() { // Start at the 500th triangle number. var triangle, n uint64 = 125250, 500 for { factors := factor(triangle) divisors := countDivisors(factors) if divisors > 500 { fmt.Printf("%d with factors %v has %d divisors", triangle, factors, divisors) break } else { n++ triangle += n } } } func factor(n uint64) []uint64 { if n == 1 { return []uint64{} } for i := 0; primes[i] <= n; i++ { p := primes[i] if n%p == 0 { return append(factor(n/p), p) } } return nil } func countDivisors(factors []uint64) int { exp := make(map[uint64]int) for _, prime := range factors { e, ok := exp[prime] if !ok { e = 0 } exp[prime] = e + 1 } divisors := 1 for _, e := range exp { divisors *= e + 1 } return divisors }
package restAPI

import (
	"errors"
	"net/url"
	"reflect"
)

// fetchStringParameter extracts a single string value for key from the
// query values. It errors when the key appears more than once, errors when
// the key is required but absent/empty, and returns (nil, nil) for an
// absent optional key.
func fetchStringParameter(f *url.Values, key string, required bool) (*string, error) {
	values := (*f)[key]
	if duplicated(values) {
		return nil, errors.New(key + " should be only one")
	}
	if !invalidString(values) {
		return &values[0], nil
	}
	if required {
		return nil, errors.New(key + " is required")
	}
	return nil, nil
}

// invalidString reports whether the parameter is absent or empty.
// (A nil slice has length 0, so the single len check covers it.)
func invalidString(str []string) bool {
	return len(str) == 0 || str[0] == ""
}

// duplicated reports whether more than one value was supplied.
func duplicated(param interface{}) bool {
	return reflect.ValueOf(param).Len() > 1
}
package config // WorkerConfig holds all the different implementations for a persistence store service type WorkerConfig struct { Converter WorkerConverterConfig }
package goqueue

import (
	"context"
	"errors"
	"fmt"
)

var (
	// ErrorQueueFull is returned by a non-blocking push on a full queue.
	ErrorQueueFull = errors.New("queue was full")
	// ErrorQueueEmpty is returned by a non-blocking pop on an empty queue.
	ErrorQueueEmpty = errors.New("queue was empty")
)

// Config controls queue capacity and whether Push/Pop block when the queue
// is full/empty.
type Config struct {
	PushBlocking bool
	PopBlocking  bool
	MaxBuffer    int64
}

// InMemoryQueue is a channel-backed Task queue.
type InMemoryQueue struct {
	Config
	receiver chan Task
}

// NewInMemoryQueue builds a queue whose buffer size is config.MaxBuffer.
func NewInMemoryQueue(config Config) *InMemoryQueue {
	return &InMemoryQueue{
		Config:   config,
		receiver: make(chan Task, config.MaxBuffer),
	}
}

// Push enqueues task, blocking or not according to Config.PushBlocking.
func (q *InMemoryQueue) Push(ctx context.Context, task Task) error {
	if !q.PushBlocking {
		return q.NonBlockingPush(ctx, task)
	}
	return q.BlockingPush(ctx, task)
}

// BlockingPush waits until the task is accepted or ctx is cancelled.
func (q *InMemoryQueue) BlockingPush(ctx context.Context, task Task) error {
	select {
	case q.receiver <- task:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// NonBlockingPush enqueues immediately or fails with ErrorQueueFull.
func (q *InMemoryQueue) NonBlockingPush(ctx context.Context, task Task) error {
	select {
	case q.receiver <- task:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	default:
		return fmt.Errorf("push task failed, %w", ErrorQueueFull)
	}
}

// Pop dequeues a task, blocking or not according to Config.PopBlocking.
func (q *InMemoryQueue) Pop(ctx context.Context) (Task, error) {
	if !q.PopBlocking {
		return q.NonBlockingPop(ctx)
	}
	return q.BlockingPop(ctx)
}

// BlockingPop waits for a task or for ctx cancellation.
func (q *InMemoryQueue) BlockingPop(ctx context.Context) (Task, error) {
	select {
	case task := <-q.receiver:
		return task, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// NonBlockingPop dequeues immediately or fails with ErrorQueueEmpty.
func (q *InMemoryQueue) NonBlockingPop(ctx context.Context) (Task, error) {
	select {
	case task := <-q.receiver:
		return task, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
		return nil, fmt.Errorf("pop task failed, %w", ErrorQueueEmpty)
	}
}
// Copyright 2021 Praetorian Security, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package cmd implements a simple command line interface using cobra
*/
package cmd

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/praetorian-inc/gokart/analyzers"
	"github.com/praetorian-inc/gokart/util"
	"github.com/spf13/cobra"
)

// Flag storage for string-valued flags (bool flags are read back via
// cmd.Flags().GetBool inside Run).
var yml string
var exitCode bool
var remoteModule string
var outputPath string
var remoteBranch string
var keyFile string

// init registers the scan subcommand and all of its flags.
func init() {
	goKartCmd.AddCommand(scanCmd)
	scanCmd.Flags().BoolP("sarif", "s", false, "outputs findings in SARIF form")
	scanCmd.Flags().BoolP("json", "j", false, "outputs findings in JSON")
	scanCmd.Flags().BoolP("globalsTainted", "g", false, "marks global variables as dangerous")
	scanCmd.Flags().BoolP("verbose", "v", false, "outputs full trace of taint analysis")
	scanCmd.Flags().BoolP("debug", "d", false, "outputs debug logs")
	scanCmd.Flags().BoolP("exitCode", "x", false, "return non-nil exit code on potential vulnerabilities or scanner failure")
	scanCmd.Flags().StringVarP(&remoteModule, "remoteModule", "r", "", "Remote gomodule to scan")
	scanCmd.Flags().StringVarP(&remoteBranch, "remoteBranch", "b", "", "Branch of remote module to scan")
	scanCmd.Flags().StringVarP(&keyFile, "keyFile", "k", "", "SSH Keyfile to use for ssh authentication for remote git repository scanning")
	scanCmd.Flags().StringVarP(&yml, "input", "i", "", "input path to custom yml file")
	scanCmd.Flags().StringVarP(&outputPath, "output", "o", "", "file path to write findings output instead of stdout")
	// NOTE(review): MarkFlagRequired expects a flag name, but "scan" is a
	// subcommand — this call is likely a no-op; confirm intent.
	goKartCmd.MarkFlagRequired("scan")
}

var scanCmd = &cobra.Command{
	Use:   "scan [flags] [directory]",
	Short: "Scans a Go module directory",
	Long: `
Scans a Go module directory. To scan the current directory recursively, use gokart scan. To scan a specific directory, use gokart scan <directory>.`,
	Run: func(cmd *cobra.Command, args []string) {
		sarif, _ := cmd.Flags().GetBool("sarif")
		json, _ := cmd.Flags().GetBool("json")
		globals, _ := cmd.Flags().GetBool("globalsTainted")
		verbose, _ := cmd.Flags().GetBool("verbose")
		debug, _ := cmd.Flags().GetBool("debug")
		// NOTE(review): this shadows the package-level exitCode variable;
		// the package-level one is never bound to the flag.
		exitCode, _ := cmd.Flags().GetBool("exitCode")
		util.InitConfig(globals, sarif, json, verbose, debug, outputPath, yml, exitCode)

		// If remoteModule was set, clone the remote repository and scan it
		if len(remoteModule) != 0 {
			moduleTempDir, err := ioutil.TempDir(".", "gokart")
			if err != nil {
				log.Fatal("Error creating temporary directory: ", err.Error())
			}
			defer util.CleanupModule(moduleTempDir)

			// Clone the module, if the output format is JSON or SARIF don't print any progress to stdout
			err = util.CloneModule(moduleTempDir, remoteModule, remoteBranch, keyFile, json || sarif)
			if err != nil {
				util.CleanupModule(moduleTempDir)
				log.Fatal("Error cloning remote repository: ", err.Error())
			}
			// If passing in a module - the other arguments are wiped out!
			args = append([]string{}, moduleTempDir+"/...")
		}

		// recursively scan the current directory if no arguments are passed in
		if len(args) == 0 {
			args = append(args, "./...")
		}
		results, err := analyzers.Scan(args)
		// If we have set the flag to return non-zero exit code for when results are found or the scanner fails, return 1
		if exitCode && (err != nil || len(results) > 0) {
			os.Exit(1)
		}
	},
}
package util

import (
	"math/rand"
	"time"
)

// seededGenerator is a package-local PRNG seeded once at startup, so the
// helpers below don't depend on the shared global rand state.
var seededGenerator = rand.New(rand.NewSource(time.Now().UnixNano()))

// stringCharset is the alphabet used for generated identifiers.
var stringCharset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// randomString returns a string of n characters drawn uniformly from
// stringCharset. Extracted to remove the duplicated loop in the two
// generators below.
func randomString(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = stringCharset[seededGenerator.Intn(len(stringCharset))]
	}
	return string(b)
}

// GenerateGameCode returns a random 6-character game code.
func GenerateGameCode() string {
	return randomString(6)
}

// GeneratePlayerID returns a random 64-character player identifier.
func GeneratePlayerID() string {
	return randomString(64)
}

// RollDie returns a uniformly random die roll in [1, 6].
func RollDie() int {
	return seededGenerator.Intn(6) + 1
}
package graphdb // Node represent graph's node type Node struct { Nodetype int16 ID []byte // must be 4byte Value []byte } // nodeKeyBytes return bytes array represent Node's key func nodeKeyBytes(node *Node) []byte { var retbyte [3]byte retbyte[0] = NODE retbyte[1] = byte((node.Nodetype >> 8) & 0xff) retbyte[2] = byte(node.Nodetype & 0xff) ret := append(retbyte[:], node.ID[:]...) return ret } // nodeKeyBytesType return bytes array represent Node's key only type prefix func nodeKeyBytesType(nodetype int16) []byte { var retbyte [3]byte retbyte[0] = NODE retbyte[1] = byte((nodetype >> 8) & 0xff) retbyte[2] = byte(nodetype & 0xff) return retbyte[:] } func bytesNode(k []byte, v []byte) *Node { node := &Node{} key := make([]byte, len(k)) copy(key, k) val := make([]byte, len(v)) copy(val, v) node.Nodetype = int16(key[1])<<8 + int16(key[2]) node.ID = key[3:7] node.Value = val return node }
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// hello answers every request with a greeting that echoes the caller's
// Authorization header and the current server time.
func hello(w http.ResponseWriter, r *http.Request) {
	auth := r.Header.Get("Authorization")
	now := time.Now().Format("2006-01-02 15:04:05 MST")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "Hello World! (Authorization: %s, Time: %s)\n", auth, now)
}

// main registers the handler on "/" and serves on :8080 until the listener
// fails; log.Fatal surfaces the terminal error.
func main() {
	http.HandleFunc("/", hello)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
package worker

import (
	"github.com/OHopiak/fractal-load-balancer/core"
	"github.com/jinzhu/gorm"
	"github.com/labstack/echo/v4"
	"github.com/pkg/errors"
	"net"
	"time"
)

type (
	// Master describes the master node a worker registers with.
	Master struct {
		core.Host `json:"host"`
		Connected bool `json:"connected"`
	}
	// Worker is a load-balancer worker node: an echo HTTP server plus its
	// database handle and the master it reports to.
	Worker struct {
		core.Host
		ID     *uint   // worker id assigned after registration with the master
		Master *Master
		Server *echo.Echo
		db     *gorm.DB
	}
)

// New builds a Worker bound to localhost with an OS-assigned port (Port 0),
// wires its database connection and routes. configureDatabase and routes are
// defined elsewhere in this package.
func New(master core.Host, dbConfig core.DatabaseConfig) Worker {
	w := Worker{
		Host: core.Host{
			IP:   "localhost",
			Port: 0,
		},
		Server: core.NewServer(),
		Master: &Master{
			Host: master,
		},
		ID: new(uint),
	}
	w.configureDatabase(dbConfig)
	w.routes()
	return w
}

// WithIP returns a copy of the worker with its IP replaced (builder style;
// value receiver, so the original is untouched).
func (w Worker) WithIP(IP string) Worker {
	w.IP = IP
	return w
}

// WithPort returns a copy of the worker with its port replaced.
func (w Worker) WithPort(port int) Worker {
	w.Port = port
	return w
}

// Start launches the HTTP server asynchronously, runs post-start
// registration, and then blocks until the server errors out.
func (w *Worker) Start() {
	errChan := core.StartServerAsync(w.Server, w.Host)
	err := w.PostStart()
	if err != nil {
		w.Server.Logger.Fatal(err)
	}
	w.Server.Logger.Fatal(<-errChan)
}

// PostStart waits (up to ~4s: 20 polls x 200ms) for the listener to come up,
// records the OS-assigned port, and registers with the master. Register is
// defined elsewhere in this package.
func (w *Worker) PostStart() error {
	counter := 20
	for w.Server.Listener == nil && counter > 0 {
		time.Sleep(200 * time.Millisecond)
		counter--
	}
	if w.Server.Listener == nil {
		return errors.New("server start failed")
	}
	w.Port = w.Server.Listener.Addr().(*net.TCPAddr).Port
	err := w.Register()
	if err != nil {
		return err
	}
	return nil
}

// NewTask creates and persists a Task attached to this worker's DB record.
// NOTE(review): the db.First / db.Create errors are not checked — confirm
// that's acceptable here.
func (w *Worker) NewTask() core.Task {
	worker := core.Worker{}
	w.db.First(&worker, *w.ID)
	task := core.Task{
		Worker: worker,
	}
	w.db.Create(&task)
	return task
}
package models

import (
	"database/sql"
	"fmt"

	"github.com/hiraisin/go-postgre/config"
)

// CheckLogin verifies username/password against the users table.
//
// It returns nil on success, sql.ErrNoRows when the user does not exist, and
// a non-nil error for query failures or a password mismatch.
//
// Bug fix: the original returned the (nil) err on a password mismatch, so a
// wrong password was reported as a successful login. It also printed the
// stored hash and the plaintext password to stdout; that leak is removed.
func CheckLogin(username, password string) error {
	var obj User
	var pwd string
	con := config.CreateCon()
	// NOTE(review): "?" is the MySQL-style placeholder; lib/pq (PostgreSQL)
	// expects "$1". Confirm which driver config.CreateCon returns.
	sqlStatement := "SELECT id,username,password FROM users where username = ?"
	err := con.QueryRow(sqlStatement, username).Scan(
		&obj.Id,
		&obj.Username,
		&pwd,
	)
	if err == sql.ErrNoRows {
		fmt.Println("Username Not Found")
		return err
	}
	if err != nil {
		fmt.Println("Query Error")
		return err
	}
	if !config.CheckPasswordHash(password, pwd) {
		fmt.Println("Hash and Password doesn't match")
		return fmt.Errorf("invalid password for user %q", username)
	}
	return nil
}
// Writing a basic HTTP server is easy using the // `net/http` package. package main import ( "encoding/json" "fmt" "net/http" cls "github.com/wgliang/tencentcloud-cls-sdk-go" ) // A fundamental concept in `net/http` servers is // *handlers*. A handler is an object implementing the // `http.Handler` interface. A common way to write // a handler is by using the `http.HandlerFunc` adapter // on functions with the appropriate signature. func index(w http.ResponseWriter, req *http.Request) { indexMaps := map[string]cls.Index{ "topic01": { TopicID: "topic01", Effective: false, Rule: cls.Rule{ FullText: cls.FullText{ CaseSensitive: false, }, KeyValue: cls.KeyValue{ CaseSensitive: false, Keys: []string{"key1", "key2"}, Types: []string{"type1", "type2"}, }, }, }, "topic03": { TopicID: "topic03", Effective: false, Rule: cls.Rule{ FullText: cls.FullText{ CaseSensitive: false, }, KeyValue: cls.KeyValue{ CaseSensitive: false, Keys: []string{"key1", "key2"}, Types: []string{"type1", "type2"}, }, }, }, } // Functions serving as handlers take a // `http.ResponseWriter` and a `http.Request` as // arguments. The response writer is used to fill in the // HTTP response. Here our simple response is just // "hello\n". 
switch req.Method { case "GET": topicID := req.FormValue("topic_id") if v, ok := indexMaps[topicID]; ok { b, err := json.Marshal(v) if err != nil { fmt.Println("error:", err) http.Error(w, "data is error", http.StatusBadRequest) } fmt.Fprintf(w, string(b)) return } else { http.Error(w, "Not find topic.", http.StatusNotFound) return } case "PUT": var index cls.Index err := json.NewDecoder(req.Body).Decode(&index) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } if _, ok := indexMaps[index.TopicID]; ok { fmt.Fprintf(w, "200") return } else { http.Error(w, "Not find topic.", http.StatusNotFound) return } default: http.Error(w, "Method is not supported.", http.StatusNotFound) return } } func structuredlog(w http.ResponseWriter, req *http.Request) { indexMaps := map[string]cls.Index{ "topic01": { TopicID: "topic01", Effective: false, Rule: cls.Rule{ FullText: cls.FullText{ CaseSensitive: false, }, KeyValue: cls.KeyValue{ CaseSensitive: false, Keys: []string{"key1", "key2"}, Types: []string{"type1", "type2"}, }, }, }, "topic03": { TopicID: "topic03", Effective: false, Rule: cls.Rule{ FullText: cls.FullText{ CaseSensitive: false, }, KeyValue: cls.KeyValue{ CaseSensitive: false, Keys: []string{"key1", "key2"}, Types: []string{"type1", "type2"}, }, }, }, } // Functions serving as handlers take a // `http.ResponseWriter` and a `http.Request` as // arguments. The response writer is used to fill in the // HTTP response. Here our simple response is just // "hello\n". 
switch req.Method { case "GET": topicID := req.FormValue("topic_id") if v, ok := indexMaps[topicID]; ok { b, err := json.Marshal(v) if err != nil { fmt.Println("error:", err) http.Error(w, "data is error", http.StatusBadRequest) } fmt.Fprintf(w, string(b)) return } else { http.Error(w, "Not find topic.", http.StatusNotFound) return } case "PUT": var index cls.Index err := json.NewDecoder(req.Body).Decode(&index) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } if _, ok := indexMaps[index.TopicID]; ok { fmt.Fprintf(w, "200") return } else { http.Error(w, "Not find topic.", http.StatusNotFound) return } case "POST": topicID := req.FormValue("topic_id") if v, ok := indexMaps[topicID]; ok { b, err := json.Marshal(v) if err != nil { fmt.Println("error:", err) http.Error(w, "data is error", http.StatusBadRequest) } fmt.Fprintf(w, string(b)) return } else { http.Error(w, "Not find topic.", http.StatusNotFound) return } default: http.Error(w, "Method is not supported.", http.StatusNotFound) return } } func main() { // We register our handlers on server routes using the // `http.HandleFunc` convenience function. It sets up // the *default router* in the `net/http` package and // takes a function as an argument. http.HandleFunc("/index", index) // http.HandleFunc("/log", log) http.HandleFunc("/structuredlog", structuredlog) // http.HandleFunc("/logset", logset) // http.HandleFunc("/topic", topic) // http.HandleFunc("/log", log) // Finally, we call the `ListenAndServe` with the port // and a handler. `nil` tells it to use the default // router we've just set up. http.ListenAndServe(":8080", nil) }
package tcp

import (
	"time"
)

// Default framing and buffering limits for the TCP protocol.
const (
	DEFAULT_PACK_HEAD_LEN   = 8           // fixed packet header size in bytes
	DEFAULT_MAX_BODY_LEN    = 16384 - 8   // max body = 16KiB frame minus the header
	DEFAULT_RECV_BUF_LEN    = 1024 * 4    // per-connection receive buffer (bytes)
	DEFAULT_SEND_BUF_LEN    = 1024 * 4    // per-connection send buffer (bytes)
	DEFAULT_MAX_PACK_LEN    = 1024 * 1024 // absolute upper bound on one packet (bytes)
	DEFAULT_KEEP_ALIVE_TIME = time.Second * 60
	DEFAULT_RECV_BLOCK_TIME = time.Second * 20 // max blocking time for a read
	DEFAULT_SEND_BLOCK_TIME = time.Second * 5  // max blocking time for a write
	DEFAULT_SEND_Q_SIZE     = 256              // outbound packet queue capacity
)

const (
	// SOCKET_VER is the protocol version byte carried in each frame.
	SOCKET_VER byte = 0x89
	// PLAZA_VER is the plaza protocol version (0x20001).
	PLAZA_VER = 131073
)
/*
Copyright IBM Corporation 2020

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package customizer

import (
	"fmt"
	"path/filepath"

	log "github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"

	common "github.com/konveyor/move2kube/internal/common"
	"github.com/konveyor/move2kube/internal/qaengine"
	irtypes "github.com/konveyor/move2kube/internal/types"
	qatypes "github.com/konveyor/move2kube/types/qaengine"
)

//storageCustomizer customizes storage
type storageCustomizer struct {
	ir *irtypes.IR // the intermediate representation being customized
}

const (
	// alloption is the pseudo claim name shown when a single storage class
	// is applied to every PVC at once.
	alloption string = "Apply for all"
)

//customize customizes the storage: host paths become PVCs, then the user is
//asked (via the QA engine) which cluster storage class each claim should use.
func (ic *storageCustomizer) customize(ir *irtypes.IR) error {
	ic.ir = ir
	ic.convertHostPathToPVC()
	if len(ir.Storages) == 0 {
		log.Debugf("Empty storage list. Nothing to customize.")
		return nil
	}
	if ir.TargetClusterSpec.StorageClasses == nil || len(ir.TargetClusterSpec.StorageClasses) == 0 {
		s := "No storage classes available in the cluster"
		log.Warnf(s)
		return fmt.Errorf(s)
	}
	// claim name -> names of services that mount it
	claimSvcMap := ic.getPVCs()
	if len(claimSvcMap) == 0 {
		log.Debugf("No service with volumes detected. Storage class configuration not required.")
		return nil
	}
	selectedKeys := []string{}
	for k := range claimSvcMap {
		selectedKeys = append(selectedKeys, k)
	}
	if len(selectedKeys) > 1 {
		// With multiple claims, offer to pick one storage class for all.
		if !ic.shouldConfigureSeparately(selectedKeys) {
			storageClass := ic.selectStorageClass(ir.TargetClusterSpec.StorageClasses, alloption, []string{})
			for _, storage := range ir.Storages {
				if storage.StorageType == irtypes.PVCKind {
					// NOTE(review): `storage` is the range copy; unless
					// ir.Storages holds pointers, this assignment is lost.
					// Compare with the index-based write-back loop below.
					storage.PersistentVolumeClaimSpec.StorageClassName = &storageClass
				}
			}
			return nil
		}
	}
	// Per-claim selection; write back through the index since s is a copy.
	for i, s := range ir.Storages {
		if svs, ok := claimSvcMap[s.Name]; ok {
			storageClassName := ic.selectStorageClass(ir.TargetClusterSpec.StorageClasses, s.Name, svs)
			s.StorageClassName = &storageClassName
			ir.Storages[i] = s
		}
	}
	return nil
}

// convertHostPathToPVC rewrites hostPath volumes in services into PVC
// references, creating one Storage object per converted path.
func (ic *storageCustomizer) convertHostPathToPVC() {
	// host path -> name of the claim created for it ("" marks paths seen but
	// not (yet) converted).
	hostPathsVisited := make(map[string]string)
	for _, service := range ic.ir.Services {
		log.Debugf("Service %s has %d volumes", service.Name, len(service.Volumes))
		for vi, v := range service.Volumes {
			if v.HostPath != nil {
				if name, ok := hostPathsVisited[v.HostPath.Path]; ok {
					// NOTE(review): this branch runs only for paths already
					// in the map, yet it is the one that asks the question
					// and creates the PVC, while the else branch reuses
					// `name` — which is "" on a first encounter. The ok/else
					// arms look inverted; confirm against upstream before
					// relying on this behavior.
					hostPathsVisited[v.HostPath.Path] = ""
					log.Debugf("Detected host path [%s]", v.HostPath.Path)
					if !ic.shouldHostPathBeRetained(v.HostPath.Path) {
						hostPathsVisited[v.HostPath.Path] = v.Name
						v.VolumeSource = corev1.VolumeSource{
							PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
								ClaimName: v.Name,
							}}
						service.Volumes[vi] = v
						ic.ir.Services[service.Name] = service
						storageObj := irtypes.Storage{
							StorageType: irtypes.PVCKind,
							Name:        v.Name,
							PersistentVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{
								VolumeName: v.Name,
								Resources: corev1.ResourceRequirements{
									Requests: corev1.ResourceList{
										corev1.ResourceStorage: common.DefaultPVCSize,
									},
								},
							}}
						ic.ir.AddStorage(storageObj)
					}
				} else {
					// Reuse the claim created for a previously seen path.
					v.VolumeSource = corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: name,
						}}
					service.Volumes[vi] = v
					ic.ir.Services[service.Name] = service
				}
			}
		}
	}
}

// shouldHostPathBeRetained asks the user whether a host path should stay a
// host path. Absolute paths are always retained without asking.
func (ic storageCustomizer) shouldHostPathBeRetained(hostPath string) bool {
	if filepath.IsAbs(hostPath) {
		return true
	}
	problem, err := qatypes.NewConfirmProblem(fmt.Sprintf("Do you want to create PVC for host path [%s]?:", hostPath),
		[]string{"Use PVC for persistent storage wherever applicable"},
		false)
	if err != nil {
		log.Fatalf("Unable to create problem : %s", err)
	}
	problem, err = qaengine.FetchAnswer(problem)
	if err != nil {
		log.Fatalf("Unable to fetch answer : %s", err)
	}
	ans, err := problem.GetBoolAnswer()
	if err != nil {
		log.Fatalf("Unable to get answer : %s", err)
	}
	// The question asks about creating a PVC, so "yes" means NOT retained.
	return !ans
}

// shouldConfigureSeparately asks whether each claim gets its own storage
// class (true) or one class is applied to all of them (false).
func (ic storageCustomizer) shouldConfigureSeparately(claims []string) bool {
	context := make([]string, 2)
	context[0] = "Storage classes have to be configured for below claims:"
	context[1] = fmt.Sprintf("%+v", claims)
	problem, err := qatypes.NewConfirmProblem("Do you want to configure different storage classes for each claim?",
		context,
		false)
	if err != nil {
		log.Fatalf("Unable to create problem : %s", err)
	}
	problem, err = qaengine.FetchAnswer(problem)
	if err != nil {
		log.Fatalf("Unable to fetch answer : %s", err)
	}
	ans, err := problem.GetBoolAnswer()
	if err != nil {
		log.Fatalf("Unable to get answer : %s", err)
	}
	return ans
}

// selectStorageClass asks the user to pick one of storageClasses for the
// given claim (or for all claims when claimName == alloption). The first
// class is offered as the default.
func (ic storageCustomizer) selectStorageClass(storageClasses []string, claimName string, services []string) string {
	var desc string
	if claimName == alloption {
		desc = "Which storage class to use for all persistent volume claims?"
	} else {
		desc = fmt.Sprintf("Which storage class to use for persistent volume claim [%s] used by %+v", claimName, services)
	}
	problem, err := qatypes.NewSelectProblem(desc,
		[]string{"If you have a custom cluster, you can use collect to get storage classes from it."},
		storageClasses[0],
		storageClasses)
	if err != nil {
		log.Fatalf("Unable to create problem : %s", err)
	}
	problem, err = qaengine.FetchAnswer(problem)
	if err != nil {
		log.Fatalf("Unable to fetch answer : %s", err)
	}
	sc, err := problem.GetStringAnswer()
	if err != nil {
		log.Fatalf("Unable to get answer : %s", err)
	}
	return sc
}

// getPVCs maps each PVC-backed storage name to the services whose volumes
// reference it (possibly an empty list).
func (ic *storageCustomizer) getPVCs() map[string][]string {
	pvcmap := make(map[string][]string)
	for _, s := range ic.ir.Storages {
		if s.StorageType == irtypes.PVCKind {
			svcList := []string{}
			for svcName, svc := range ic.ir.Services {
				for _, v := range svc.Volumes {
					if v.Name == s.Name {
						svcList = append(svcList, svcName)
						break
					}
				}
			}
			pvcmap[s.Name] = svcList
		}
	}
	return pvcmap
}
package middleware import ( "github.com/gin-gonic/gin" ) func TraceHandelMiddleWare() gin.HandlerFunc { return func(c *gin.Context) { headers := []string{ "X-Request-Id", "X-B3-TraceId", "X-B3-SpanId", "X-B3-ParentSpanId", "X-B3-Sampled", "X-B3-Flags", "X-Ot-Span-Context", "appid", "X-Username", } for _, header := range headers { if c.Request.Header.Get(header) != "" { c.Set(header, c.Request.Header.Get(header)) } } } }
package profile

import (
	// "github.com/bwmarrin/discordgo"
	// "encoding/json"
	// "../errors"
	"encoding/json"
	"errors"
	// NOTE(review): import path casing ("Schema") differs from the package
	// name used below ("schema") — confirm this resolves on a case-sensitive
	// module cache.
	"github.com/gorilla/Schema"
	// "html"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

// COOKIE_COST is the credit price of one cookie.
const COOKIE_COST = 20

// UserData mirrors the profile JSON returned by the backing API.
// API_ENDPOINT, ERR_NOUSER and ERR_USEREXISTS are defined elsewhere in this
// package.
type UserData struct {
	Cookies  int
	Id       string
	Username string
	Credits  int
	Profile  string
	Title    string
}

// AddCredits posts a credit grant for the given user to the API.
//
// NOTE(review): log.Fatal in these helpers terminates the whole process on
// any HTTP error, response bodies are never closed, and the final
// `body == nil` check is suspect — ReadAll returns a non-nil (possibly
// empty) slice on success, so the "Unsuccessful" branch fires on any
// response content. Confirm the intended success condition.
func AddCredits(id string, username string, amount int) error {
	username = strings.ToLower(username)
	log.Printf("=== Adding %d credits to %s (%s)", amount, username, id)
	sendBody, err := url.ParseQuery("username=" + username + "&amount=" + strconv.Itoa(amount) + "&id=" + id)
	if err != nil {
		log.Println("Cannot parse query")
		log.Fatal(err)
	}
	resp, err := http.PostForm(API_ENDPOINT+"credits/add/", sendBody)
	if err != nil {
		log.Println("didn't scucess the post")
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("didn't suceess reading the body")
		log.Fatal(err)
	}
	if body == nil {
		return nil
	} else {
		return errors.New("Unsuccessful")
	}
}

// CheckStats fetches the raw profile JSON for a user id. Returns an
// ERR_NOUSER error when the API reports the user does not exist.
func CheckStats(id string) (string, error) {
	// username = strings.ToLower(username)
	log.Printf("=== Checking status for: %s", id)
	resp, err := http.Get(API_ENDPOINT + "find/?id=" + id)
	if err != nil {
		log.Println("Unable to hit GET find/?name=")
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("Unable to read response body")
		log.Fatal(err)
	}
	log.Printf("%s", body)
	if string(body) == ERR_NOUSER {
		log.Println("User doesn't exist.")
		return "", errors.New(ERR_NOUSER)
	}
	return string(body), nil
}

// func CheckUserExists(username string) bool {
// 	username = strings.ToLower(username)
// 	log.Printf("=== Checking for this user: %s", username)
// 	resp, err := http.Get(API_ENDPOINT + "find/?name=" + username)
// }

// GiveCookie transfers `amount` cookies from giver to recipient, deducting
// COOKIE_COST credits per cookie from the giver via the profile API.
//
// NOTE(review): the inner `data, err :=` re-declarations shadow the outer
// err, and unmarshal/encode errors are logged but not returned — a failed
// decode proceeds with a zero-value UserData.
func GiveCookie(giverId string, recipientId string, amount int) error {
	log.Println("== Giving Cookie ==")
	log.Printf("From: (%s) To: (%s) Amount: %d", giverId, recipientId, amount)
	if amount < 0 {
		return errors.New("Amount below 0")
	}
	// Get giver info
	data, err := CheckStats(giverId)
	giverInfo := UserData{}
	err = json.Unmarshal([]byte(data), &giverInfo)
	if err != nil {
		log.Printf("\n%v\n", err)
	}
	log.Printf("GiverInfo: %+v", giverInfo)
	// deduct amount from giver
	if giverInfo.Credits < COOKIE_COST*amount {
		return errors.New("Not enough credits")
	} else {
		giverInfo.Credits -= COOKIE_COST * amount
		// Update giver info
		sendInfo := url.Values{}
		encoder := schema.NewEncoder()
		err = encoder.Encode(giverInfo, sendInfo)
		_, err = http.PostForm(API_ENDPOINT+"profile/update/", sendInfo)
		if err != nil {
			return errors.New("Unsuccessful credit deduct")
		}
		// Get recipient info
		data, err := CheckStats(recipientId)
		recipInfo := UserData{}
		err = json.Unmarshal([]byte(data), &recipInfo)
		if err != nil {
			log.Printf("Error: %v", err)
		}
		log.Printf("RecipInfo: %+v", recipInfo)
		recipInfo.Cookies += amount
		// Update recipient info
		sendInfo = url.Values{}
		err = encoder.Encode(recipInfo, sendInfo)
		_, err = http.PostForm(API_ENDPOINT+"profile/update/", sendInfo)
		if err != nil {
			return errors.New("Unsuccessful cookie give")
		}
		return nil
	}
}

// RegisterUser creates a new profile for (id, username) after confirming one
// does not already exist. Returns the new user's JSON on success.
func RegisterUser(id string, username string) (string, error) {
	username = strings.ToLower(username)
	log.Printf("== Registering this user: %s (%s)", username, id)
	// Check that it doesn't currently exist
	log.Println("Looking for user in database...")
	resp, err := http.Get(API_ENDPOINT + "find/?name=" + username + "&id=" + id)
	log.Println("Done")
	log.Println("Reading GET response...")
	body, err2 := ioutil.ReadAll(resp.Body)
	if err2 != nil {
		log.Println("Unable to read response body")
		log.Fatal(err2)
		return "", err2
	}
	log.Println("Done")
	if string(body) == ERR_USEREXISTS {
		// Shouldn't find any existing users with same name
		log.Println("User already exists")
		return "", errors.New(ERR_USEREXISTS)
	} else if err != nil {
		log.Println("Other error from find endpoint")
		log.Printf("\nError\n%v\n", err)
		return "", err
	}
	// Going ahead w/ account creation
	log.Println("Calling registration endpoint...")
	resp, err = http.Get(API_ENDPOINT + "add/user/?name=" + username + "&id=" + id)
	if err != nil {
		log.Println("Unable to hit GET add/?name=")
		log.Fatal(err)
		return "", errors.New("Unable to hit GET add/?name=")
	}
	log.Println("Done")
	log.Println("Reading response body...")
	body, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("Unable to read response body")
		log.Fatal(err)
		return "", errors.New("Unable to read response body")
	}
	log.Println("Done")
	log.Printf("\nNew user data:\n%s\n", body)
	return string(body), nil
	// }
}

// SetProfile replaces the profile text for a user id via the edit endpoint.
// NOTE(review): same `body == nil` suspicion as AddCredits above.
func SetProfile(id string, profileBody string) (string, error) {
	log.Printf("== Registering this user: %s ", id)
	profileBody = url.QueryEscape(profileBody)
	sendBody, err := url.ParseQuery("id=" + id + "&profile=" + profileBody)
	if err != nil {
		log.Println("Parse query error")
		log.Fatal(err)
	}
	resp, err := http.PostForm(API_ENDPOINT+"profile/edit/", sendBody)
	if err != nil {
		log.Println("didn't scucess the post")
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("didn't suceess reading the body")
		log.Fatal(err)
	}
	log.Printf("Body: %+v", body)
	if body == nil {
		return "", nil
	} else {
		return "", errors.New("Unsuccessful")
	}
}
package struct_flags

import (
	"context"
	"encoding/json"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/go-playground/validator.v9"
	"io/ioutil"
	"os"
	"testing"
)

// TestFlagSet_UnmarshalFlags exercises flag parsing into a struct: defaults,
// overrides, nested structs, list/map accumulation across repeated flags,
// remaining positional args, and env-var fallback via the `env` tag.
func TestFlagSet_UnmarshalFlags(t *testing.T) {
	type Object struct {
		String1 string `flag:"string1" usage:"nested.string1"`
		String2 string `flag:"string2" usage:"nested.string2"`
	}
	type Flags struct {
		String      string            `flag:"string" usage:"string"`
		Int         int               `flag:"int" usage:"int"`
		Bool        bool              `flag:"bool" env:"BOOL" usage:"bool"`
		List        []string          `flag:"list" usage:"list"`
		NestedFlags Object            `flag:"nested" usage:"nested"`
		Map         map[string]string `flag:"map" usage:"map"`
	}
	// No args, no defaults: everything stays zero-valued.
	fs := NewFlagSet("", &Flags{})
	flags := Flags{}
	args, err := fs.UnmarshalFlags([]string{}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{}, args)
	// Defaults from the template struct survive when the flag is absent...
	fs = NewFlagSet("", &Flags{String: "default"})
	flags = Flags{}
	args, err = fs.UnmarshalFlags([]string{}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{}, args)
	assert.Equal(t, "default", flags.String)
	// ...and are overridden when it is present.
	fs = NewFlagSet("", &Flags{String: "default"})
	flags = Flags{}
	args, err = fs.UnmarshalFlags([]string{"--string=test"}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{}, args)
	assert.Equal(t, "test", flags.String)
	fs = NewFlagSet("", &Flags{Bool: true})
	flags = Flags{}
	args, err = fs.UnmarshalFlags([]string{"--bool=true"}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{}, args)
	assert.Equal(t, true, flags.Bool)
	// Non-flag tail is returned as positional args.
	fs = NewFlagSet("", &Flags{Int: 2})
	flags = Flags{}
	args, err = fs.UnmarshalFlags([]string{"--int=1", "arg1", "arg2"}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{"arg1", "arg2"}, args)
	assert.Equal(t, 1, flags.Int)
	// Repeated --list/--map flags accumulate; nested fields use dotted names;
	// unset Int keeps its template default (2).
	fs = NewFlagSet("", &Flags{Int: 2, NestedFlags: Object{
		String1: "default",
	}})
	flags = Flags{}
	args, err = fs.UnmarshalFlags([]string{"--list=a,b,c", "--nested.string2=d", "--map=a=1,b=2", "--map=x=z", "--list=d", "arg1", "arg2"}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{"arg1", "arg2"}, args)
	assert.Equal(t, 2, flags.Int)
	assert.Equal(t, []string{"a", "b", "c", "d"}, flags.List)
	assert.Equal(t, "default", flags.NestedFlags.String1)
	assert.Equal(t, "d", flags.NestedFlags.String2)
	assert.Equal(t, map[string]string{"a": "1", "b": "2", "x": "z"}, flags.Map)
	// The `env:"BOOL"` tag fills the field from the environment when the
	// flag is not given.
	require.NoError(t, os.Setenv("BOOL", "true"))
	fs = NewFlagSet("", &Flags{})
	flags = Flags{}
	args, err = fs.UnmarshalFlags([]string{}, &flags)
	assert.NoError(t, err)
	assert.Equal(t, []string{}, args)
	assert.Equal(t, true, flags.Bool)
}

// TestFlagSetUsage verifies that a `validate:"required"` failure can be
// mapped back to the struct field (and hence its flag name) for usage output.
func TestFlagSetUsage(t *testing.T) {
	type ex1 struct {
		String string `flag:"string" validate:"required"`
	}
	command := NewCommand("cmd", ex1{}, "", func(_ context.Context, flags ex1) error {
		return nil
	})
	err := command.Execute(context.TODO(), ex1{})
	verr, ok := err.(validator.ValidationErrors)
	require.True(t, ok)
	f, ok := getStructFieldForError(verr[0], ex1{})
	require.True(t, ok)
	println(f.Tag.Get("flag"))
}

// TestNestedCommand checks dispatch to a top-level command and to a command
// nested inside a command group.
func TestNestedCommand(t *testing.T) {
	type cmd struct {
		String string `flag:"string" validate:"required"`
	}
	value := ""
	command := NewCommand("top", cmd{}, "", func(_ context.Context, flags cmd) error {
		value = "top " + flags.String
		return nil
	})
	subCommand := NewCommand("cmd", cmd{}, "", func(_ context.Context, flags cmd) error {
		value = "sub " + flags.String
		return nil
	})
	commands := Commands{command, NewCommandGroup("has_sub", "", subCommand)}
	require.NoError(t, commands.Run(context.TODO(), []string{"<exe>", "top", "--string=1"}))
	require.Equal(t, "top 1", value)
	require.NoError(t, commands.Run(context.TODO(), []string{"<exe>", "has_sub", "cmd", "--string=v"}))
	require.Equal(t, "sub v", value)
}

// TestCommand_PositionalArgs checks `flag:"[name]"` positional binding: one
// scalar arg, then a slice that swallows the rest (including dash-prefixed
// tokens).
func TestCommand_PositionalArgs(t *testing.T) {
	type cmd struct {
		String  string   `flag:"[string]" validate:"required"`
		Strings []string `flag:"[strings]" validate:"required"`
	}
	var result cmd
	command := NewCommand("cmd [string] [strings]", cmd{}, "", func(_ context.Context, flags cmd) error {
		result = flags
		return nil
	})
	commands := Commands{command}
	require.NoError(t, commands.Run(context.TODO(), []string{"<exe>", "cmd", "value", "remaining1", "remaining2", "-remaining3"}))
	require.Equal(t, "value", result.String)
	require.Equal(t, []string{"remaining1", "remaining2", "-remaining3"}, result.Strings)
}

// TestArgFile checks @file argument expansion: the JSON arg file supplies the
// command, its args, and env entries with ${var} substitution.
func TestArgFile(t *testing.T) {
	type cmd struct {
		String          string `flag:"string" validate:"required"`
		StringFromEnv   string `flag:"stringFromEnv" env:"STRING_FROM_ENV" validate:"required"`
		IntFromEnvValue string `flag:"intFromEnvValue" validate:"required"`
	}
	var argFile *ArgFile
	var collectedFlags cmd
	command := NewCommand("cmd", cmd{}, "", func(ctx context.Context, flags cmd) error {
		argFile = getArgFile(ctx)
		collectedFlags = flags
		return nil
	})
	commands := Commands{command}
	argFileFile, err := ioutil.TempFile("", t.Name()+"-argfile.txt")
	require.NoError(t, err)
	argFileFilename := argFileFile.Name()
	defer argFileFile.Close()
	defer os.RemoveAll(argFileFilename)
	argFileData, _ := json.Marshal(ArgFile{
		Command: []string{"cmd"},
		Args:    []string{"--string=test", "--intFromEnvValue=$b"},
		Env: []string{
			"b=1",
			"STRING_FROM_ENV=a${b}",
		},
	})
	_, err = argFileFile.Write(argFileData)
	require.NoError(t, err)
	argFileFile.Close()
	require.NoError(t, commands.Run(context.TODO(), []string{"<exe>", "@" + argFileFilename}))
	require.NotNil(t, argFile)
	require.Equal(t, "test", collectedFlags.String)
	require.Equal(t, "a1", collectedFlags.StringFromEnv)
	require.Equal(t, "1", collectedFlags.IntFromEnvValue)
}
package main

import (
	"errors"
)

// Cell is the content of one board square.
type Cell int

const (
	Empty Cell = iota
	Circle
	Cross
)

// Reversed returns the opposing mark, or Empty for an empty cell.
func (c *Cell) Reversed() Cell {
	switch *c {
	case Circle:
		return Cross
	case Cross:
		return Circle
	default:
		return Empty
	}
}

// Appearance returns the rune drawn for this cell.
func (c *Cell) Appearance() rune {
	switch *c {
	case Circle:
		return 'o'
	case Cross:
		return 'x'
	default:
		return ' '
	}
}

var (
	ErrAlreadyPlaced   = errors.New("Already placed")
	ErrIndexOutOfBoard = errors.New("Index out of board")
)

// Board is a 3x3 tic-tac-toe grid, indexed [x][y].
type Board [3][3]Cell

// NewBoard returns an empty board.
func NewBoard() *Board {
	return &Board{}
}

// Put places c at (x, y), rejecting out-of-range coordinates and occupied
// squares.
func (b *Board) Put(x, y int, c Cell) error {
	if x < 0 || x >= 3 || y < 0 || y >= 3 {
		return ErrIndexOutOfBoard
	}
	if b[x][y] != Empty {
		return ErrAlreadyPlaced
	}
	b[x][y] = c
	return nil
}

// Winner returns the mark owning a completed line (row, column or diagonal),
// or Empty when there is no winner yet.
func (b *Board) Winner() Cell {
	// Bug fix: the original condition parsed as
	// (b[1][1] != Empty && diag1) || diag2 because && binds tighter than ||,
	// leaving the anti-diagonal unguarded. An all-Empty anti-diagonal then
	// matched and returned early, skipping the row/column checks below. The
	// guard now explicitly covers both diagonals.
	if b[1][1] != Empty &&
		((b[0][0] == b[1][1] && b[1][1] == b[2][2]) ||
			(b[0][2] == b[1][1] && b[1][1] == b[2][0])) {
		return b[1][1]
	}
	for x := 0; x < 3; x++ {
		if b[x][0] != Empty && b[x][0] == b[x][1] && b[x][1] == b[x][2] {
			return b[x][0]
		}
	}
	for y := 0; y < 3; y++ {
		if b[0][y] != Empty && b[0][y] == b[1][y] && b[1][y] == b[2][y] {
			return b[0][y]
		}
	}
	return Empty
}

// Finished reports whether the game is over: someone has won, or no empty
// squares remain.
func (b *Board) Finished() bool {
	if b.Winner() != Empty {
		return true
	}
	for x := 0; x < 3; x++ {
		for y := 0; y < 3; y++ {
			if b[x][y] == Empty {
				// An open square with no winner means play continues.
				return false
			}
		}
	}
	return true
}
package payments

import (
	"fmt"
	"reflect"

	"git.zhuzi.me/zzjz/zhuzi-payment/lib/log"
	"git.zhuzi.me/zzjz/zhuzi-payment/model/payment_order"
)

// Wx carries the fields of a WeChat payment callback used to create an order.
type Wx struct {
	OrderID       string      // order number
	TransactionID string      // payment transaction id
	TotalFee      int         // order amount in cents
	ReturnData    []byte      // raw callback payload
	AttachData    interface{} // caller-defined attachment
}

// InsertOrder persists a payment order built from the callback data. Errors
// are logged rather than returned (matching the existing call sites).
func InsertOrder(wx *Wx) {
	log.Print("wx: ", wx)
	var err error
	log.Print(fmt.Sprintf("%s %+v\n", wx.AttachData, wx.AttachData))
	payment := &payment_order.PaymentOrder{
		OrderId:       wx.OrderID,
		TranscationID: wx.TransactionID,
		PriceTotal:    wx.TotalFee,
		ReturnData:    wx.ReturnData,
		AttachData:    wx.AttachData,
	}
	_, err = payment.Insert()
	if err != nil {
		log.Error("生成订单失败!,err", err.Error())
		return
	}
}

// struct2map flattens a struct value into a map keyed by each field's json
// tag, falling back to the xml tag and then the Go field name. Fields tagged
// "xml" exactly (commonly the XMLName marker) are skipped.
//
// NOTE(review): obj must be a struct value, not a pointer — reflect.ValueOf
// on a pointer would panic at NumField. Confirm all callers pass values.
func struct2map(obj interface{}) map[string]interface{} {
	var data = make(map[string]interface{})
	t := reflect.TypeOf(obj)
	v := reflect.ValueOf(obj)
	// Iterate over the struct's fields (NumField is the field count).
	for i := 0; i < v.NumField(); i++ {
		var fieldName string
		if fieldName = t.Field(i).Tag.Get("json"); len(fieldName) == 0 {
			if fieldName = t.Field(i).Tag.Get("xml"); len(fieldName) == 0 {
				fieldName = t.Field(i).Name
			}
		}
		if fieldName == "xml" {
			continue
		}
		data[fieldName] = v.Field(i).Interface()
	}
	return data
}
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Split splits str on every occurrence of the separator seq, matching the
// semantics of strings.Split for non-empty separators.
//
// Bug fixes vs. the original:
//   - a separator at index 0 was silently ignored (the loop condition was
//     `index > 0` instead of checking for -1), so Split("babc", "b") returned
//     ["babc"] instead of ["", "a", "c"];
//   - the remainder advanced by 1 byte instead of len(seq), corrupting the
//     result for separators longer than one byte.
func Split(str string, seq string) []string {
	var ret []string
	if seq == "" {
		// Preserve the original's degenerate-case behavior: an empty
		// separator returns the input as a single element.
		return append(ret, str)
	}
	for {
		index := strings.Index(str, seq)
		if index < 0 {
			break
		}
		ret = append(ret, str[:index])
		str = str[index+len(seq):]
	}
	return append(ret, str)
}

func main() {
	fmt.Println(Split("abcabcdba", "b"))
}

// keep reflect referenced for test helpers in this package
var _ = reflect.DeepEqual
package logger

import (
	"testing"
)

// TestConsole smoke-tests the console backend: configure a logger with a
// buffered queue, point it at the console protocol, and emit one message at
// each level. There are no assertions — the test passes if nothing blows up.
// NOTE(review): lg.Panic presumably logs at panic level rather than calling
// panic(), since the test expects to continue past it — confirm.
func TestConsole(t *testing.T) {
	lg := NewLogger(10000) // 10000 = queue/buffer size
	lg.SetFuncDepth(2)     // caller-frame depth for file/line reporting
	lg.SetPrefix("consoletest")
	lg.SetLogger(CONSOLE_PROTOCOL, "")
	lg.Panic("Panic")
	lg.Error("error")
	lg.Warn("warn")
	lg.Info("info")
	lg.Debug("debug")
	t.Log("console test success.")
}
package main

import "fmt"

// contactInfo holds a person's contact details.
type contactInfo struct {
	email string
}

// person couples a name with its contact information.
type person struct {
	firstName string
	lastName  string
	contact   contactInfo
}

func main() {
	mike := person{
		firstName: "Mike",
		lastName:  "Sanford",
		contact:   contactInfo{email: "mikethecodegeek@gmail.com"},
	}
	mike.updateName("Michael")
	mike.print()
}

// updateName overwrites the person's first name in place (pointer receiver,
// so the caller's value is modified).
func (p *person) updateName(firstName string) {
	p.firstName = firstName
}

// print writes the struct's default formatting to stdout.
func (p person) print() {
	fmt.Println(p)
}
/* * @lc app=leetcode.cn id=1185 lang=golang * * [1185] 一周中的第几天 */ package main import ( "time" ) // @lc code=start func dayOfTheWeek(day int, month int, year int) string { t := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Now().Location()) return t.Weekday().String() } // func main() { // fmt.Println(dayOfTheWeek(31, 8, 2019)) // } // @lc code=end
package common

import "fmt"

// Pslice prints the ints in *slice space-separated, followed by a newline.
func Pslice(slice *[]int) {
	for i := range *slice {
		fmt.Printf("%v ", (*slice)[i])
	}
	fmt.Println()
}

// PsliceS prints the strings in *slice space-separated, followed by a
// newline.
func PsliceS(slice *[]string) {
	for i := range *slice {
		fmt.Printf("%v ", (*slice)[i])
	}
	fmt.Println()
}

// Pmap prints one "k -> key, v -> value" line per entry of *m. Map iteration
// order is unspecified, so line order varies between runs.
func Pmap(m *map[string]string) {
	for key, value := range *m {
		fmt.Printf("k -> %v, v -> %v \n", key, value)
	}
}
package main

// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// postorderSequence holds the most recently computed traversal. Retained for
// backward compatibility with code that read it as a side channel; prefer
// the return value of postorderTraversal.
var postorderSequence []int

// postorderTraversal returns the postorder (left, right, root) traversal of
// the tree.
//
// Fix: the original accumulated into the package-level slice from a helper,
// making nested or concurrent calls unsafe. Accumulation is now local via a
// closure; the package variable is only assigned the final result.
func postorderTraversal(root *TreeNode) []int {
	seq := make([]int, 0)
	var walk func(node *TreeNode)
	walk = func(node *TreeNode) {
		if node == nil {
			return
		}
		walk(node.Left)
		walk(node.Right)
		seq = append(seq, node.Val)
	}
	walk(root)
	postorderSequence = seq
	return seq
}

// postorderTraversalExec appends node values to postorderSequence in
// postorder. Kept so existing callers of the old helper keep working.
func postorderTraversalExec(root *TreeNode) {
	if root == nil {
		return
	}
	postorderTraversalExec(root.Left)
	postorderTraversalExec(root.Right)
	postorderSequence = append(postorderSequence, root.Val)
}

/*
Problem:
https://leetcode-cn.com/problems/binary-tree-postorder-traversal/submissions/
*/
package diffbot

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestArticle is an integration test: it fetches a live Daily Mail article
// through the Diffbot Article API (client() is defined elsewhere in the
// package) and checks that the first extracted object has text, author and
// title. It requires network access and valid credentials.
// NOTE(review): assert.NotEqual(t, 0, out.Objects) compares an int against a
// slice, so it always passes — len(out.Objects) was probably intended, and
// an empty Objects slice would panic on the next line.
func TestArticle(t *testing.T) {
	c := client()
	url := "http://www.dailymail.co.uk/sciencetech/article-2355833/Apples-iPhone-5-hated-handset--majority-people-love-Samsung-Galaxy-S4-study-finds.html"
	in := c.NewArticleInput(url)
	out, err := c.Article(in)
	assert.Nil(t, err)
	assert.NotEqual(t, 0, out.Objects)
	object := out.Objects[0]
	assert.NotEqual(t, "", object.Text)
	assert.NotEqual(t, "", object.Author)
	assert.NotEqual(t, "", object.Title)
}
package main

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strconv"
)

// Drawing elements for the tree output.
const prefix = "───"
const middle = "├"
const middleParent = "│"
const last = "└"

// main prints a directory tree for os.Args[1]; the optional -f flag includes
// regular files in the listing.
func main() {
	if len(os.Args) != 2 && len(os.Args) != 3 {
		panic("usage go run main.go . [-f]")
	}
	root := os.Args[1]
	withFiles := len(os.Args) == 3 && os.Args[2] == "-f"
	if err := dirTree(os.Stdout, root, withFiles); err != nil {
		panic(err.Error())
	}
}

// dirTree writes the recursive listing of path to out. When printFiles is
// false only directories are shown.
func dirTree(out io.Writer, path string, printFiles bool) error {
	return walkDir(out, path, printFiles, "")
}

// walkDir renders one directory level, prefixing every emitted line with
// spacePrefix, and recurses into subdirectories with an extended prefix.
func walkDir(out io.Writer, path string, printFiles bool, spacePrefix string) error {
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		return err
	}
	sort.SliceStable(entries, func(i, j int) bool {
		return entries[i].Name() < entries[j].Name()
	})
	if !printFiles {
		dirs := make([]os.FileInfo, 0, len(entries))
		for _, e := range entries {
			if e.IsDir() {
				dirs = append(dirs, e)
			}
		}
		entries = dirs
	}
	for i, entry := range entries {
		isLast := i == len(entries)-1
		connector := middle
		childPrefix := spacePrefix + middleParent + "\t"
		if isLast {
			connector = last
			childPrefix = spacePrefix + "\t"
		}
		line := spacePrefix + connector + prefix + entry.Name() + fileSize(entry) + "\n"
		if _, err := out.Write([]byte(line)); err != nil {
			return err
		}
		if entry.IsDir() {
			if err := walkDir(out, filepath.Join(path, entry.Name()), printFiles, childPrefix); err != nil {
				return err
			}
		}
	}
	return nil
}

// fileSize formats the size suffix for a regular file; directories get no
// suffix and empty files are marked "(empty)".
func fileSize(info os.FileInfo) string {
	if info.IsDir() {
		return ""
	}
	if info.Size() == 0 {
		return " (empty)"
	}
	return " (" + strconv.FormatInt(info.Size(), 10) + "b)"
}
package main

/*
329. Longest Increasing Path in a Matrix

Given an integer matrix, find the length of the longest strictly increasing
path. From each cell you may move up, down, left or right; diagonal moves
and moving past the border (wrapping) are not allowed.

Example 1:
	input: [[9,9,4],[6,6,8],[2,1,1]]
	output: 4 (longest increasing path is [1, 2, 6, 9])

Example 2:
	input: [[3,4,5],[3,2,6],[2,2,1]]
	output: 4 (longest increasing path is [3, 4, 5, 6])
*/

// Row/column offsets of the four axis-aligned neighbour cells.
var (
	rowP = []int{-1, 1, 0, 0}
	celP = []int{0, 0, 1, -1}
)

// LongestIncreasingPath returns the length of the longest strictly
// increasing path in matrix using DFS with memoization; O(rows*cols) time.
func LongestIncreasingPath(matrix [][]int) int {
	row := len(matrix)
	if row == 0 {
		return 0
	}
	cel := len(matrix[0])
	if cel == 0 {
		return 0
	}
	// tmp[i][j] caches the longest path starting at (i, j); 0 = not yet
	// computed (every real path has length >= 1).
	tmp := make([][]int, row)
	for i := 0; i < row; i++ {
		tmp[i] = make([]int, cel)
	}
	maxPathVal := 0
	for i := 0; i < row; i++ {
		for j := 0; j < cel; j++ {
			maxPathVal = max(maxPathVal, maxPath(i, j, matrix, tmp))
		}
	}
	return maxPathVal
}

// maxPath returns the length of the longest increasing path starting at
// (rowAt, celAt), memoizing results in tmp.
func maxPath(rowAt int, celAt int, matrix [][]int, tmp [][]int) int {
	// Fix: consult the cache up front. The original only checked tmp for
	// neighbours, so every top-level call re-expanded cells that were
	// already fully solved.
	if tmp[rowAt][celAt] != 0 {
		return tmp[rowAt][celAt]
	}
	maxPathVal := 1
	for i := 0; i < 4; i++ {
		currRow := rowAt + rowP[i]
		currCel := celAt + celP[i]
		if currRow >= 0 && currCel >= 0 && currRow < len(tmp) && currCel < len(tmp[0]) &&
			matrix[currRow][currCel] > matrix[rowAt][celAt] {
			maxPathVal = max(maxPathVal, maxPath(currRow, currCel, matrix, tmp)+1)
		}
	}
	tmp[rowAt][celAt] = maxPathVal
	return maxPathVal
}

// max returns the larger of a and b.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
package pwh

// defaultPWH is the package-level hasher backing the convenience wrappers
// below, created with public salt "gox" and cost 10.
var defaultPWH = New("gox", 10)

// Config re-configures the default hasher's public salt and cost.
func Config(publicSalt string, cost int) {
	defaultPWH.Config(publicSalt, cost)
}

// Hash hashes word with the given per-entry salt using the default hasher.
func Hash(word string, salt string) string {
	return defaultPWH.Hash(word, salt)
}

// Match reports whether word and salt reproduce hash under the default
// hasher.
func Match(word string, salt string, hash string) bool {
	return defaultPWH.Match(word, salt, hash)
}
package utils

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// Get performs an HTTP GET against url. head entries are applied as request
// headers. Returns the response body on a 200 status.
func Get(url string, body io.Reader, head, fdata map[string]string) ([]byte, error) {
	return sendHttpRequest("GET", url, body, head, fdata)
}

// Post performs an HTTP POST against url. head entries are applied as
// request headers. Returns the response body on a 200 status.
func Post(url string, body io.Reader, head, fdata map[string]string) ([]byte, error) {
	return sendHttpRequest("POST", url, body, head, fdata)
}

// sendHttpRequest builds and executes the request, returning the response
// body. Any status other than 200 is reported as an error.
func sendHttpRequest(method, url string, body io.Reader, head, fdata map[string]string) ([]byte, error) {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return []byte{}, err
	}
	// Fix: head was previously accepted but never applied to the request.
	for k, v := range head {
		req.Header.Set(k, v)
	}
	// NOTE(review): fdata is still unused — presumably intended as form
	// data; confirm against callers before wiring it up.
	cli := http.Client{}
	resp, err := cli.Do(req)
	if err != nil {
		return []byte{}, err
	}
	// Fix: the response body was never closed, leaking connections (the
	// non-200 branch returned without closing as well).
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return []byte{}, fmt.Errorf("http response status code: %v", resp.StatusCode)
	}
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []byte{}, err
	}
	return out, nil
}
package discord

import (
	"fmt"
)

// User defines a user of Discord.
type User struct {
	ID       string `json:"id"`
	Name     string `json:"username"`
	Email    string `json:"email"`
	Verified bool   `json:"verified"`
	Avatar   string `json:"avatar"`
	// TODO: Sometimes int, sometimes string.
	// Discriminator string `json:"discriminator,string"`
}

// AvatarURL returns the URL of the user's avatar image, or the empty string
// when the user has no avatar set.
func (u *User) AvatarURL() string {
	if u.Avatar != "" {
		return fmt.Sprintf("%s/%s/avatars/%s.jpg", apiUsers, u.ID, u.Avatar)
	}
	return ""
}

// Ban bans the user from the given server.
func (u *User) Ban(client *Client, server Server) error {
	return client.Ban(server, *u)
}

// Unban unbans the user from the given server.
func (u *User) Unban(client *Client, server Server) error {
	return client.Unban(server, *u)
}

// Kick kicks the user from the given server.
func (u *User) Kick(client *Client, server Server) error {
	return client.Kick(server, *u)
}

// CreatePrivateChannel creates a private (direct message) channel with this
// user.
func (u *User) CreatePrivateChannel(client *Client) (PrivateChannel, error) {
	return client.CreatePrivateChannel(*u)
}
// ----------------------------------------------------------------------------- // Common package used for containing shared code. // ----------------------------------------------------------------------------- package common import ( "fmt" "log" "github.com/streadway/amqp" ) // Constants related to the RabbitMQ URLs. const ( // https://www.rabbitmq.com/uri-spec.html URL_GUEST = "amqp://guest@localhost:5672" URL_ADMIN = "amqp://admin:admin@localhost:5672" ) // Constants related to the RabbitMQ exchanges names. const ( DEFAULT_EXCHANGE = "" // Default direct exchange FANOUT_EXCHANGE = "amq.fanout" // Default fanout exchange DISCOVERY_EXCHANGE = "sensor.discovery" // Custom exchange used for sending discovery requests. WEBAPP_SOURCE_EXCHANGE = "webapp.sources" WEBAPP_READINGS_EXCHANGE = "webapp.readings" WEBAPP_DISCOVERY_EXCHANGE = "webapp.discovery" ) // Constants related to the exchange types. const ( DIRECT = "direct" FANOUT = "fanout" TOPIC = "topic" HEADER = "header" ) // Constants related to the message queues. const ( DISCOVERY_QUEUE = "discovery.queue" PERSISTENCE_QUEUE = "persistence.queue" WEBAPP_DISCOVERY_QUEUE = "webapp.discovery.queue" ) type Event string // Constants related to suppoerted event types. const ( SENSOR_DISCOVER_EVENT = Event("SensorDiscovered") MESSAGE_RECEIVED_EVENT = Event("MessageReceived") ) //----------------------------------------------------------------------------- // NewEvent - Creates specific event based on source event and qualifier. //----------------------------------------------------------------------------- func NewEvent(event Event, qualifier string) Event { return event + Event("_"+qualifier) } // ----------------------------------------------------------------------------- // GetChannel - Helper function that returns amqp channele for the given URL. // // amqp.Connection - network connection between the application and RabbitMQ. // amqp.Channel - provides a path fo communication over connection. 
// ----------------------------------------------------------------------------- func GetChannel(url string) (*amqp.Connection, *amqp.Channel) { connection, error := amqp.Dial(URL_GUEST) FailOnError(error, "Failed to connect to RabitMQ") channel, error := connection.Channel() FailOnError(error, "Failed to optain a channel") return connection, channel } // ----------------------------------------------------------------------------- // GetQueue - Helper function that returns amqp queue // for the given queue name and associated channel. // // name - name of the requested queue. // amqp.Channel - provides a path fo communication over connection. // ----------------------------------------------------------------------------- func GetQueue(name string, channel *amqp.Channel, autoDelete bool) *amqp.Queue { queue, error := channel.QueueDeclare( name, //name false, //durable autoDelete, //autoDelete false, //exclusive false, //noWait nil) //args FailOnError(error, "Failed to declare a queue") return &queue } // ----------------------------------------------------------------------------- // GetMessageQueue - Helper function that returns message queue whose publishing // is associated with the default exchange. // // amqp.Connection - network connection between the application and RabbitMQ. // amqp.Channel - provides a path fo communication over connection. // amqp.Queue - message queue. // ----------------------------------------------------------------------------- func GetDirectQueue(name string) (*amqp.Connection, *amqp.Channel, *amqp.Queue) { connection, channel := GetChannel(URL_GUEST) queue := GetQueue(name, channel, false) return connection, channel, queue } // ----------------------------------------------------------------------------- // Advertise - Helper function used for publisheshing given name // to the rest of the system using given advertisement queue. // // name - that is advertised to the system. 
// amqp.Channel - provides a path fo communication over connection. // ----------------------------------------------------------------------------- func Advertise(name string, channel *amqp.Channel) { message := amqp.Publishing{Body: []byte(name)} // Fanout exchange doesn't need queue name to determin where the message goes. // It will send the message to every copy of the queue bound to exchange. // It's up to the consumer to create message queue. channel.Publish(FANOUT_EXCHANGE, "", false, false, message) } // ----------------------------------------------------------------------------- // Send - Helper function used for sending slice of data. // // data - that is to be sent. // amqp.Queue - message queue used for sending data. // amqp.Channel - provides a path fo communication over connection. // ----------------------------------------------------------------------------- func Send(data []byte, queue *amqp.Queue, channel *amqp.Channel) { message := amqp.Publishing{Body: data} channel.Publish(DEFAULT_EXCHANGE, queue.Name, false, false, message) } // ----------------------------------------------------------------------------- // FailOnError - Checks if the error occured and logs while panicking. // ----------------------------------------------------------------------------- func FailOnError(error error, message string) { if error != nil { log.Fatalf("%s: %s", message, error) panic(fmt.Sprintf("%s: %s", message, error)) } }
package main

import "math"

/*
 * @lc app=leetcode id=99 lang=golang
 *
 * [99] Recover Binary Search Tree
 */

/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */

/*
Idea (translated): simplify the problem to a sorted array in which two
elements were swapped — how do we find them?

Example: 1 2 3 5 4 6 7
==> Iterate front to back. The first time an element is smaller than its
    predecessor, assume that pair was swapped and record them as first and
    second.
==> If a second violation is found, update second to that later element.
An in-order traversal of the BST visits values in sorted order, so the same
scan works on the tree.
*/

// Traversal state: prev is the previously visited node, first/second the
// candidate swapped pair, findAll short-circuits once both are confirmed.
var prev *TreeNode
var first, second *TreeNode
var findAll bool

// recoverTree fixes a BST in which exactly two nodes' values were swapped.
// Key point: pointers are untouched; only node values are exchanged.
func recoverTree(root *TreeNode) {
	prev = &TreeNode{math.MinInt64, nil, nil}
	first, second = nil, nil
	findAll = false
	helper99(root)
	// Fix: guard against a tree that is already a valid BST (or a nil
	// root) — the original dereferenced nil in that case.
	if first != nil && second != nil {
		first.Val, second.Val = second.Val, first.Val
	}
}

// helper99 performs the in-order scan described above.
func helper99(root *TreeNode) {
	if root == nil || findAll {
		return
	}
	helper99(root.Left)
	if first == nil && root.Val < prev.Val {
		// First violation: assume prev and root were swapped.
		first = prev
		second = root
	} else if first != nil && root.Val < prev.Val {
		// Second violation: the true second element is this later node.
		second = root
		findAll = true
	}
	prev = root
	helper99(root.Right)
}
package rpc_user

import (
	"context"
	"fmt"
	"github.com/bqxtt/book_online/rpc/clients/rpc_user/userpb"
	"testing"
)

// TestUserService is an integration smoke test: it initialises the RPC
// client (Init) and issues a Register call with fixed sample data against
// the user service.
// NOTE(review): requires a reachable user service; the response/error is
// only printed, never asserted, so the test cannot fail on a bad reply.
func TestUserService(t *testing.T) {
	Init()
	resp, err := UserServiceClient.Register(context.Background(), &userpb.RegisterRequest{
		UserAuth: &userpb.UserAuth{
			UserId:    321,
			PwdDigest: "333",
		},
		User: &userpb.User{
			UserId: 321,
			Name:   "bqx",
		},
	})
	if err != nil {
		fmt.Printf("error: %v", err)
	} else {
		fmt.Println(resp)
	}
}
package factories import ( "github.com/barrydev/api-3h-shop/src/common/connect" "github.com/barrydev/api-3h-shop/src/connections" "github.com/barrydev/api-3h-shop/src/model" ) func FindCustomer(query *connect.QueryMySQL) ([]*model.Customer, error) { connection := connections.Mysql.GetConnection() queryString := ` SELECT _id, phone, address, full_name, email, updated_at FROM customers ` var args []interface{} if query != nil { queryString += query.QueryString args = query.Args } stmt, err := connection.Prepare(queryString) if err != nil { return nil, err } defer stmt.Close() rows, err := stmt.Query(args...) if err != nil { return nil, err } defer rows.Close() var listCustomer []*model.Customer for rows.Next() { _customer := model.Customer{} err = rows.Scan( &_customer.RawId, &_customer.RawPhone, &_customer.RawAddress, &_customer.RawFullName, &_customer.RawEmail, &_customer.RawUpdatedAt, ) if err != nil { return nil, err } _customer.FillResponse() listCustomer = append(listCustomer, &_customer) } if err = rows.Err(); err != nil { return nil, err } return listCustomer, nil }
package setr

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document01100104 is the XML document wrapper for the setr.011.001.04
// (SubscriptionOrderCancellationRequestV04) message.
type Document01100104 struct {
	XMLName xml.Name                                `xml:"urn:iso:std:iso:20022:tech:xsd:setr.011.001.04 Document"`
	Message *SubscriptionOrderCancellationRequestV04 `xml:"SbcptOrdrCxlReq"`
}

// AddMessage allocates the wrapped message and returns it for population.
func (d *Document01100104) AddMessage() *SubscriptionOrderCancellationRequestV04 {
	d.Message = new(SubscriptionOrderCancellationRequestV04)
	return d.Message
}

// Scope
// The SubscriptionOrderCancellationRequest message is sent by an instructing party, for example, an investment manager or its authorised representative, to the executing party, for example, a transfer agent, to request the cancellation of a previously sent SubscriptionOrder.
// Usage
// The SubscriptionOrderCancellationRequest message is used to request the cancellation of one or more individual orders.
// There is no amendment, but a cancellation and re-instruct policy.
// To request the cancellation of one or more individual orders, the order reference of each individual order listed in the original SubscriptionOrder message is specified in the order reference element. The message identification of the SubscriptionOrder message which contains the individual orders to be cancelled may also be quoted in PreviousReference but this is not recommended.
// The deadline and acceptance of a cancellation request is subject to a service level agreement (SLA). This cancellation message is a cancellation request. There is no automatic acceptance of the cancellation.
// The rejection or acceptance of a SubscriptionOrderCancellationRequest is made using an OrderCancellationStatusReport message.
type SubscriptionOrderCancellationRequestV04 struct {

	// Reference that uniquely identifies the message from a business application standpoint.
	MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`

	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference9 `xml:"PoolRef,omitempty"`

	// Reference to a linked message that was previously sent.
	PreviousReference *iso20022.AdditionalReference8 `xml:"PrvsRef,omitempty"`

	// Reference assigned to a set of orders or trades in order to link them together.
	MasterReference *iso20022.Max35Text `xml:"MstrRef,omitempty"`

	// Identification of the individual order to be cancelled.
	OrderReferences []*iso20022.InvestmentFundOrder9 `xml:"OrdrRefs"`

	// Information provided when the message is a copy of a previous message.
	CopyDetails *iso20022.CopyInformation4 `xml:"CpyDtls,omitempty"`
}

// AddMessageIdentification allocates and returns the MessageIdentification
// element.
func (s *SubscriptionOrderCancellationRequestV04) AddMessageIdentification() *iso20022.MessageIdentification1 {
	s.MessageIdentification = new(iso20022.MessageIdentification1)
	return s.MessageIdentification
}

// AddPoolReference allocates and returns the PoolReference element.
func (s *SubscriptionOrderCancellationRequestV04) AddPoolReference() *iso20022.AdditionalReference9 {
	s.PoolReference = new(iso20022.AdditionalReference9)
	return s.PoolReference
}

// AddPreviousReference allocates and returns the PreviousReference element.
func (s *SubscriptionOrderCancellationRequestV04) AddPreviousReference() *iso20022.AdditionalReference8 {
	s.PreviousReference = new(iso20022.AdditionalReference8)
	return s.PreviousReference
}

// SetMasterReference sets the MasterReference element from a plain string.
func (s *SubscriptionOrderCancellationRequestV04) SetMasterReference(value string) {
	s.MasterReference = (*iso20022.Max35Text)(&value)
}

// AddOrderReferences appends a new order reference and returns it for
// population.
func (s *SubscriptionOrderCancellationRequestV04) AddOrderReferences() *iso20022.InvestmentFundOrder9 {
	newValue := new(iso20022.InvestmentFundOrder9)
	s.OrderReferences = append(s.OrderReferences, newValue)
	return newValue
}

// AddCopyDetails allocates and returns the CopyDetails element.
func (s *SubscriptionOrderCancellationRequestV04) AddCopyDetails() *iso20022.CopyInformation4 {
	s.CopyDetails = new(iso20022.CopyInformation4)
	return s.CopyDetails
}
// models/book.go package models import ( "context" "log" "net/http" "time" "github.com/gin-gonic/gin" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" ) // Book Data Structure type Book struct { // ID string `bson:"_id, omitempty"` Title string `bson:"title, omitempty"` Author string `bson:"author, omitempty"` CreatedAt time.Time `bson:"created_at, omitempty"` UpdatedAt time.Time `bson:"updated_at, omitempty"` } // Database Instance var collection *mongo.Collection // BookCollection for define collection used for Book model func BookCollection(c *mongo.Database) { collection = c.Collection("books_collection") } // GetAllBooks for get list of all book in db func GetAllBooks(c *gin.Context) { books := []Book{} cursor, err := collection.Find(context.TODO(), bson.M{}) // Handling error if err != nil { log.Printf("Error while getting all books, Reason: %v\n", err) c.JSON(http.StatusInternalServerError, gin.H{ "status": http.StatusInternalServerError, "message": "Something went wron", }) return } // Iterate through the returned curson for cursor.Next(context.TODO()) { var book Book cursor.Decode(&book) books = append(books, book) } log.Printf("Successfully GET all book") c.JSON(http.StatusOK, gin.H{ "status": http.StatusOK, "message": "All Todos", "data": books, }) return } // CreateBook for inserting book into db func CreateBook(c *gin.Context) { var book Book c.BindJSON(&book) title := book.Title author := book.Author newBook := Book{ // ID: guuid.New().String(), Title: title, Author: author, CreatedAt: time.Now(), UpdatedAt: time.Now(), } _, err := collection.InsertOne(context.TODO(), newBook) if err != nil { log.Printf("Error while inserting new book into db, Reason %v\n", err) // Return 500 c.JSON(http.StatusInternalServerError, gin.H{ "status": http.StatusInternalServerError, "message": "Something went wrong", }) return } log.Printf("Successfully POST a Book") c.JSON(http.StatusCreated, gin.H{ "status": http.StatusCreated, "message": 
"Successfully Create Book", }) return }
package public

// Shared lookup keys used to store and retrieve the validator and
// translator components (e.g. from a context or DI container).
const (
	ValidatorKey  = "ValidatorKey"
	TranslatorKey = "TranslatorKey"
)
package config

import (
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// Environment holds all runtime configuration read from environment
// variables.
type Environment struct {
	DB       DB
	Mastodon Mastodon
	Queue    Queue
	Port     string
	UserID   int64
}

// DB holds the database connection settings.
type DB struct {
	Host     string
	Database string
	Username string
	Password string
}

// Mastodon holds the Mastodon API settings.
type Mastodon struct {
	ServerURL   string
	AccessToken string
}

// Queue holds the message-queue connection settings.
type Queue struct {
	Host     string
	Username string
	Password string
}

// Get reads the configuration from environment variables. It returns an
// error listing every missing variable, or an error when USER_ID is not a
// valid integer.
func Get() (Environment, error) {
	var missing []string
	var env Environment

	for k, v := range map[string]*string{
		"DB_HOST":               &env.DB.Host,
		"DB_DATABASE":           &env.DB.Database,
		"DB_USERNAME":           &env.DB.Username,
		"DB_PASSWORD":           &env.DB.Password,
		"MASTODON_SERVER_URL":   &env.Mastodon.ServerURL,
		"MASTODON_ACCESS_TOKEN": &env.Mastodon.AccessToken,
		"MQ_HOST":               &env.Queue.Host,
		"MQ_USERNAME":           &env.Queue.Username,
		"MQ_PASSWORD":           &env.Queue.Password,
		"PORT":                  &env.Port,
	} {
		*v = os.Getenv(k)
		if *v == "" {
			missing = append(missing, k)
		}
	}

	for k, v := range map[string]*int64{
		"USER_ID": &env.UserID,
	} {
		s := os.Getenv(k)
		if s == "" {
			missing = append(missing, k)
			continue
		}
		// Fix: the original used fmt.Sscanf and ignored its error, silently
		// leaving the value 0 on malformed input. Fail loudly instead.
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return env, fmt.Errorf("invalid %s: %w", k, err)
		}
		*v = n
	}

	if len(missing) > 0 {
		return env, errors.New("missing env(s): " + strings.Join(missing, ", "))
	}
	return env, nil
}
package resources

import (
	. "goa.design/goa/v3/dsl"
)

// JWTSec declares the JWT bearer-token security scheme used by the API
// design.
var JWTSec = JWTSecurity("jwt", func() {
	Description("JWT Authentication Security")
})

// APIKeyAuth declares the API-key security scheme for the users API.
var APIKeyAuth = APIKeySecurity("api_key", func() {
	Description("API Key for users API")
})
package utils

import "time"

// JustifyTime combines the wall-clock time of source (hour and minute) with
// the calendar date of adjustment, zeroing seconds and nanoseconds, and
// returns the result in UTC.
func JustifyTime(source time.Time, adjustment time.Time) time.Time {
	year, month, day := adjustment.Date()
	return time.Date(year, month, day, source.Hour(), source.Minute(), 0, 0, time.UTC)
}
package workqueue

import (
	"context"
	"database/sql"
	"sync"
	"time"

	"github.com/Masterminds/squirrel"
	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
	"gopkg.in/tomb.v2"

	"git.scc.kit.edu/sdm/lsdf-checksum/meda"
)

// transactionState tracks the lifecycle of the transactioner's current
// database transaction.
type transactionState int8

const (
	transactionStateUninitialised transactionState = iota
	transactionStateIdle
	transactionStateActive
	transactionStateClosed
)

var (
	// txOptionsReadUncommitted is used for every transaction begun by the
	// transactioner.
	txOptionsReadUncommitted = &sql.TxOptions{
		Isolation: sql.LevelReadUncommitted,
	}
)

// transactionerConfig carries the dependencies and tuning knobs of a
// transactioner.
type transactionerConfig struct {
	DB *meda.DB `yaml:"-"`

	// MaxTransactionSize is the number of writing queries after which the
	// current transaction is committed.
	MaxTransactionSize int
	// MaxTransactionLifetime is the maximum age of a transaction before the
	// background committer goroutine commits it.
	MaxTransactionLifetime time.Duration
}

// transactioner batches writing queries into long-lived transactions that
// are committed either after MaxTransactionSize writing queries or after
// MaxTransactionLifetime (by a background goroutine started in beginTx).
type transactioner struct {
	Config *transactionerConfig

	// ctx is used as the transaction context for all begun transactions.
	ctx context.Context

	// Statistics exposed via UpdateFileCount / InsertChecksumWarningCount.
	updateFileCount            int
	insertChecksumWarningCount int

	// txMutex protects txState and somewhat protects tx, txQueryCount and
	// insertChecksumWarningStmt.
	// The fields tx, txQueryCount and insertChecksumWarningStmt may only be
	// read or written when holding the mutex or if txState == active only by
	// the goroutine maintaining txState.
	txMutex sync.Mutex
	txState transactionState
	tx      *sqlx.Tx
	// insertChecksumWarningStmt may be nil even if tx is set.
	insertChecksumWarningStmt *sqlx.NamedStmt
	// txQueryCount tracks the number of writing (!) queries executed within
	// the transaction.
	txQueryCount int

	// tomb supervises the background transactionCommitter goroutine; cancel
	// tears down the tomb's context.
	tomb   *tomb.Tomb
	cancel context.CancelFunc

	// interfaceSlicesPool recycles []interface{} scratch slices used when
	// building bulk UPDATE statements.
	interfaceSlicesPool sync.Pool
}

// newTransactioner creates and returns an transactioner from ctx and config.
// ctx is used as the transaction context for all transactions begun by the
// transactioner.
func newTransactioner(ctx context.Context, config *transactionerConfig) *transactioner {
	return &transactioner{
		Config: config,
		ctx:    ctx,
	}
}

// FetchFilesByIDs retrieves the meda.File rows for fileIDs within the
// current transaction, beginning one first if none is open.
func (t *transactioner) FetchFilesByIDs(ctx context.Context, fileIDs []uint64) ([]meda.File, error) {
	// Mark the transaction in-use so the background committer cannot steal
	// it while the query runs; restore idle on exit unless the state moved.
	t.txMutex.Lock()
	t.txState = transactionStateActive
	t.txMutex.Unlock()
	defer func() {
		t.txMutex.Lock()
		if t.txState == transactionStateActive {
			t.txState = transactionStateIdle
		}
		t.txMutex.Unlock()
	}()

	if t.tx == nil {
		err := t.beginTx(ctx)
		if err != nil {
			return nil, errors.Wrap(err, "(*transactioner).FetchFilesByIDs")
		}
	}

	files, err := t.Config.DB.FilesFetchFilesByIDs(ctx, t.tx, fileIDs)
	if err != nil {
		return nil, errors.Wrap(err, "(*transactioner).FetchFilesByIDs")
	}

	// Read-only queries do not count towards MaxTransactionSize (kept
	// commented out for reference):
	// t.txQueryCount += 1
	// if t.txQueryCount >= t.Config.MaxTransactionSize {
	// 	err := t.commitTx()
	// 	if err != nil {
	// 		return errors.Wrap(err, "(*transactioner).InsertChecksumWarning")
	// 	}
	// }

	return files, nil
}

// AppendFilesByIDs appends the meda.File rows for fileIDs to files within
// the current transaction, beginning one first if none is open.
// NOTE(review): the wrap messages below say FetchFilesByIDs — looks like a
// copy-paste slip; confirm before changing the emitted error text.
func (t *transactioner) AppendFilesByIDs(files []meda.File, ctx context.Context, fileIDs []uint64) ([]meda.File, error) {
	t.txMutex.Lock()
	t.txState = transactionStateActive
	t.txMutex.Unlock()
	defer func() {
		t.txMutex.Lock()
		if t.txState == transactionStateActive {
			t.txState = transactionStateIdle
		}
		t.txMutex.Unlock()
	}()

	if t.tx == nil {
		err := t.beginTx(ctx)
		if err != nil {
			return files, errors.Wrap(err, "(*transactioner).FetchFilesByIDs")
		}
	}

	files, err := t.Config.DB.FilesAppendFilesByIDs(files, ctx, t.tx, fileIDs)
	if err != nil {
		return files, errors.Wrap(err, "(*transactioner).FetchFilesByIDs")
	}

	// Read-only queries do not count towards MaxTransactionSize (kept
	// commented out for reference):
	// t.txQueryCount += 1
	// if t.txQueryCount >= t.Config.MaxTransactionSize {
	// 	err := t.commitTx()
	// 	if err != nil {
	// 		return errors.Wrap(err, "(*transactioner).InsertChecksumWarning")
	// 	}
	// }

	return files, nil
}

// InsertChecksumWarning writes one checksum warning row using a lazily
// prepared named statement, committing the transaction once
// MaxTransactionSize writing queries have accumulated.
func (t *transactioner) InsertChecksumWarning(ctx context.Context, checksumWarning *meda.ChecksumWarning) error {
	t.txMutex.Lock()
	t.txState = transactionStateActive
	t.txMutex.Unlock()
	defer func() {
		t.txMutex.Lock()
		if t.txState == transactionStateActive {
			t.txState = transactionStateIdle
		}
		t.txMutex.Unlock()
	}()

	if t.tx == nil {
		err := t.beginTx(ctx)
		if err != nil {
			return errors.Wrap(err, "(*transactioner).InsertChecksumWarning")
		}
	}

	if t.insertChecksumWarningStmt == nil {
		err := t.prepInsertChecksumWarningStmt(ctx)
		if err != nil {
			return errors.Wrap(err, "(*transactioner).InsertChecksumWarning")
		}
	}

	// NOTE(review): checksumWarning is already a pointer, so &checksumWarning
	// is a **ChecksumWarning — confirm sqlx resolves the double indirection.
	_, err := t.insertChecksumWarningStmt.ExecContext(ctx, &checksumWarning)
	if err != nil {
		return errors.Wrap(err, "(*transactioner).InsertChecksumWarning")
	}

	t.insertChecksumWarningCount += 1

	t.txQueryCount += 1
	if t.txQueryCount >= t.Config.MaxTransactionSize {
		err := t.commitTx()
		if err != nil {
			return errors.Wrap(err, "(*transactioner).InsertChecksumWarning")
		}
	}

	return nil
}

// UpdateFilesChecksums bulk-updates the checksum (and bookkeeping columns)
// of files in a single UPDATE ... CASE statement, committing once
// MaxTransactionSize writing queries have accumulated.
func (t *transactioner) UpdateFilesChecksums(ctx context.Context, files []meda.File, runID uint64) error {
	if len(files) == 0 {
		return nil
	}

	t.txMutex.Lock()
	t.txState = transactionStateActive
	t.txMutex.Unlock()
	defer func() {
		t.txMutex.Lock()
		if t.txState == transactionStateActive {
			t.txState = transactionStateIdle
		}
		t.txMutex.Unlock()
	}()

	if t.tx == nil {
		err := t.beginTx(ctx)
		if err != nil {
			return errors.Wrap(err, "(*transactioner).UpdateFilesChecksums")
		}
	}

	update, fileIDs, err := t.buildUpdate(files, runID)
	if err != nil {
		return errors.Wrap(err, "(*transactioner).UpdateFilesChecksums")
	}

	_, err = update.RunWith(t.tx).ExecContext(ctx)
	// Scratch slice is returned to the pool regardless of the exec outcome.
	t.returnInterfaceSliceToPool(fileIDs)
	if err != nil {
		return errors.Wrap(err, "(*transactioner).UpdateFilesChecksums: exec query")
	}

	t.updateFileCount += 1

	t.txQueryCount += 1
	if t.txQueryCount >= t.Config.MaxTransactionSize {
		err := t.commitTx()
		if err != nil {
			return errors.Wrap(err, "(*transactioner).UpdateFilesChecksums")
		}
	}

	return nil
}

// buildUpdate constructs the UPDATE statement assigning each file its new
// checksum via a CASE on id. It also returns the pooled []interface{} of
// file IDs so the caller can give it back to the pool after execution.
func (t *transactioner) buildUpdate(files []meda.File, runID uint64) (squirrel.UpdateBuilder, []interface{}, error) {
	checksumCaseBuilder := squirrel.Case("id")
	fileIDs := t.getInterfaceSliceFromPool()
	fileIDs = append(fileIDs, make([]interface{}, len(files))...)

	for ind, _ := range files {
		// Pointer to file in files, don't copy
		file := &files[ind]

		checksumCaseBuilder = checksumCaseBuilder.When(
			squirrel.Expr(squirrel.Placeholders(1), file.ID),
			squirrel.Expr(squirrel.Placeholders(1), file.Checksum),
		)
		fileIDs[ind] = file.ID
	}

	checksumCaseSql, checksumCaseArgs, err := checksumCaseBuilder.ToSql()
	if err != nil {
		t.returnInterfaceSliceToPool(fileIDs)
		return squirrel.UpdateBuilder{}, nil, errors.Wrap(err, "(*transactioner).buildUpdate")
	}

	update := squirrel.Update(t.Config.DB.FilesTableName()).
		Set("to_be_read", 0).
		Set("to_be_compared", 0).
		Set("checksum", squirrel.Expr(checksumCaseSql, checksumCaseArgs...)).
		Set("last_read", runID).
		Where("id IN ("+squirrel.Placeholders(len(fileIDs))+")", fileIDs...)

	return update, fileIDs, nil
}

// transactionCommitter runs in the tomb's goroutine: after
// MaxTransactionLifetime it steals the open transaction (waiting out any
// in-flight query) and commits it.
func (t *transactioner) transactionCommitter() error {
	timer := time.NewTimer(t.Config.MaxTransactionLifetime)

	select {
	case <-timer.C:
		break
	case <-t.tomb.Dying():
		return tomb.ErrDying
	}

	tx, insertChecksumWarningStmt, err := t.stealTransaction(t.tomb.Context(nil))
	if err != nil {
		// TODO
		// tx and insertChecksumWarningStmt are now potentially idling because ctx was closed
		return errors.Wrap(err, "(*transactioner).transactionCommitter")
	} else if tx == nil {
		// No open transaction stolen, there is no need for closing
		return nil
	}

	var retErr error

	if insertChecksumWarningStmt != nil {
		err = insertChecksumWarningStmt.Close()
		if err != nil && retErr == nil {
			retErr = errors.Wrap(err, "(*transactioner).transactionCommitter: close insert checksum warning statement")
		}
	}

	err = tx.Commit()
	if err != nil && retErr == nil {
		retErr = errors.Wrap(err, "(*transactioner).transactionCommitter: commit transaction")
	}

	return retErr
}

// stealTransaction takes ownership of the current transaction (and prepared
// statement) away from the transactioner, polling every 500µs while a query
// is active. Returns nils when no transaction is open; returns ctx.Err()
// when ctx is done first.
func (t *transactioner) stealTransaction(ctx context.Context) (*sqlx.Tx, *sqlx.NamedStmt, error) {
	var tx *sqlx.Tx
	var insertChecksumWarningStmt *sqlx.NamedStmt
	var noTxn bool
	timer := time.NewTimer(0)
	done := ctx.Done()

	for {
		t.txMutex.Lock()
		if t.txState == transactionStateIdle {
			// Idle: take the transaction and reset the tracked state.
			tx, insertChecksumWarningStmt = t.tx, t.insertChecksumWarningStmt
			t.tx, t.txState, t.txQueryCount = nil, transactionStateUninitialised, 0
			t.insertChecksumWarningStmt = nil
		} else if t.txState == transactionStateActive {
			// wait
		} else {
			// tx does not represent an open transaction
			noTxn = true
		}
		t.txMutex.Unlock()

		if tx != nil {
			break
		} else if noTxn {
			break
		}

		timer.Reset(500 * time.Microsecond)
		select {
		case <-timer.C:
			continue
		case <-done:
			// Exhaust timer
			if !timer.Stop() {
				<-timer.C
			}
			return tx, insertChecksumWarningStmt, ctx.Err()
		}
	}

	return tx, insertChecksumWarningStmt, nil
}

// getInterfaceSliceFromPool fetches a scratch []interface{} from the pool,
// or nil (a valid empty slice for append) when the pool is empty.
func (t *transactioner) getInterfaceSliceFromPool() []interface{} {
	sl := t.interfaceSlicesPool.Get()
	if sl == nil {
		return nil
	}

	return sl.([]interface{})
}

// returnInterfaceSliceToPool resets sl and gives its backing array back to
// the pool; zero-capacity slices are not worth pooling.
func (t *transactioner) returnInterfaceSliceToPool(sl []interface{}) {
	if cap(sl) == 0 {
		return
	}

	sl = sl[:0]
	t.interfaceSlicesPool.Put(sl)
}

// UpdateFileCount returns the number of bulk checksum UPDATEs executed.
func (t *transactioner) UpdateFileCount() int {
	return t.updateFileCount
}

// InsertChecksumWarningCount returns the number of checksum warnings
// inserted.
func (t *transactioner) InsertChecksumWarningCount() int {
	return t.insertChecksumWarningCount
}

// Commit commits the current transaction (if any) immediately.
func (t *transactioner) Commit() error {
	return t.commitTx()
}

// Close stops the background committer, rolls back any open transaction and
// closes the prepared statement.
func (t *transactioner) Close() error {
	var retErr error
	var tx *sqlx.Tx
	var insertChecksumWarningStmt *sqlx.NamedStmt

	if t.cancel != nil {
		t.cancel()
		// No context, so cannot wait for caller signalled cancel event
		<-t.tomb.Dead()
	}

	t.txMutex.Lock()
	if t.txState == transactionStateIdle || t.txState == transactionStateActive {
		tx, insertChecksumWarningStmt = t.tx, t.insertChecksumWarningStmt
		t.tx, t.txState, t.txQueryCount = nil, transactionStateUninitialised, 0
		t.insertChecksumWarningStmt = nil
	}
	t.txMutex.Unlock()

	if insertChecksumWarningStmt != nil {
		err := insertChecksumWarningStmt.Close()
		if err != nil && retErr == nil {
			retErr = errors.Wrap(err, "(*transactioner).Close: close insert checksum warning statement")
		}
	}

	if tx != nil {
		err := tx.Rollback()
		if err != nil && retErr == nil {
			retErr = errors.Wrap(err, "(*transactioner).Close: rollback transaction")
		}
	}

	return retErr
}

// beginTx begins a new transaction and sets t.tx.
// t.txQueryCount is resetted as well.
// txState == active must be held by the caller of this method.
// Note: the ctx parameter is deliberately unused — t.ctx governs the
// transaction's lifetime. Also starts the lifetime committer goroutine.
func (t *transactioner) beginTx(_ context.Context) error {
	tx, err := t.Config.DB.BeginTxx(t.ctx, txOptionsReadUncommitted)
	if err != nil {
		return errors.Wrap(err, "(*transactioner).beginTx: begin transaction")
	}

	t.tx, t.txQueryCount = tx, 0

	tombCtx, cancel := context.WithCancel(t.ctx)
	t.cancel = cancel
	t.tomb, _ = tomb.WithContext(tombCtx)
	t.tomb.Go(t.transactionCommitter)

	return nil
}

// prepInsertChecksumWarningStmt prepares and sets t.insertChecksumWarningStmt.
// txState == active must be held by the caller of this method.
func (t *transactioner) prepInsertChecksumWarningStmt(ctx context.Context) error {
	insertChecksumWarningStmt, err := t.Config.DB.ChecksumWarningsPrepareInsert(ctx, t.tx)
	if err != nil {
		return errors.Wrap(err, "(*transactioner).prepInsertChecksumWarningStmt")
	}

	t.insertChecksumWarningStmt = insertChecksumWarningStmt

	return nil
}

// commitTx stops the background committer, detaches the current transaction
// under the mutex and commits it, closing the prepared statement first.
func (t *transactioner) commitTx() error {
	var retErr error
	var tx *sqlx.Tx
	var insertChecksumWarningStmt *sqlx.NamedStmt

	if t.cancel != nil {
		t.cancel()
		// No context, so cannot wait for caller signalled cancel event
		<-t.tomb.Dead()
	}

	t.txMutex.Lock()
	if t.txState == transactionStateIdle || t.txState == transactionStateActive {
		tx, insertChecksumWarningStmt = t.tx, t.insertChecksumWarningStmt
		t.tx, t.txState, t.txQueryCount = nil, transactionStateUninitialised, 0
		t.insertChecksumWarningStmt = nil
	}
	t.txMutex.Unlock()

	if insertChecksumWarningStmt != nil {
		err := insertChecksumWarningStmt.Close()
		if err != nil && retErr == nil {
			retErr = errors.Wrap(err, "(*transactioner).commitTx: close insert checksum warning statement")
		}
	}

	if tx != nil {
		err := tx.Commit()
		if err != nil {
			// TODO check for conflicts, re-perform transaction
			if retErr == nil {
				retErr = errors.Wrap(err, "(*transactioner).commitTx: commit transaction")
			}
		}
	}

	return retErr
}