text
stringlengths
11
4.05M
package bootstrap

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/tppgit/we_service/core"
	"github.com/tppgit/we_service/pkg/services"
	"google.golang.org/grpc"
)

// RegisterGrpcService wires the WService gRPC server onto s and exposes the
// matching grpc-gateway REST endpoints, the embedded swagger document and the
// file-upload handler on mux. address and opts describe the gRPC endpoint the
// gateway dials back into.
func RegisterGrpcService(c context.Context, s *grpc.Server, mux *http.ServeMux, address string, opts []grpc.DialOption) {
	core.RegisterWServiceServer(s, services.Server)

	gateway := runtime.NewServeMux()
	mux.Handle("/", gateway)

	// Serve the embedded swagger definition as a plain document.
	mux.HandleFunc("/swagger.json", func(w http.ResponseWriter, _ *http.Request) {
		io.Copy(w, strings.NewReader(core.Swagger))
	})
	mux.HandleFunc("/upload", services.CreateFileUploadHandling().UploadFilterHandler)

	if err := core.RegisterWServiceHandlerFromEndpoint(c, gateway, address, opts); err != nil {
		fmt.Printf("serve: %v\n", err)
	}
}
package models

import (
	"fmt"
	"time"

	"github.com/rs/zerolog"
)

// Pre-loaded users for demonstration purposes
var initialUsers = []User{
	{FirstName: "Rob", LastName: "Pike"},
	{FirstName: "Ken", LastName: "Thompson"},
	{FirstName: "Robert", LastName: "Griesemer"},
	{FirstName: "Russ", MiddleInitial: "S", LastName: "Cox"},
}

// UserDB is a package level variable acting as an in-memory user database
var UserDB UserStorage

func init() {
	// Seed the in-memory database. The seed list contains no duplicate
	// names, so AddUser cannot fail here.
	for _, u := range initialUsers {
		UserDB.AddUser(u)
	}
}

// User represents a user of the system
type User struct {
	ID            int        `json:"id"`
	FirstName     string     `json:"first_name"`
	MiddleInitial string     `json:"middle_initial,omitempty"`
	LastName      string     `json:"last_name"`
	CreatedAt     *time.Time `json:"-"`
	UpdatedAt     *time.Time `json:"-"`
}

// UserStorage is an in-memory collection of users plus an optional logger.
type UserStorage struct {
	Users []User
	Log   *zerolog.Logger
}

// AddUser will add a user if it doesn't already exist or return an error
func (us *UserStorage) AddUser(u User) (*User, error) {
	u.ID = len(us.Users) + 1 // ID begins with 1
	for _, existing := range us.Users {
		// BUG FIX: the duplicate check previously compared
		// existing.LastName against itself (y.LastName == y.LastName),
		// so duplicate names were never rejected.
		if existing.FirstName == u.FirstName && existing.LastName == u.LastName {
			// Not yet supporting multiple users of same name
			return nil, fmt.Errorf("user with that name already exists")
		}
	}
	created := time.Now().UTC()
	updated := created
	u.CreatedAt = &created
	u.UpdatedAt = &updated
	us.Users = append(us.Users, u)
	return &u, nil
}

// GetUserByID returns the user record matching provided ID
func (us UserStorage) GetUserByID(id int) (*User, error) {
	for _, y := range us.Users {
		if y.ID == id {
			// y is a per-iteration copy, so the pointer does not alias
			// the backing slice.
			return &y, nil
		}
	}
	return nil, fmt.Errorf("user not found")
}

// GetUserByName will return the first user matching firstName and LastName
// This may not work in the real world since names are not unique
func (us UserStorage) GetUserByName(firstName string, lastName string) (*User, error) {
	for _, y := range us.Users {
		if y.FirstName == firstName && y.LastName == lastName {
			return &y, nil
		}
	}
	return nil, fmt.Errorf("user not found")
}

// GetUsers returns the slice of all users
func (us UserStorage) GetUsers() []User {
	// Guard against a nil logger: UserDB is used as a zero value and Log
	// may never have been assigned.
	if us.Log != nil {
		us.Log.Debug().Msg("Getting all users from collection.")
	}
	return us.Users
}

// UpdateUser will overwrite current user record with new data
func (us *UserStorage) UpdateUser(u User) error {
	for i := range us.Users {
		if us.Users[i].ID != u.ID {
			continue
		}
		// Currently no partial updates supported since all struct fields are required
		us.Users[i] = u
		now := time.Now().UTC()
		us.Users[i].UpdatedAt = &now
		return nil
	}
	return fmt.Errorf("update failed likely due to missing or incorrect id")
}
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package placement

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"regexp"
	"strings"

	"github.com/pingcap/tidb/util/codec"
	"gopkg.in/yaml.v2"
)

// PeerRoleType is the expected peer type of the placement rule.
type PeerRoleType string

const (
	// Voter can either match a leader peer or follower peer.
	Voter PeerRoleType = "voter"
	// Leader matches a leader.
	Leader PeerRoleType = "leader"
	// Follower matches a follower.
	Follower PeerRoleType = "follower"
	// Learner matches a learner.
	Learner PeerRoleType = "learner"
)

// RuleGroupConfig defines basic config of rule group
type RuleGroupConfig struct {
	ID       string `json:"id"`
	Index    int    `json:"index"`
	Override bool   `json:"override"`
}

// Rule is the core placement rule struct. Check https://github.com/tikv/pd/blob/master/server/schedule/placement/rule.go.
type Rule struct {
	GroupID        string       `json:"group_id"`
	ID             string       `json:"id"`
	Index          int          `json:"index,omitempty"`
	Override       bool         `json:"override,omitempty"`
	StartKeyHex    string       `json:"start_key"`
	EndKeyHex      string       `json:"end_key"`
	Role           PeerRoleType `json:"role"`
	Count          int          `json:"count"`
	Constraints    Constraints  `json:"label_constraints,omitempty"`
	LocationLabels []string     `json:"location_labels,omitempty"`
}

// Compile-time checks that *TiFlashRule customizes its JSON encoding.
var _ json.Marshaler = (*TiFlashRule)(nil)
var _ json.Unmarshaler = (*TiFlashRule)(nil)

// TiFlashRule extends Rule with other necessary fields.
type TiFlashRule struct {
	GroupID        string
	ID             string
	Index          int
	Override       bool
	Role           PeerRoleType
	Count          int
	Constraints    Constraints
	LocationLabels []string
	IsolationLevel string
	StartKey       []byte
	EndKey         []byte
}

// tiFlashRule is the JSON wire form of TiFlashRule: the raw start/end keys
// are carried as codec-encoded, hex-encoded strings instead of byte slices.
type tiFlashRule struct {
	GroupID        string       `json:"group_id"`
	ID             string       `json:"id"`
	Index          int          `json:"index,omitempty"`
	Override       bool         `json:"override,omitempty"`
	Role           PeerRoleType `json:"role"`
	Count          int          `json:"count"`
	Constraints    Constraints  `json:"label_constraints,omitempty"`
	LocationLabels []string     `json:"location_labels,omitempty"`
	IsolationLevel string       `json:"isolation_level,omitempty"`
	StartKeyHex    string       `json:"start_key"`
	EndKeyHex      string       `json:"end_key"`
}

// MarshalJSON implements json.Marshaler interface for TiFlashRule.
func (r *TiFlashRule) MarshalJSON() ([]byte, error) {
	// NOTE(review): keys pass through codec.EncodeBytes before hex encoding —
	// presumably to match the key format PD expects; confirm against PD docs.
	return json.Marshal(&tiFlashRule{
		GroupID:        r.GroupID,
		ID:             r.ID,
		Index:          r.Index,
		Override:       r.Override,
		Role:           r.Role,
		Count:          r.Count,
		Constraints:    r.Constraints,
		LocationLabels: r.LocationLabels,
		IsolationLevel: r.IsolationLevel,
		StartKeyHex:    hex.EncodeToString(codec.EncodeBytes(nil, r.StartKey)),
		EndKeyHex:      hex.EncodeToString(codec.EncodeBytes(nil, r.EndKey)),
	})
}

// UnmarshalJSON implements json.Unmarshaler interface for TiFlashRule.
// It reverses MarshalJSON: hex-decode each key, then codec-decode it back to
// the raw bytes.
func (r *TiFlashRule) UnmarshalJSON(bytes []byte) error {
	var rule tiFlashRule
	if err := json.Unmarshal(bytes, &rule); err != nil {
		return err
	}
	*r = TiFlashRule{
		GroupID:        rule.GroupID,
		ID:             rule.ID,
		Index:          rule.Index,
		Override:       rule.Override,
		Role:           rule.Role,
		Count:          rule.Count,
		Constraints:    rule.Constraints,
		LocationLabels: rule.LocationLabels,
		IsolationLevel: rule.IsolationLevel,
	}
	startKey, err := hex.DecodeString(rule.StartKeyHex)
	if err != nil {
		return err
	}
	endKey, err := hex.DecodeString(rule.EndKeyHex)
	if err != nil {
		return err
	}
	_, r.StartKey, err = codec.DecodeBytes(startKey, nil)
	if err != nil {
		return err
	}
	_, r.EndKey, err = codec.DecodeBytes(endKey, nil)
	return err
}

// NewRule constructs *Rule from role, count, and constraints. It is here to
// keep the behavior of creating new rules consistent.
func NewRule(role PeerRoleType, replicas uint64, cnst Constraints) *Rule {
	return &Rule{
		Role:        role,
		Count:       int(replicas),
		Constraints: cnst,
	}
}

// wrongSeparatorRegexp matches an unquoted token immediately followed by
// ":<digit>", which indicates a likely wrong separator in dict-form
// constraints (see getYamlMapFormatError).
var wrongSeparatorRegexp = regexp.MustCompile(`[^"':]+:\d`)

// getYamlMapFormatError classifies why a dict-form constraints string failed
// to parse: no colon at all, or an unquoted label before the colon.
// Returns nil when neither known formatting problem is detected.
func getYamlMapFormatError(str string) error {
	if !strings.Contains(str, ":") {
		return ErrInvalidConstraintsMappingNoColonFound
	}
	if wrongSeparatorRegexp.MatchString(str) {
		return ErrInvalidConstraintsMappingWrongSeparator
	}
	return nil
}

// NewRules constructs []*Rule from a yaml-compatible representation of
// 'array' or 'dict' constraints.
// Refer to https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-24-placement-rules-in-sql.md.
func NewRules(role PeerRoleType, replicas uint64, cnstr string) ([]*Rule, error) {
	rules := []*Rule{}
	cnstbytes := []byte(cnstr)
	// First try the list form: one rule carrying all constraints.
	constraints1, err1 := NewConstraintsFromYaml(cnstbytes)
	if err1 == nil {
		rules = append(rules, NewRule(role, replicas, constraints1))
		return rules, nil
	}
	// Then try the dict form: one rule per label set, each with its own count.
	constraints2 := map[string]int{}
	err2 := yaml.UnmarshalStrict(cnstbytes, &constraints2)
	if err2 == nil {
		// In dict form the per-label counts determine the replica numbers,
		// so an explicit replicas value is rejected.
		if replicas != 0 {
			return rules, fmt.Errorf("%w: should not specify replicas=%d when using dict syntax", ErrInvalidConstraintsRelicas, replicas)
		}
		for labels, cnt := range constraints2 {
			if cnt <= 0 {
				if err := getYamlMapFormatError(string(cnstbytes)); err != nil {
					return rules, err
				}
				return rules, fmt.Errorf("%w: count of labels '%s' should be positive, but got %d", ErrInvalidConstraintsMapcnt, labels, cnt)
			}
		}
		for labels, cnt := range constraints2 {
			labelConstraints, err := NewConstraints(strings.Split(labels, ","))
			if err != nil {
				return rules, err
			}
			rules = append(rules, NewRule(role, uint64(cnt), labelConstraints))
		}
		return rules, nil
	}
	return nil, fmt.Errorf("%w: should be [constraint1, ...] (error %s), {constraint1: cnt1, ...} (error %s), or any yaml compatible representation", ErrInvalidConstraintsFormat, err1, err2)
}

// Clone is used to duplicate a RuleOp for safe modification.
// Note that it is a shallow copy: Constraints is not cloned.
func (r *Rule) Clone() *Rule {
	n := &Rule{}
	*n = *r
	return n
}

// String implements fmt.Stringer by dumping all fields.
func (r *Rule) String() string {
	return fmt.Sprintf("%+v", *r)
}
package main

import (
	"fmt"
	"log"

	"github.com/kataras/iris"
	"github.com/neverlock/utility/random"
	"golang.org/x/net/websocket"
)

/*
	Native messages: no need to import iris-ws.js into ./templates/client.html.
	Uses OnMessage and EmitMessage.
*/

// clientPage carries the data rendered into client.html.
type clientPage struct {
	Title string
	Host  string
}

// BookingAPI is an iris API controller mounted at /booking.
type BookingAPI struct {
	*iris.Context
}

// main serves the demo page, registers the BookingAPI and broadcasts every
// websocket message back to all connected clients as a hand-built JSON blob
// with random height/width values.
func main() {
	iris.Static("/js", "./static/js", 1)
	iris.Get("/", func(ctx *iris.Context) {
		ctx.Render("client.html", clientPage{"Client Page", ctx.HostString()})
	})
	iris.API("/booking", BookingAPI{})
	// the path which the websocket client should listen/register to ->
	iris.Config.Websocket.Endpoint = "/my_endpoint"
	ws := iris.Websocket // get the websocket server
	ws.OnConnection(func(c iris.WebsocketConnection) {
		c.OnMessage(func(data []byte) {
			message := string(data)
			//c.To(iris.Broadcast).EmitMessage([]byte("Message from: " + c.ID() + "-> " + message))
			//c.EmitMessage([]byte("Me: " + message))
			height := random.Int(1, 100)
			width := random.Int(1, 300)
			// Build the JSON payload by hand and send it both to everyone
			// else (broadcast) and back to the sender.
			js := fmt.Sprintf("{\"From\":\"%s\",\"H\":%d,\"W\":%d,\"MSG\":\"%s\"}", c.ID(), height, width, message)
			c.To(iris.Broadcast).EmitMessage([]byte(js))
			c.EmitMessage([]byte(js))
			//c.To(myChatRoom).Emit("chat", js)
		})
		c.OnDisconnect(func() {
			fmt.Printf("\nConnection with ID: %s has been disconnected!", c.ID())
		})
	})
	iris.Listen(":8080")
}

// Get handles GET /booking: it writes a reply, then dials the server's own
// websocket endpoint as a client, writes an empty message and prints whatever
// it receives. NOTE(review): log.Fatal inside a request handler kills the
// whole process on any dial/IO error — confirm this is intended demo behavior.
func (u BookingAPI) Get() {
	u.Write("Get from /booking")
	origin := "http://104.238.149.36:8080/"
	url := "ws://104.238.149.36:8080/my_endpoint"
	ws, err := websocket.Dial(url, "", origin)
	if err != nil {
		log.Fatal(err)
	}
	/*
		Fm := "Test from"
		Msg := "dummy msg"
		js1 := fmt.Sprintf("{\"From\":\"%s\",\"MSG\":\"%s\"}", Fm, Msg)
	*/
	//height := random.Int(1, 100)
	//width := random.Int(1, 300)
	//js := fmt.Sprintf("{\"From\":\"%s\",\"H\":%d,\"W\":%d,\"MSG\":\"%s\"}", c.ID(), height, width, message)
	if _, err := ws.Write([]byte("")); err != nil {
		log.Fatal(err)
	}
	var msg = make([]byte, 512)
	var n int
	if n, err = ws.Read(msg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Received: %s.\n", msg[:n])
}
package main

import (
	"log"
	"net/http"

	"fmt"
	"io/ioutil"
	"strconv"

	"../ZFic"
)

// main loads the ZFic configuration, registers all route handlers and starts
// the HTTP server. Debug-only and showroom routes are added when the
// corresponding flag files enable them.
func main() {
	ZFIC, Sucess := ZFic.Load()
	if Sucess != nil {
		log.Fatal("Error Loading Server: " + Sucess.Error())
	}

	ZF := http.NewServeMux()
	ZF.HandleFunc("/", ZFic.MainPage)                  // the front page
	ZF.HandleFunc("/a", ZFic.Archive)                  // archive and search
	ZF.HandleFunc("/u/", ZFic.UserPage)                // user pages
	ZF.HandleFunc("/login/", ZFic.Login)               // login page
	ZF.HandleFunc("/logout/", ZFic.LogOut)             // logout page (duh)
	ZF.HandleFunc("/signup/", ZFic.SignUp)             // signup page
	ZF.HandleFunc("/settings/", ZFic.UserSettingsPage) // settings page
	ZF.HandleFunc("/f/", ZFic.Fic)                     // story handler, aka fanfiction

	ZF.HandleFunc("/static/", http.FileServer(http.Dir("../Zfic")).ServeHTTP) // css and fonts
	ZF.HandleFunc("/pic/", http.FileServer(http.Dir("../Zfic")).ServeHTTP)    // images

	fmt.Println("Starting server...")
	if Debug() {
		fmt.Println("Started debug...")
		ZF.HandleFunc("/reinstallmembers", ZFic.Reinstallmembers)
		ZF.HandleFunc("/killallsessions", ZFic.Killallsessions)
	}
	if Show() {
		ZF.HandleFunc("/show/", ZFic.ShowRoom)
		ZF.HandleFunc("/aoz/", ZFic.AoZ)
	}
	log.Fatal(http.ListenAndServe(ZFIC.Address()+":"+ZFIC.Port(), ZF))
}

// readBoolFlag reports whether the named flag file exists and holds a value
// strconv.ParseBool accepts as true. Any read or parse error is treated as
// false, matching the original best-effort behavior of Debug/Show.
func readBoolFlag(name string) bool {
	content, err := ioutil.ReadFile(name)
	if err != nil {
		return false
	}
	b, err := strconv.ParseBool(string(content))
	if err != nil {
		return false
	}
	return b
}

// Debug reports whether the "debug" flag file enables the debug-only routes.
func Debug() bool { return readBoolFlag("debug") }

// Show reports whether the "show" flag file enables the showroom routes.
func Show() bool { return readBoolFlag("show") }
package main import "practice/urlShort/helpers" // 入口函数 func main() { shortUrl := helpers.ShortUrl{} longUrl := "http://www.google.com" shortUrl.Do(longUrl) }
package blast

import (
	"context"
	"fmt"
	"io"

	"github.com/pkg/errors"
)

// startMainLoop launches the goroutine that, each time mainChannel fires,
// reads one record from the data reader and hands it to the workers. The
// loop ends on context cancellation, on EOF (signalled by closing
// dataFinishedChannel), or on a read error (reported on errorChannel).
func (b *Blaster) startMainLoop(ctx context.Context) {
	b.mainWait.Add(1)
	b.mainChannel = make(chan struct{})
	go func() {
		defer fmt.Fprintln(b.out, "Exiting main loop")
		defer b.mainWait.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case <-b.mainChannel:
				// IMPROVED: the original wrapped this in `for { ... break }`,
				// a loop every path of which broke or returned on its first
				// iteration — a dead construct, removed.
				record, err := b.dataReader.Read()
				if err != nil {
					if err == io.EOF {
						fmt.Fprintln(b.out, "Found end of data file")
						// finish gracefully
						close(b.dataFinishedChannel)
						return
					}
					b.errorChannel <- errors.WithStack(err)
					return
				}
				b.workerChannel <- workDef{Record: record}
			}
		}
	}()
}

// workDef is one unit of work: a single CSV-style record for a worker.
type workDef struct {
	Record []string
}
package goSolution

import "testing"

// TestScheduleCourse checks scheduleCourse against three fixed course sets.
func TestScheduleCourse(t *testing.T) {
	cases := []struct {
		courses [][]int
		want    int
	}{
		{[][]int{{100, 200}, {200, 1300}, {1000, 1250}, {2000, 3200}}, 3},
		{[][]int{{1, 2}, {1, 2}, {2, 4}}, 3},
		{[][]int{{3, 2}, {4, 3}}, 0},
	}
	for _, c := range cases {
		AssertEqual(t, c.want, scheduleCourse(c.courses))
	}
}
package fin

import (
	"log"
	"os"
	"strings"
	"time"

	"github.com/valyala/fasthttp"
)

// logger is created lazily by SimpleLogger on first use.
var logger *log.Logger

// SimpleLogger returns middleware that writes one plain-text line per request
// to stdout: timestamp, status, latency, remote IP, method and path.
func SimpleLogger() HandlerFunc {
	return func(c *Context) {
		if logger == nil {
			logger = log.New(os.Stdout, "[fin]", 0)
		}
		begin := time.Now()
		c.Next()
		finish := time.Now()
		logger.Printf("%v | %3d | %8v | %15s |%-7s %#v\n",
			finish.Format("2006/01/02 - 15:04:05"),
			c.Response.StatusCode(),
			finish.Sub(begin),
			c.RemoteIP(),
			string(c.Method()),
			string(c.Path()),
		)
	}
}

// Logger returns middleware that records each request through the engine's
// structured logger; JSON response bodies are included in the entry.
func Logger() HandlerFunc {
	return func(c *Context) {
		begin := time.Now()
		c.Next()
		finish := time.Now()

		fields := []interface{}{
			"method", string(c.Method()),
			"path", string(c.Path()),
			"time", finish.Format("2006/01/02 15:04:05"),
			"status", c.Response.StatusCode(),
			"cost", finish.Sub(begin),
			"remote_ip", c.RemoteIP(),
			"request_body", string(c.PostBody()),
		}
		// Only attach the response body when it is JSON — other content
		// types (binary, HTML) would pollute the log.
		contentType := string(c.Response.Header.Peek(fasthttp.HeaderContentType))
		if strings.Contains(contentType, "application/json") {
			fields = append(fields, "response", string(c.Response.Body()))
		}
		c.engine.logger.Infow("http request with", fields...)
	}
}
package routingproxy

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"regexp"
)

// RoutingProxy is an HTTP Handler that uses a ReverseProxy and allows
// to modify the requests and answers based on the routers
type RoutingProxy struct {
	Proxy            *httputil.ReverseProxy
	requestModifiers []RequestModifier
}

// AddRequestModifier compiles the modifier's matching path and, on success,
// registers it with the proxy; the compile error, if any, is returned.
func (rp *RoutingProxy) AddRequestModifier(rm RequestModifier) error {
	regex, err := regexp.Compile(rm.MatchingPath)
	if err != nil {
		return err
	}
	rm.pathRegex = regex
	rp.requestModifiers = append(rp.requestModifiers, rm)
	return nil
}

// NewRoutingProxy returns a new RoutingProxy with an embedded httputil.ReverseProxy
func NewRoutingProxy(target *url.URL) *RoutingProxy {
	targetQuery := target.RawQuery
	rp := RoutingProxy{}

	// director rewrites each incoming request to point at target and then
	// runs every registered request modifier.
	director := func(req *http.Request) {
		req.URL.Scheme = target.Scheme
		req.URL.Host = target.Host
		req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
		if targetQuery == "" || req.URL.RawQuery == "" {
			req.URL.RawQuery = targetQuery + req.URL.RawQuery
		} else {
			req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
		}
		if _, ok := req.Header["User-Agent"]; !ok {
			// explicitly disable User-Agent so it's not set to default value
			req.Header.Set("User-Agent", "")
		}
		for _, rm := range rp.requestModifiers {
			rm.modifyRequest(req)
		}
	}

	// modifyResponse runs every registered modifier on the upstream
	// response, stopping at the first error.
	modifyResponse := func(resp *http.Response) error {
		for _, rm := range rp.requestModifiers {
			if err := rm.modifyResponse(resp); err != nil {
				return err
			}
		}
		return nil
	}

	rp.Proxy = &httputil.ReverseProxy{
		Director:       director,
		ModifyResponse: modifyResponse,
	}
	return &rp
}
package framework

import (
	"math"
	"strings"
	"testing"
)

// TestRound verifies Round sends 0.49 down to 0 and 0.5 up to 1.
func TestRound(t *testing.T) {
	down, up := Round(0.49), Round(0.5)
	if math.Abs(down-0.) >= floatPrecision {
		t.Errorf("framework.Round should round %v down", down)
	}
	if math.Abs(up-1.) >= floatPrecision {
		t.Errorf("framework.Round should round %v up", up)
	}
}

// TestFloatEq verifies epsilon-based equality.
func TestFloatEq(t *testing.T) {
	if FloatEq(0.5, 0.49) {
		t.Errorf("framework.FloatEq should say false")
	}
	if !FloatEq(0.5, 0.49+0.01) {
		t.Errorf("framework.FloatEq should say true")
	}
}

// TestFloatLs verifies strict less-than.
func TestFloatLs(t *testing.T) {
	if FloatLs(0.5, 0.5) {
		t.Errorf("framework.FloatLs should say false")
	}
	if !FloatLs(0.5, 0.51) {
		t.Errorf("framework.FloatLs should say true")
	}
	if FloatLs(0.51, 0.5) {
		t.Errorf("framework.FloatLs should say false")
	}
}

// TestFloatLe verifies less-than-or-equal.
func TestFloatLe(t *testing.T) {
	if !FloatLe(0.5, 0.5) {
		t.Errorf("framework.FloatLe should say true")
	}
	if !FloatLe(0.5, 0.51) {
		t.Errorf("framework.FloatLe should say true")
	}
	if FloatLe(0.51, 0.5) {
		t.Errorf("framework.FloatLe should say false")
	}
}

// TestFloatGt verifies strict greater-than.
func TestFloatGt(t *testing.T) {
	if FloatGt(0.5, 0.5) {
		t.Errorf("framework.FloatGt should say false")
	}
	if FloatGt(0.5, 0.51) {
		t.Errorf("framework.FloatGt should say false")
	}
	if !FloatGt(0.51, 0.5) {
		t.Errorf("framework.FloatGt should say true")
	}
}

// TestFloatGe verifies greater-than-or-equal.
func TestFloatGe(t *testing.T) {
	if !FloatGe(0.5, 0.5) {
		t.Errorf("framework.FloatGe should say true")
	}
	if FloatGe(0.5, 0.51) {
		t.Errorf("framework.FloatGe should say false")
	}
	if !FloatGe(0.51, 0.5) {
		t.Errorf("framework.FloatGe should say true")
	}
}

// TestGetBinaryPath checks the computed binary path contains the repo name.
func TestGetBinaryPath(t *testing.T) {
	path := GetBinaryPath()
	if !strings.Contains(path, "hub") {
		t.Errorf("TestGetBinaryPath: Something went wrong during path calculation: %v", path)
	}
}

// TestGetRootPath checks the computed root path contains the repo name.
func TestGetRootPath(t *testing.T) {
	path := GetRootPath()
	if !strings.Contains(path, "hub") {
		t.Errorf("TestGetRootPath: Something went wrong during path calculation: %v", path)
	}
}

// TestGetHtmlTemplatePath checks the template path contains the expected
// segments and ends with the requested file name.
func TestGetHtmlTemplatePath(t *testing.T) {
	path := GetHtmlTemplatePath("test.html")
	if !strings.Contains(path, "hub") || !strings.Contains(path, "html") || !strings.HasSuffix(path, "test.html") {
		t.Errorf("TestGetHtmlTemplatePath: Something went wrong during path calculation: %v", path)
	}
}

// TestGetUserDataPath checks the user-data path contains the expected
// segments and ends with the requested file name.
func TestGetUserDataPath(t *testing.T) {
	path := GetUserDataPath("test.html")
	if !strings.Contains(path, "hub") || !strings.Contains(path, "user_data") || !strings.HasSuffix(path, "test.html") {
		t.Errorf("TestGetUserDataPath: Something went wrong during path calculation: %v", path)
	}
}
package middleware

import (
	"log"

	"github.com/gin-gonic/gin"
)

// SampleMiddleware returns a gin handler that logs a line before and after
// the downstream handlers in the chain execute.
func SampleMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		log.Println("before logic")
		c.Next() // hand control to the rest of the chain
		log.Println("after logic")
	}
}
package main_test

import (
	"Barracks/data"
	"Barracks/rank"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"reflect"
	"time"
)

// createMockUser builds a deterministic test user ("user01", "user02", ...)
// belonging to the given contest. IDs start at 10.
func createMockUser(index int, contest *data.Contest) (user data.User) {
	user = data.User{
		ID:        uint(index + 10),
		Name:      "user0" + string(int('0')+index+1),
		StrId:     "user0" + string(int('0')+index+1),
		GroupName: "group01",
		IsAdmin:   false,
		ContestID: contest.ID,
	}
	return
}

// createMockProblem builds a test problem coded "A", "B", ... for the contest.
func createMockProblem(index int, contest *data.Contest) (problem data.Problem) {
	problem = data.Problem{
		ID:        uint(index),
		Code:      string(int('A') + index),
		ContestID: contest.ID,
	}
	return
}

// createMockSubmission builds a submission with the given verdict.
// NOTE(review): the t parameter is ignored — CreatedAt derives from
// time.Now().Add(offset); confirm whether t was meant to be the base time.
func createMockSubmission(id uint, result int, problem data.Problem, user data.User, contest *data.Contest, t time.Time, offset time.Duration) (submission data.Submission) {
	submission = data.Submission{
		ID:        id,
		Result:    result,
		ProblemID: problem.ID,
		UserID:    user.ID,
		ContestID: contest.ID,
		CreatedAt: time.Now().Add(offset),
	}
	return
}

var _ = Describe("Rank", func() {
	Context("if contest, problems and users are prepared", func() {
		var (
			contest  *data.Contest
			users    *[]data.User
			problems *[]data.Problem
		)
		BeforeEach(func() {
			By("Creating contest instance")
			// Contest started 30 minutes ago, so penalties below include a
			// 30-minute base offset.
			contest = &data.Contest{
				ID:    1,
				Name:  "shake17",
				Start: time.Now().Add(-30 * time.Minute),
				End:   time.Now().Add(4*time.Hour + 30*time.Minute),
			}
			By("Creating user instances")
			users = &[]data.User{}
			for i := 0; i < 5; i++ {
				*users = append(*users, createMockUser(i, contest))
			}
			By("Creating problem instances")
			problems = &[]data.Problem{}
			for i := 0; i < 5; i++ {
				*problems = append(*problems, createMockProblem(i, contest))
			}
		})
		Context("when gives contest, problems and users to rank package,", func() {
			It("should init rank data.", func() {
				By("Initiating rank data")
				nowDate := time.Now()
				rank.InitData(contest, users, problems)
				r := rank.MyRankData
				Expect(r.CalcAt).To(BeTemporally("~", nowDate))
				Expect(r.UserRows).To(HaveLen(len(*users)))
				Expect(reflect.DeepEqual(r.ContestInfo, contest)).To(Equal(true))
				u := rank.MyRankData.UserRows[0]
				Expect(u.ProblemStatuses).To(HaveLen(len(*problems)))
			})
			Context("After initiating", func() {
				BeforeEach(func() {
					By("Initiating rank data")
					rank.InitData(contest, users, problems)
				})
				It("should have correct rank data according to given submissions.", func() {
					By("Giving five submissions with four wrong and one correct")
					t := time.Now()
					submissions := &[]data.Submission{
						createMockSubmission(10, data.WRONG_ANSWER, (*problems)[0], (*users)[0], contest, t, 3*time.Second),
						createMockSubmission(11, data.MEMORY_LIMIT_EXCEED, (*problems)[0], (*users)[0], contest, t, 5*time.Second),
						createMockSubmission(12, data.RUNTIME_ERROR, (*problems)[1], (*users)[1], contest, t, 7*time.Second),
						createMockSubmission(13, data.ACCEPTED, (*problems)[0], (*users)[0], contest, t, 10*time.Second),
						createMockSubmission(14, data.TIME_LIMIT_EXCEED, (*problems)[1], (*users)[1], contest, t, 12*time.Second),
					}
					rank.AddSubmissions(submissions)
					userRows := &rank.MyRankData.UserRows
					firstProblemIndex := rank.MyRankData.ProblemMap[(*problems)[0].ID]
					secondProblemIndex := rank.MyRankData.ProblemMap[(*problems)[1].ID]
					// 30 min since contest start + 10 s submit offset +
					// 20-minute penalty per wrong try before the accept.
					firstUserPenaltyEst := 30*time.Minute + 10*time.Second + 2*20*time.Minute
					Expect((*userRows)[0].ProblemStatuses[firstProblemIndex].WrongCount).To(Equal(uint(2)))
					Expect((*userRows)[0].Penalty).To(BeNumerically("~", firstUserPenaltyEst, 10*time.Millisecond))
					Expect((*userRows)[1].ProblemStatuses[secondProblemIndex].WrongCount).To(Equal(uint(2)))
					Expect((*userRows)[1].Penalty).To(Equal(time.Duration(0)))
					eRanks := []uint{1, 2, 2, 2, 2}
					for index, eRank := range eRanks {
						Expect((*userRows)[index].Rank).To(Equal(uint(eRank)))
					}
					By("Third user has accept by one try")
					submissions = &[]data.Submission{
						createMockSubmission(15, data.ACCEPTED, (*problems)[0], (*users)[2], contest, t, 15*time.Second),
					}
					rank.AddSubmissions(submissions)
					Expect((*userRows)[2].ProblemStatuses[firstProblemIndex].WrongCount).To(Equal(uint(0)))
					Expect((*userRows)[2].Penalty).To(BeNumerically("~", 30*time.Minute+15*time.Second, 10*time.Millisecond))
					eRanks = []uint{2, 3, 1, 3, 3}
					for index, eRank := range eRanks {
						Expect((*userRows)[index].Rank).To(Equal(uint(eRank)))
					}
					By("First user has accept second Problem by second try and second user has accept first problem by second try")
					submissions = &[]data.Submission{
						createMockSubmission(16, data.RUNTIME_ERROR, (*problems)[1], (*users)[0], contest, t, 17*time.Second),
						createMockSubmission(17, data.ACCEPTED, (*problems)[1], (*users)[1], contest, t, 19*time.Second),
						createMockSubmission(18, data.ACCEPTED, (*problems)[1], (*users)[0], contest, t, 22*time.Second),
					}
					rank.AddSubmissions(submissions)
					eRanks = []uint{1, 3, 2, 4, 4}
					firstUserPenaltyEst += 30*time.Minute + 22*time.Second + 20*time.Minute
					Expect((*userRows)[0].Penalty).To(BeNumerically("~", firstUserPenaltyEst, 10*time.Millisecond))
					Expect((*userRows)[0].ProblemStatuses[secondProblemIndex].WrongCount).To(Equal(uint(1)))
					for index, eRank := range eRanks {
						Expect((*userRows)[index].Rank).To(Equal(uint(eRank)))
					}
				})
			})
		})
	})
})
package thread

import "TechnoParkDBProject/internal/app/thread/models"

// Usecase describes the thread business-logic operations the delivery layer
// depends on.
type Usecase interface {
	// CreateThread persists a new thread and returns the stored record.
	CreateThread(thread *models.Thread) (*models.Thread, error)
	// FindThreadBySlug looks a thread up by its slug.
	FindThreadBySlug(slug string) (*models.Thread, error)
	// GetThreadsByForumSlug lists threads of a forum; since/desc/limit
	// control pagination and ordering.
	GetThreadsByForumSlug(forumSlug, since, desc string, limit int) ([]*models.Thread, error)
	// GetThreadBySlugOrID resolves a thread by either its slug or numeric id.
	GetThreadBySlugOrID(slugOrID string) (*models.Thread, error)
	// UpdateTreads updates a thread identified by slug or id.
	// NOTE(review): the name has a typo ("Treads"); kept as-is because
	// renaming would break every implementer and caller of this interface.
	UpdateTreads(slugOrID string, th *models.Thread) (*models.Thread, error)
}
package lambda

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/aws/aws-lambda-go/lambda"
	"github.com/epsagon/epsagon-go/epsagon"
	"github.com/epsagon/epsagon-go/protocol"
	"github.com/epsagon/epsagon-go/tracer"
	"github.com/queueup-dev/qup-io/v2/envvar"
)

// The following two types are added to introduce naming/type conventions.
type InitialWrapper func(handler interface{}) lambda.Handler
type LambdaWrapper func(handler lambda.Handler) (lambda.Handler, error)

// EpsagonHandler adapts the uncommon signature of an Epsagon-wrapped handler
// function to the lambda.Handler interface.
type EpsagonHandler struct {
	epsagonWrappedHandler func(ctx context.Context, payload json.RawMessage) (interface{}, error)
}

// Compile time check that the EpsagonHandler struct implements the lambda.Handler interface.
var _ lambda.Handler = EpsagonHandler{}

// Invoke runs the wrapped handler, reports any handler or marshalling error
// to the Epsagon tracer, and returns the JSON-encoded result on success.
// Panics if no handler function was set.
func (w EpsagonHandler) Invoke(ctx context.Context, payload []byte) ([]byte, error) {
	if w.epsagonWrappedHandler == nil {
		panic(fmt.Errorf("handler function is not set"))
	}
	result, err := w.epsagonWrappedHandler(ctx, payload)
	if err != nil {
		// Record the handler failure on the trace before propagating it.
		tracer.AddException(&protocol.Exception{
			Type:    "wrapper",
			Message: fmt.Sprintf("Error in wrapper: error in response: %v", err),
			Time:    tracer.GetTimestamp(),
		})
		return nil, err
	}
	marshalledResult, err := json.Marshal(result)
	if err != nil {
		tracer.AddException(&protocol.Exception{
			Type:    "wrapper",
			Message: fmt.Sprintf("Error in wrapper: failed to convert response: %v", err),
			Time:    tracer.GetTimestamp(),
		})
		return nil, err
	}
	return marshalledResult, nil
}

// StartLambda starts the lambda using the Epsagon wrapper.
// The handler has to adhere to one of the handler type signatures
// as described in the aws documentation of lambda.Start().
func StartLambda(handler interface{}) func() {
	return func() {
		lambda.StartHandler(wrapEpsagon(handler))
	}
}

// wrapHandler wraps a plain handler into a lambda.Handler via Epsagon.
func wrapHandler(handler interface{}) lambda.Handler {
	return wrapEpsagon(handler)
}

// wrapEpsagon builds the Epsagon tracer config from the EPSAGON_APP_ID and
// EPSAGON_TOKEN environment variables (envvar.Must aborts if either is
// missing) and wraps the handler into an EpsagonHandler.
func wrapEpsagon(handler interface{}) lambda.Handler {
	return EpsagonHandler{
		epsagonWrappedHandler: epsagon.WrapLambdaHandler(
			epsagon.NewTracerConfig(envvar.Must("EPSAGON_APP_ID"), envvar.Must("EPSAGON_TOKEN")),
			handler,
		).(func(ctx context.Context, payload json.RawMessage) (interface{}, error)),
	}
}
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/Jeffail/gabs/v2"
	"github.com/urfave/cli/v2"
)

var cmdGet cli.Command
var cmdContains cli.Command

// getOptions bundles the inputs of one get traversal.
type getOptions struct {
	json      *gabs.Container
	path      string
	delimiter string
}

func init() {
	cmdGet = cli.Command{
		Name:   "get",
		Usage:  "extract an path from a json file",
		Action: actionGet,
		Flags: []cli.Flag{
			&flagFile,
			&flagPath,
			&flagDelimiter,
			&flagPretty,
		},
	}
}

// actionGet implements the "get" CLI command: read the input JSON, resolve
// the requested path and print the result (pretty-printed on demand).
func actionGet(c *cli.Context) error {
	j, err := readInput(c.String("file"))
	if err != nil {
		return err
	}
	options := getOptions{
		json:      j,
		path:      c.String("path"),
		delimiter: getDelimiter(c.String("delimiter")),
	}
	j, err = get(options)
	if err != nil {
		return err
	}
	switch j.Data().(type) {
	case string:
		// Print raw strings without JSON quoting.
		fmt.Printf("%s", j.Data())
	default:
		if pretty {
			fmt.Println(j.StringIndent("", " "))
		} else {
			fmt.Println(j.String())
		}
	}
	return nil
}

// get retrieves a path from a JSON structure.
// The path is specified in dotted notation:
//   {"foo":{"bar":{"baz":"xyz"}}}       = foo.bar.baz
//   {"foo":{"bar":["a","b","c"]}}       = foo.bar.2
//   {"foo":{"bar":{"baz":"xyz"}}}       = foo.bar.baz=xyz
//   {"foo":{"bar":{"baz":"xyz"}}}       = foo.*
//   {"foo":{"bar":[{"a":"b"},{"c":"d"}]}} = foo.bar.*.a
func get(options getOptions) (*gabs.Container, error) {
	var err error
	j := options.json
	pathPieces := strings.Split(options.path, options.delimiter)
	for i := 0; i < len(pathPieces); i++ {
		p := pathPieces[i]
		// BUG FIX: value is now reset per piece. Previously it was declared
		// once outside the loop, so a "=value" from an earlier piece leaked
		// into the comparison of every later piece.
		value := ""
		// Check if a value was specified ("key=value" form).
		if kv := strings.Split(p, "="); len(kv) > 1 {
			p = kv[0]
			// Rejoin so values containing '=' survive the split.
			value = strings.Join(kv[1:], "=")
		}
		debug.Printf("Path piece: %+v", p)
		debug.Printf("Path value: %+v", value)
		if _, ok := j.Data().([]interface{}); ok {
			debug.Printf("%+v is an array", j)
			if p == "*" {
				debug.Printf("glob used")
				// Try each child in turn; the first child for which the
				// remaining path resolves wins.
				children := j.Children()
				for _, c := range children {
					debug.Printf("Child: %+v", c)
					// BUG FIX: the remaining path was rejoined with a
					// hard-coded "."; with a custom delimiter the recursive
					// call then split it incorrectly.
					newPath := strings.Join(pathPieces[i+1:], options.delimiter)
					debug.Printf("New path: %+v", newPath)
					newOptions := getOptions{
						json:      c,
						path:      newPath,
						delimiter: options.delimiter,
					}
					if j, err := get(newOptions); err == nil {
						return j, nil
					}
				}
			} else {
				j, err = checkArray(j, p)
				if err != nil {
					return nil, err
				}
			}
		} else {
			j = j.Path(p)
		}
		// if a value was given, see if the returned value matches
		// if the returned value is an array, check and see if the value exists in the array
		if value != "" {
			j, err = compareValues(j, value)
			if err != nil {
				return j, err
			}
		}
	}
	if j.Data() == nil {
		return nil, fmt.Errorf("No match found.")
	}
	return j, nil
}

// checkArray interprets pathPiece as a numeric index into the array held by
// j and returns the indexed element.
// The caller guarantees j.Data() is a []interface{}.
func checkArray(j *gabs.Container, pathPiece string) (*gabs.Container, error) {
	i, err := strconv.Atoi(pathPiece)
	if err != nil {
		return nil, fmt.Errorf("Non-numerical index.")
	}
	// BUG FIX: the upper bound used ">", letting i == len(array) through and
	// indexing one element past the end.
	if i < 0 || i >= len(j.Data().([]interface{})) {
		return nil, fmt.Errorf("Array index out of bounds.")
	}
	return j.Index(i), nil
}

// compareValues checks whether j matches value: element-wise for arrays
// (returning the matching element), "[]"/"{}" match any array/object, strings
// compare directly, and other scalars compare numerically when value parses
// as a float, falling back to string comparison.
func compareValues(j *gabs.Container, value string) (*gabs.Container, error) {
	debug.Printf("[compareValues] j = %+v, v = %+v\n", j, value)
	var err error
	switch j.Data().(type) {
	case []interface{}:
		if value == "[]" {
			return j, nil
		}
		array := j.Data().([]interface{})
		for i := range array {
			_, err = compareValues(j.Index(i), value)
			if err == nil {
				return j.Index(i), nil
			}
		}
	case map[string]interface{}:
		if value == "{}" {
			return j, nil
		}
	case string:
		if value == j.Data().(string) {
			return j, nil
		}
	default:
		if valueFloat64, err := strconv.ParseFloat(value, 64); err == nil {
			if jFloat64, ok := j.Data().(float64); ok {
				if valueFloat64 == jFloat64 {
					return j, nil
				}
			}
		}
		if value == j.String() {
			return j, nil
		}
	}
	return nil, fmt.Errorf("No match found.")
}
package main

// ListNode is a node of a singly linked list of digits.
type ListNode struct {
	Val  int
	Next *ListNode
}

// addTwoNumbers adds two non-negative integers whose digits are stored in
// reverse order (least significant first) and returns the sum in the same
// reversed-digit representation.
//
// Problem: https://leetcode-cn.com/problems/add-two-numbers/submissions/
// Because the lists are already reversed, a single pass with a running carry
// suffices; with forward-ordered lists both inputs (and the result) would
// have to be reversed around the addition.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	head := &ListNode{Val: -1}
	tail := head
	sum := 0
	for l1 != nil || l2 != nil || sum != 0 {
		if l1 != nil {
			sum += l1.Val
			l1 = l1.Next
		}
		if l2 != nil {
			sum += l2.Val
			l2 = l2.Next
		}
		tail.Next = &ListNode{Val: sum % 10}
		tail = tail.Next
		sum /= 10 // carry into the next digit
	}
	return head.Next
}
package main

import (
	"log"
	"net/http"
	"time"
)

// helloHandler answers every request with a fixed greeting.
type helloHandler struct{}

func (h *helloHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello world!"))
}

// Variant 1: default mux.
// func main() {
//	http.Handle("/", &helloHandler{})
//	log.Println("Staring HTTP server ...")
//	log.Fatal(http.ListenAndServe(":4000", nil))
// }

// Variant 2: handler passed directly (no mux).
// func main() {
//	log.Println("Stasrting HTTP server ...")
//	log.Fatal(http.ListenAndServe(":4000", &helloHandler{}))
// }

// Variant 3 (active): explicit http.Server with a write timeout.
func main() {
	mux := http.NewServeMux()
	mux.Handle("/", &helloHandler{})
	// BUG FIX: a bare func literal is not an http.Handler, so the original
	// mux.Handle("/timeout", func...) did not compile; HandleFunc accepts
	// the plain function form.
	mux.HandleFunc("/timeout", func(w http.ResponseWriter, r *http.Request) {
		// BUG FIX: time.Sleep(2) slept two *nanoseconds*. To demonstrate
		// the server-side timeout the handler must outlast WriteTimeout.
		time.Sleep(3 * time.Second)
		w.Write([]byte("test test"))
	})
	server := &http.Server{
		Addr:         ":4000",
		Handler:      mux,
		WriteTimeout: 2 * time.Second,
	}
	log.Println("Starting HTTP server ...")
	log.Fatal(server.ListenAndServe())
}
//数组实现队列 package main import ( "errors" "fmt" ) type SliceQueue struct { slice []int front int rear int } //判断队列是否为空 func (p *SliceQueue) IsEmpty() bool{ return p.front == p.rear } //获取队列长度 func (p *SliceQueue) Size() int{ return p.rear - p.front } //获取队列首元素 func (p *SliceQueue) Top() int{ if p.IsEmpty() { panic(errors.New("队列已经为空")) } return p.slice[0] } //新增队列元素 func (p *SliceQueue) Push(data int) { p.slice = append(p.slice, data) p.rear++ } //获取队列元素 func (p *SliceQueue) Pop() int{ if p.IsEmpty() { panic(errors.New("队列已经为空")) } head := p.slice[p.front] p.front++ p.slice = p.slice[1:] return head } func (p *SliceQueue) PrintQueue(queue *SliceQueue) { for _,v := range queue.slice { fmt.Println(v) } } func main() { SliceMode() } func SliceMode() { defer func() { if err := recover(); err != nil { fmt.Println(err) } }() sliceQueue := &SliceQueue{slice:make([]int, 0)} sliceQueue.Push(1) sliceQueue.Push(2) sliceQueue.Push(3) sliceQueue.Push(4) fmt.Println("队列大小为:", sliceQueue.Size(), "队列头部元素为:", sliceQueue.Top()) sliceQueue.PrintQueue(sliceQueue) fmt.Print("执行出队列: ") sliceQueue.Pop() fmt.Println("队列大小为:", sliceQueue.Size(), "队列头部元素为:", sliceQueue.Top()) sliceQueue.PrintQueue(sliceQueue) fmt.Print("执行出队列: ") sliceQueue.Pop() fmt.Println("队列大小为:", sliceQueue.Size(), "队列头部元素为:", sliceQueue.Top()) sliceQueue.PrintQueue(sliceQueue) }
package bier

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
)

// Params holds the parsed fields of an incoming slash-command request.
type Params struct {
	Token       string // slack verification token
	ResponseURL string // webhook URL to post results back to
	Location    string // free-text location to search around
	Radius      int    // search radius in meters
	UserName    string // user that invoked the command
}

// TextBlock is the Slack Block Kit "text" object.
type TextBlock struct {
	Type string `json:"type"`
	Text string `json:"text"`
}

// AccessoryBlock is the Slack Block Kit image accessory.
type AccessoryBlock struct {
	Type     string `json:"type"`
	ImageURL string `json:"image_url"`
	AltText  string `json:"alt_text"`
}

// Block is one Slack Block Kit layout block.
type Block struct {
	Type      string          `json:"type"`
	Text      *TextBlock      `json:"text,omitempty"`
	Accessory *AccessoryBlock `json:"accessory,omitempty"`
}

// SlackMessage is the payload posted to the Slack response URL.
type SlackMessage struct {
	ResponseType string  `json:"response_type"`
	Blocks       []Block `json:"blocks"`
}

// Business is the subset of a Yelp business record used here.
type Business struct {
	Name        string  `json:"name"`
	ImageURL    string  `json:"image_url"`
	URL         string  `json:"url"`
	ReviewCount int     `json:"review_count"`
	Price       string  `json:"price"`
	Rating      float32 `json:"rating"`
	Location    struct {
		DisplayAddress []string `json:"display_address"`
	} `json:"location"`
}

// YelpResponse is the envelope of the Yelp business-search response.
type YelpResponse struct {
	Businesses []Business `json:"businesses"`
}

// Only allow request from this domain
const slackOrigin = "hooks.slack.com"

// Yelp business search base uri
const apiBase = "https://api.yelp.com/v3/businesses/search"

// Env vars
var slackToken = os.Getenv("SLACK_TOKEN")
var apiKey = os.Getenv("API_KEY")

// postToSlack POSTs the given blocks to a Slack webhook URL as an
// in-channel message.
func postToSlack(url string, blocks []Block) error {
	log.Println("Posting message to slack")
	body := SlackMessage{
		ResponseType: "in_channel",
		Blocks:       blocks,
	}
	data, err := json.Marshal(body)
	if err != nil {
		fmt.Printf("Failed to marshal json: %s", err.Error())
		return err
	}

	req, _ := http.NewRequest("POST", url, bytes.NewBuffer(data))
	req.Header.Add("Content-Type", "application/json")
	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Printf("Failed to post to slack: %s", err.Error())
		return err
	}
	// FIX: the response was previously discarded, leaking the body and the
	// underlying connection.
	defer resp.Body.Close()
	return nil
}

// buildBusinessBlocks renders the Yelp results as Slack Block Kit blocks:
// a header, a divider, then one section (with image accessory) per business.
func buildBusinessBlocks(params *Params, businesses []Business) []Block {
	log.Println("Building business blocks")
	blocks := []Block{
		{
			Type: "section",
			Text: &TextBlock{
				"mrkdwn",
				fmt.Sprintf("*Ok @%s, here are some breweries near %s*", params.UserName, params.Location),
			},
		},
		{
			Type: "divider",
		},
	}

	for _, b := range businesses {
		text := fmt.Sprintf(
			"*%s %s:* %.1f ⭐ (%d reviews)\n%s\n\n%s",
			b.Name,
			b.Price,
			b.Rating,
			b.ReviewCount,
			strings.Join(b.Location.DisplayAddress, " "),
			b.URL,
		)
		blocks = append(blocks,
			Block{
				Type:      "section",
				Text:      &TextBlock{"mrkdwn", text},
				Accessory: &AccessoryBlock{"image", b.ImageURL, "alt text"},
			},
		)
	}
	return blocks
}

// postNotFound tells the invoking channel that no breweries were found.
func postNotFound(params *Params) error {
	log.Println("Did not find any breweries")
	msg := fmt.Sprintf(
		"*Sorry we couldn't find any breweries in %s. "+
			"Try increasing your search radius*",
		params.Location,
	)
	blocks := []Block{
		{
			Type: "section",
			Text: &TextBlock{
				Type: "mrkdwn",
				Text: msg,
			},
		},
	}
	return postToSlack(params.ResponseURL, blocks)
}

// getYelpResults queries the Yelp business-search API for up to five
// top-rated breweries around params.Location within params.Radius meters.
func getYelpResults(params *Params) ([]Business, error) {
	log.Println("Calling yelp api")
	// A GET request carries no body; nil is the idiomatic way to say so.
	yelpReq, _ := http.NewRequest("GET", apiBase, nil)
	yelpReq.Header.Add("Authorization", fmt.Sprintf("Bearer %s", apiKey))
	yelpReq.Header.Add("Content-Type", "application/json")

	q := yelpReq.URL.Query()
	q.Add("location", params.Location)
	q.Add("radius", fmt.Sprintf("%d", params.Radius))
	q.Add("categories", "breweries")
	q.Add("limit", "5")
	q.Add("sort_by", "rating")
	yelpReq.URL.RawQuery = q.Encode()

	client := http.Client{}
	resp, err := client.Do(yelpReq)
	if err != nil {
		return nil, err
	}
	// FIX: the body was never closed, leaking the connection on every call.
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var data YelpResponse
	if err := json.Unmarshal(body, &data); err != nil {
		return nil, err
	}
	return data.Businesses, nil
}

// parseParams extracts the slash-command fields from the decoded request
// body. The free-text argument has the form "LOCATION[, RADIUS_MILES]";
// location defaults to "91205" and the radius to 5 miles (max 24).
func parseParams(params url.Values) (*Params, error) {
	log.Println("Parsing params")
	token := params.Get("token")
	responseURL := params.Get("response_url")
	userName := params.Get("user_name")
	text := strings.TrimSpace(params.Get("text"))

	location := "91205"
	radiusMi := 5.0

	// strings.Split always yields at least one element, so no length guard
	// is needed before indexing parts[0].
	parts := strings.Split(text, ",")
	if len(parts) > 1 {
		// Last comma-separated token is the radius; everything before it is
		// the location (which may itself contain commas).
		radRaw := strings.TrimSpace(parts[len(parts)-1])
		rad, err := strconv.ParseFloat(radRaw, 64)
		if err == nil {
			radiusMi = rad
		}
		location = strings.TrimSpace(strings.Join(parts[:len(parts)-1], ","))
	} else if loc := strings.TrimSpace(parts[0]); loc != "" {
		// FIX: an empty command text previously overwrote the default
		// location with "".
		location = loc
	}

	if radiusMi > 24 {
		return nil, errors.New("Maximum radius is 24 miles")
	}

	//convert miles to meters (1 mile = 1/0.00062137 meters)
	radius := int(radiusMi / 0.00062137)
	return &Params{token, responseURL, location, radius, userName}, nil
}

// Bier is the HTTP entry point for the /bier slash command: it validates
// the Slack request, queries Yelp, and posts the results back to Slack.
func Bier(w http.ResponseWriter, r *http.Request) {
	log.Println("Request received")
	// Set CORS headers for the preflight request
	if r.Method == http.MethodOptions {
		w.Header().Set("Access-Control-Allow-Origin", slackOrigin)
		w.Header().Set("Access-Control-Allow-Methods", "POST")
		w.Header().Set("Access-Control-Max-Age", "3600")
		w.WriteHeader(http.StatusNoContent)
		return
	}
	// Set main request headers.
	w.Header().Set("Access-Control-Allow-Origin", slackOrigin)

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Println("Failed to read request body")
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}
	bodyValues, err := url.ParseQuery(string(body))
	if err != nil {
		log.Println("Failed to decode body query string")
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}
	params, err := parseParams(bodyValues)
	if err != nil {
		// Parameter problems are reported back to the user in-channel.
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(err.Error()))
		return
	}
	if params.Token != slackToken {
		log.Println("Unauthorized request")
		return
	}

	// Immediately let slack know we have a valid request
	w.WriteHeader(http.StatusOK)

	businesses, err := getYelpResults(params)
	if err != nil {
		log.Printf("Error getting brewery data: %s", err.Error())
		w.Write([]byte("Internal Server Error"))
		return
	}
	if len(businesses) == 0 {
		if err := postNotFound(params); err != nil {
			log.Printf("Failed to send empty list message to slack: %s", err.Error())
			w.Write([]byte("Internal Server Error"))
		}
		return
	}
	blocks := buildBusinessBlocks(params, businesses)
	if err := postToSlack(params.ResponseURL, blocks); err != nil {
		log.Printf("Failed to post brewery results to slack: %s", err.Error())
		w.Write([]byte("Internal Server Error"))
		return
	}
}
package minedive

import (
	"context"
	crand "crypto/rand"
	b64 "encoding/base64"
	"errors"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net/http"
	"strings"
	"sync"
	"time"

	json "encoding/json"

	"golang.org/x/crypto/nacl/secretbox"
	"nhooyr.io/websocket"
	"nhooyr.io/websocket/wsjson"
)

// MinediveServer tracks every connected client plus the subsets acting as
// exits and guards; each collection is protected by its own lock.
type MinediveServer struct {
	clients      []*MinediveClient // all connected clients, guarded by clientsMutex
	clientsMutex *sync.Mutex
	nextID       uint64 // next numeric client ID, guarded by idMutex
	idMutex      *sync.Mutex
	ServeMux     http.ServeMux
	Dispatch     func(*MinediveClient, Cell) // per-message handler supplied by the embedder
	exits        map[string]*MinediveClient  // exit clients by name, guarded by exitsRWMutex
	exitsRWMutex *sync.RWMutex
	guards       map[string]*MinediveClient // guard clients by name, guarded by guardsRWMutex
	guardsRWMutex *sync.RWMutex
}

//MinediveClient is the view the server has of a connected client
type MinediveClient struct {
	ID         string
	Name       string
	TKID       string
	SecretKey  [32]byte //internally used???
	PublicKey  [32]byte //internally used???
	PubK       [32]byte
	Nonce      [24]byte
	RemoteAddr string
	Ws         *websocket.Conn
	Exit       bool // this client offers exit service
	Guard      bool // this client offers guard service
}

//GetAlias return the name a peer is seen behind another peer.
// The alias is username sealed with this client's SecretKey and Nonce,
// base64-encoded with the nonce prepended.
// NOTE(review): the same Nonce is reused for every Seal with the same key;
// secretbox nonces are normally unique per message — confirm this is
// intentional.
func (gw *MinediveClient) GetAlias(username string) (string, error) {
	var alias string
	var err error
	enc := secretbox.Seal(gw.Nonce[:], []byte(username), &gw.Nonce, &gw.SecretKey)
	alias = b64.StdEncoding.EncodeToString(enc)
	return alias, err
}

// InitMinediveServer allocates the server's locks and maps; it must be
// called before any other method.
func (s *MinediveServer) InitMinediveServer() {
	s.clientsMutex = &sync.Mutex{}
	s.idMutex = &sync.Mutex{}
	s.exitsRWMutex = &sync.RWMutex{}
	s.guardsRWMutex = &sync.RWMutex{}
	s.guards = make(map[string]*MinediveClient)
	s.exits = make(map[string]*MinediveClient)
	//s.ServeMux.HandleFunc("/ws", s.minediveAccept)
	log.Println("MinediveServer initialized")
}

// AddExit registers cli in the exit map under its name.
func (s *MinediveServer) AddExit(cli *MinediveClient) {
	s.exitsRWMutex.Lock()
	s.exits[cli.Name] = cli
	s.exitsRWMutex.Unlock()
}

// DelExit removes the named client from the exit map.
func (s *MinediveServer) DelExit(cliName string) {
	s.exitsRWMutex.Lock()
	delete(s.exits, cliName)
	s.exitsRWMutex.Unlock()
}

//XXX recheck for skipped
// GetExit returns a pseudo-randomly chosen exit whose name is not in
// avoid. The two loops implement "start at a random offset, wrap around".
// NOTE(review): rand.Intn panics when its argument is 0, so this panics
// when no exits are registered — consider checking len(s.exits) first.
func (s *MinediveServer) GetExit(avoid map[string]bool) (cli *MinediveClient, err error) {
	cli = nil
	s.exitsRWMutex.RLock()
	skip := rand.Intn(len(s.exits))
	i := 0
	for cn := range s.exits {
		if i > skip {
			_, ok := avoid[cn]
			if ok == false {
				cli = s.exits[cn]
				goto UNLOCK
			}
		}
		i++
	}
	i = 0
	for cn := range s.exits {
		if i <= skip {
			_, ok := avoid[cn]
			if ok == false {
				cli = s.exits[cn]
				goto UNLOCK
			}
		}
		i++
	}
UNLOCK:
	s.exitsRWMutex.RUnlock()
	if cli == nil {
		return nil, errors.New("no exits available")
	}
	return cli, nil
}

// AddGuard registers cli in the guard map under its name.
func (s *MinediveServer) AddGuard(cli *MinediveClient) {
	s.guardsRWMutex.Lock()
	s.guards[cli.Name] = cli
	s.guardsRWMutex.Unlock()
}

// DelGuard removes the named client from the guard map.
func (s *MinediveServer) DelGuard(cliName string) {
	s.guardsRWMutex.Lock()
	delete(s.guards, cliName)
	s.guardsRWMutex.Unlock()
}

// GetExits returns a snapshot of all registered exit names.
func (s *MinediveServer) GetExits() []string {
	s.exitsRWMutex.RLock()
	o := make([]string, len(s.exits))
	i := 0
	for k := range s.exits {
		o[i] = k
		i++
	}
	s.exitsRWMutex.RUnlock()
	return o
}

// GetGuards returns a snapshot of all registered guard names.
func (s *MinediveServer) GetGuards() []string {
	s.guardsRWMutex.RLock()
	o := make([]string, len(s.guards))
	i := 0
	for k := range s.guards {
		o[i] = k
		i++
	}
	s.guardsRWMutex.RUnlock()
	return o
}

// GetGuard returns a pseudo-randomly chosen guard whose name is not in
// avoid; same random-offset wrap-around scheme as GetExit.
// NOTE(review): like GetExit, this panics (rand.Intn(0)) when no guards
// are registered.
func (s *MinediveServer) GetGuard(avoid map[string]bool) (cli *MinediveClient, err error) {
	cli = nil
	s.guardsRWMutex.RLock()
	skip := rand.Intn(len(s.guards))
	i := 0
	for cn := range s.guards {
		if i > skip {
			_, ok := avoid[cn]
			if ok == false {
				cli = s.guards[cn]
				goto UNLOCK
			}
		}
		i++
	}
	i = 0
	for cn := range s.guards {
		if i <= skip {
			_, ok := avoid[cn]
			if ok == false {
				cli = s.guards[cn]
				goto UNLOCK
			}
		}
		i++
	}
UNLOCK:
	s.guardsRWMutex.RUnlock()
	if cli == nil {
		return nil, errors.New("no guards available")
	}
	return cli, nil
}

// MinediveAccept upgrades an HTTP request to a websocket, registers the
// new client, then loops reading Cells (with a 5-minute read deadline per
// message) and passing them to s.Dispatch until the connection drops.
func (s *MinediveServer) MinediveAccept(w http.ResponseWriter, r *http.Request) {
	var remoteAddr string
	// Prefer the reverse-proxy-supplied client address when present.
	host := r.Header.Get("X-Real-IP")
	if host != "" {
		port := strings.Split(r.RemoteAddr, ":")
		if len(port) == 2 {
			remoteAddr = fmt.Sprintf("%s:%s", host, port[1])
		} else {
			remoteAddr = fmt.Sprintf("%s(%s)", host, r.RemoteAddr)
		}
	} else {
		remoteAddr = r.RemoteAddr
	}
	log.Printf("minediveAccept invoked from %s\n", remoteAddr)
	opts := websocket.AcceptOptions{}
	opts.InsecureSkipVerify = true
	opts.Subprotocols = append(opts.Subprotocols, "json")
	//opts.OriginPatters
	//log.Println("subproto", opts.Subprotocols)
	ws, err := websocket.Accept(w, r, &opts)
	if err != nil {
		log.Println(err)
		return
	}
	defer ws.Close(websocket.StatusGoingAway, "")
	var cli MinediveClient
	s.idMutex.Lock()
	cli.ID = fmt.Sprintf("%d", s.nextID)
	s.nextID++
	s.idMutex.Unlock()
	cli.Ws = ws
	cli.RemoteAddr = remoteAddr
	if _, err := io.ReadFull(crand.Reader, cli.SecretKey[:]); err != nil {
		log.Println(err)
		// NOTE(review): websocket.CloseStatus only inspects err; its result
		// is discarded here and execution continues with a zero SecretKey.
		// This branch probably intended to close the socket and return.
		websocket.CloseStatus(err)
	}
	s.clientsMutex.Lock()
	s.clients = append(s.clients, &cli)
	s.clientsMutex.Unlock()
	for {
		var jmsg Cell
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
		err := wsjson.Read(ctx, ws, &jmsg)
		if err != nil {
			status := websocket.CloseStatus(err)
			//log.Println("err", err, "status", status)
			if status == -1 {
				//log.Println("READ ERROR", err)
			}
			s.DeleteClientByName(cli.Name)
			cli.Ws.Close(websocket.StatusAbnormalClosure, "") //not really needed but...
			log.Printf("%s %s disconnected (%d)\n", cli.ID, cli.Name, status)
			cancel()
			return
		}
		s.Dispatch(&cli, jmsg)
		cancel()
	}
}

// DeleteClientByName removes the named client from the client list
// (swap-with-last, truncate) and, if it served as exit/guard, from those
// maps as well. Returns an error when the name is unknown.
func (s *MinediveServer) DeleteClientByName(name string) error {
	var c *MinediveClient
	s.clientsMutex.Lock()
	// NOTE(review): "len" shadows the builtin for the rest of this function.
	len := len(s.clients)
	for n := range s.clients {
		c = s.clients[n]
		if c.Name == name {
			s.clients[n] = s.clients[len-1]
			s.clients[len-1] = nil // allow the removed entry to be GC'd
			s.clients = s.clients[:len-1]
			s.clientsMutex.Unlock()
			if c.Exit {
				s.DelExit(c.Name)
			}
			if c.Guard {
				s.DelGuard(c.Name)
			}
			return nil
		}
	}
	s.clientsMutex.Unlock()
	return errors.New("Client not found")
}

// GetClientByName looks up a connected client by name.
func (s *MinediveServer) GetClientByName(name string) (*MinediveClient, error) {
	var c *MinediveClient
	s.clientsMutex.Lock()
	for n := range s.clients {
		c = s.clients[n]
		if c.Name == name {
			s.clientsMutex.Unlock()
			return c, nil
		}
	}
	s.clientsMutex.Unlock()
	return nil, errors.New("Client not found")
}

// dumpClients logs the current client list (debug helper).
func (s *MinediveServer) dumpClients() {
	s.clientsMutex.Lock()
	if len(s.clients) == 0 {
		log.Println("dump clients: empty")
	}
	for n := range s.clients {
		log.Println("dump clients", n, s.clients[n].Name)
	}
	s.clientsMutex.Unlock()
}

// GetOtherPeer picks one random connected client; it returns an error if
// fewer than two clients exist or the random pick happens to be cli itself
// (the caller is expected to retry).
func (s *MinediveServer) GetOtherPeer(cli *MinediveClient) (*MinediveClient, error) {
	s.clientsMutex.Lock()
	if len(s.clients) > 1 {
		i := rand.Intn(len(s.clients))
		c := s.clients[i]
		s.clientsMutex.Unlock()
		if c == cli {
			return cli, errors.New("getOtherPeer: same peer")
		}
		return c, nil
	}
	s.clientsMutex.Unlock()
	return cli, errors.New("getOtherPeer: no peers")
}

// jb64 marshals j to JSON and returns it base64-encoded.
func jb64(j interface{}) (str string, err error) {
	t, err := json.Marshal(j)
	if err != nil {
		return "", err
	}
	return b64.StdEncoding.EncodeToString(t), nil
}

// SendPeer introduces cli to one random other peer: each side receives a
// "userlist" Cell describing the other, with names aliased through the
// introducing side's secret key. D2 is set to "e" for exit peers.
func (s *MinediveServer) SendPeer(cli *MinediveClient) {
	var c2 *MinediveClient
	var m1, m2 Cell
	var p1, p2 Cell
	var err error
	c2, err = s.GetOtherPeer(cli)
	if err != nil {
		// No usable peer: send an empty userlist so the client is answered.
		m1.Type = "userlist"
		wsjson.Write(context.Background(), cli.Ws, m1)
		return
	}
	log.Println("other peer found", c2.Name)
	p1.Type = "user"
	p1.D0 = cli.Name
	p1.D1, err = c2.GetAlias(cli.Name)
	if cli.Exit {
		p1.D2 = "e"
	}
	if err != nil {
		log.Println(err)
	}
	p2.D0 = c2.Name
	p2.D1, err = cli.GetAlias(c2.Name)
	if c2.Exit {
		p2.D2 = "e"
	}
	if err != nil {
		log.Println(err)
	}
	m1.Type = "userlist"
	log.Println(p2)
	m1.D1, err = jb64(p2)
	if err != nil {
		log.Println(err)
		return
	}
	log.Println(m1.D1)
	log.Println(m1)
	m1.D0 = "0"
	wsjson.Write(context.Background(), cli.Ws, m1)
	log.Println("sent", p2.D0, "to", cli.Name)
	m2.Type = "userlist"
	m2.D1, err = jb64(p1)
	if err != nil {
		log.Println(err)
		return
	}
	m2.D0 = "1"
	wsjson.Write(context.Background(), c2.Ws, m2)
	log.Println("sent", p1.D0, "to", c2.Name)
}

// DecryptAlias reverses GetAlias: it opens the alias with gwName's secret
// key, resolves the decrypted client name, and returns that client's
// public key base64-encoded.
// NOTE(review): the DecodeString error is ignored and encrypted[:24] will
// panic when the decoded alias is shorter than 24 bytes; on lookup failure
// the literal string "nil" (not "") is returned alongside the error.
func (s *MinediveServer) DecryptAlias(alias string, gwName string) (string, error) {
	var encrypted, decrypted []byte
	var decryptNonce [24]byte
	gw, err := s.GetClientByName(gwName)
	if err != nil {
		log.Println(err)
		return "", err
	}
	encrypted, err = b64.StdEncoding.DecodeString(alias)
	copy(decryptNonce[:], encrypted[:24])
	decrypted, ok := secretbox.Open(nil, encrypted[24:], &decryptNonce, &gw.SecretKey)
	if ok != true {
		return "", errors.New("decryption failed")
	}
	a, err := s.GetClientByName(string(decrypted))
	if err != nil {
		log.Println(err)
		return "nil", err
	}
	return b64.StdEncoding.EncodeToString(a.PublicKey[:]), nil
}

// FwdToTarget forwards m to every connected client whose name equals m.D1.
// NOTE(review): the write happens while clientsMutex is held.
func (s *MinediveServer) FwdToTarget(m *Cell) {
	s.clientsMutex.Lock()
	var c *MinediveClient
	for n := range s.clients {
		c = s.clients[n]
		if c.Name == m.D1 {
			wsjson.Write(context.Background(), c.Ws, m)
		}
	}
	s.clientsMutex.Unlock()
}

// SendKey answers a key request: it decrypts the alias in req.D0 through
// gateway req.D1 and sends the resulting public key back to c.
func (s *MinediveServer) SendKey(c *MinediveClient, req *Cell) {
	var m Cell
	if c.Name == req.D1 {
		log.Println("Client is his own GW")
		return
	}
	aliasKey, err := s.DecryptAlias(req.D0, req.D1)
	if err != nil {
		log.Println(err)
		return
	}
	m.Type = "key"
	m.D0 = req.D0
	m.D1 = aliasKey
	log.Println("Sending Message: ", m)
	wsjson.Write(context.Background(), c.Ws, m)
}
package notice

import (
	"io/ioutil"
	"net/http"
	"net/url"
)

// EmailNoticer sends e-mail notifications through the HTTP form gateway
// described by its EmailConfig.
type EmailNoticer struct {
	config *EmailConfig
}

// SendEmail posts one message to the configured gateway and returns the
// raw response body as a string.
func (en *EmailNoticer) SendEmail(recipient, subject, content string) (string, error) {
	resp, err := http.PostForm(en.config.Addr, en.newPayload(recipient, subject, content))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}

// newPayload assembles the form fields expected by the gateway.
func (en *EmailNoticer) newPayload(recipient, subject, content string) url.Values {
	form := url.Values{}
	form.Add("task", en.config.User)
	form.Add("key", en.config.Password)
	form.Add("email_destinations", recipient)
	form.Add("email_subject", subject)
	form.Add("email_content", content)
	return form
}

// NewEmailNoticer builds an EmailNoticer, falling back to the default
// configuration when config is nil.
func NewEmailNoticer(config *EmailConfig) *EmailNoticer {
	en := &EmailNoticer{}
	if config == nil {
		en.config = NewEmailConfig()
	} else {
		en.config = config
	}
	return en
}
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package server converts IAM ServiceAccount resources between their DCL
// and proto representations and exposes the gRPC Apply/Delete/List
// handlers for them.
package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	iampb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/iam/iam_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam"
)

// ServiceAccountServer implements the gRPC interface for ServiceAccount.
type ServiceAccountServer struct{}

// ProtoToIamServiceAccountActasResources converts a ServiceAccountActasResources object from its proto representation.
// A nil proto maps to a nil object.
func ProtoToIamServiceAccountActasResources(p *iampb.IamServiceAccountActasResources) *iam.ServiceAccountActasResources {
	if p == nil {
		return nil
	}
	obj := &iam.ServiceAccountActasResources{}
	for _, r := range p.GetResources() {
		obj.Resources = append(obj.Resources, *ProtoToIamServiceAccountActasResourcesResources(r))
	}
	return obj
}

// ProtoToIamServiceAccountActasResourcesResources converts a ServiceAccountActasResourcesResources object from its proto representation.
func ProtoToIamServiceAccountActasResourcesResources(p *iampb.IamServiceAccountActasResourcesResources) *iam.ServiceAccountActasResourcesResources {
	if p == nil {
		return nil
	}
	obj := &iam.ServiceAccountActasResourcesResources{
		FullResourceName: dcl.StringOrNil(p.GetFullResourceName()),
	}
	return obj
}

// ProtoToServiceAccount converts a ServiceAccount resource from its proto representation.
func ProtoToServiceAccount(p *iampb.IamServiceAccount) *iam.ServiceAccount {
	obj := &iam.ServiceAccount{
		Name:           dcl.StringOrNil(p.GetName()),
		Project:        dcl.StringOrNil(p.GetProject()),
		UniqueId:       dcl.StringOrNil(p.GetUniqueId()),
		Email:          dcl.StringOrNil(p.GetEmail()),
		DisplayName:    dcl.StringOrNil(p.GetDisplayName()),
		Description:    dcl.StringOrNil(p.GetDescription()),
		OAuth2ClientId: dcl.StringOrNil(p.GetOauth2ClientId()),
		ActasResources: ProtoToIamServiceAccountActasResources(p.GetActasResources()),
		Disabled:       dcl.Bool(p.GetDisabled()),
	}
	return obj
}

// IamServiceAccountActasResourcesToProto converts a ServiceAccountActasResources object to its proto representation.
// A nil object maps to a nil proto.
func IamServiceAccountActasResourcesToProto(o *iam.ServiceAccountActasResources) *iampb.IamServiceAccountActasResources {
	if o == nil {
		return nil
	}
	p := &iampb.IamServiceAccountActasResources{}
	sResources := make([]*iampb.IamServiceAccountActasResourcesResources, len(o.Resources))
	for i, r := range o.Resources {
		sResources[i] = IamServiceAccountActasResourcesResourcesToProto(&r)
	}
	p.SetResources(sResources)
	return p
}

// IamServiceAccountActasResourcesResourcesToProto converts a ServiceAccountActasResourcesResources object to its proto representation.
func IamServiceAccountActasResourcesResourcesToProto(o *iam.ServiceAccountActasResourcesResources) *iampb.IamServiceAccountActasResourcesResources {
	if o == nil {
		return nil
	}
	p := &iampb.IamServiceAccountActasResourcesResources{}
	p.SetFullResourceName(dcl.ValueOrEmptyString(o.FullResourceName))
	return p
}

// ServiceAccountToProto converts a ServiceAccount resource to its proto representation.
func ServiceAccountToProto(resource *iam.ServiceAccount) *iampb.IamServiceAccount {
	p := &iampb.IamServiceAccount{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetUniqueId(dcl.ValueOrEmptyString(resource.UniqueId))
	p.SetEmail(dcl.ValueOrEmptyString(resource.Email))
	p.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetOauth2ClientId(dcl.ValueOrEmptyString(resource.OAuth2ClientId))
	p.SetActasResources(IamServiceAccountActasResourcesToProto(resource.ActasResources))
	p.SetDisabled(dcl.ValueOrEmptyBool(resource.Disabled))
	return p
}

// applyServiceAccount handles the gRPC request by passing it to the underlying ServiceAccount Apply() method.
func (s *ServiceAccountServer) applyServiceAccount(ctx context.Context, c *iam.Client, request *iampb.ApplyIamServiceAccountRequest) (*iampb.IamServiceAccount, error) {
	p := ProtoToServiceAccount(request.GetResource())
	res, err := c.ApplyServiceAccount(ctx, p)
	if err != nil {
		return nil, err
	}
	r := ServiceAccountToProto(res)
	return r, nil
}

// ApplyIamServiceAccount handles the gRPC request by passing it to the underlying ServiceAccount Apply() method.
func (s *ServiceAccountServer) ApplyIamServiceAccount(ctx context.Context, request *iampb.ApplyIamServiceAccountRequest) (*iampb.IamServiceAccount, error) { cl, err := createConfigServiceAccount(ctx, request.GetServiceAccountFile()) if err != nil { return nil, err } return s.applyServiceAccount(ctx, cl, request) } // DeleteServiceAccount handles the gRPC request by passing it to the underlying ServiceAccount Delete() method. func (s *ServiceAccountServer) DeleteIamServiceAccount(ctx context.Context, request *iampb.DeleteIamServiceAccountRequest) (*emptypb.Empty, error) { cl, err := createConfigServiceAccount(ctx, request.GetServiceAccountFile()) if err != nil { return nil, err } return &emptypb.Empty{}, cl.DeleteServiceAccount(ctx, ProtoToServiceAccount(request.GetResource())) } // ListIamServiceAccount handles the gRPC request by passing it to the underlying ServiceAccountList() method. func (s *ServiceAccountServer) ListIamServiceAccount(ctx context.Context, request *iampb.ListIamServiceAccountRequest) (*iampb.ListIamServiceAccountResponse, error) { cl, err := createConfigServiceAccount(ctx, request.GetServiceAccountFile()) if err != nil { return nil, err } resources, err := cl.ListServiceAccount(ctx, request.GetProject()) if err != nil { return nil, err } var protos []*iampb.IamServiceAccount for _, r := range resources.Items { rp := ServiceAccountToProto(r) protos = append(protos, rp) } p := &iampb.ListIamServiceAccountResponse{} p.SetItems(protos) return p, nil } func createConfigServiceAccount(ctx context.Context, service_account_file string) (*iam.Client, error) { conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file)) return iam.NewClient(conf), nil }
package types

// header mirrors the fixed-size header at the start of a compiled Lua
// binary chunk (luac output). The fields are check values: a loader
// compares them with its own to verify the chunk matches the host VM.
type header struct {
	signature       [4]byte // magic bytes identifying a luac chunk
	version         byte    // Lua version the chunk was compiled for
	format          byte    // chunk format
	luacData        [6]byte // fixed check data used to detect corruption
	cintSize        byte    // size in bytes of a C int on the compiling host
	sizetSize       byte    // size in bytes of size_t
	instructionSize byte    // size in bytes of a VM instruction
	luaIntegerSize  byte    // size in bytes of a Lua integer
	luaNumberSize   byte    // size in bytes of a Lua number
	luacInt         int64   // check value for integer encoding/endianness
	luacNum         float64 // check value for float encoding
}

// BinaryChunk is a parsed compiled Lua chunk: the header, the number of
// upvalues of the main function, and the main function prototype.
type BinaryChunk struct {
	header
	sizeUpvalues byte // number of upvalues of mainFunc
	mainFunc     *Prototype
}
package auth

// ServicePrincipal holds the credentials and identifying metadata of a
// service principal used for authentication.
// NOTE(review): the ApplicationId/Tenant field names suggest an Azure AD
// service principal — confirm before documenting further.
type ServicePrincipal struct {
	ApplicationId string // application (client) identifier
	Password      string // client secret
	Tenant        string // tenant the principal belongs to
	DisplayName   string // no usage as of yet
	Name          string // no usage as of yet
}
package sheet_logic import ( "hub/sheet_logic/sheet_logic_types" ) type IntMultiplication struct { GrammarElement BinaryOperationInt } func (i *IntMultiplication) CalculateInt(g GrammarContext) (result int64, err error) { leftVal, errL := i.GetLeftArg().CalculateInt(g) rightVal, errR := i.GetRightArg().CalculateInt(g) if err = getFirstError(errL, errR); err == nil { result = leftVal * rightVal } return } func NewIntMultiplication(name string) *IntMultiplication { return &IntMultiplication{ &grammarElementImpl{name, sheet_logic_types.IntMultiplication}, DefaultBinaryOperationIntImpl()} } type FloatMultiplication struct { GrammarElement BinaryOperationFloat } func (f *FloatMultiplication) CalculateFloat(g GrammarContext) (result float64, err error) { leftVal, errL := f.GetLeftArg().CalculateFloat(g) rightVal, errR := f.GetRightArg().CalculateFloat(g) if err = getFirstError(errL, errR); err == nil { result = leftVal * rightVal } return } func NewFloatMultiplication(name string) *FloatMultiplication { return &FloatMultiplication{ &grammarElementImpl{name, sheet_logic_types.FloatMultiplication}, DefaultBinaryOperationFloatImpl()} }
package main

import (
	"fmt"
	"log"
	"net/http"
	"sort"
	"sync"
)

// A tiny in-memory completion counter:
//
//	GET /?clear=1    resets all counters
//	GET /?done=TOPIC increments TOPIC's counter and the global total
//	GET /            prints the total and the per-topic counts
func main() {
	stats := map[string]int{} // topic->count
	totalDone := 0
	valMutex := sync.Mutex{} // guards stats and totalDone

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("clear") != "" {
			valMutex.Lock()
			stats = map[string]int{}
			totalDone = 0
			valMutex.Unlock()
			return
		}

		if topic := r.URL.Query().Get("done"); topic != "" { // done=TOPIC
			valMutex.Lock()
			// A missing key reads as zero, so no existence check is needed.
			stats[topic]++
			totalDone++
			valMutex.Unlock()
			return
		}

		valMutex.Lock()
		response := fmt.Sprintf("Total done: %d\n", totalDone)
		// FIX: map iteration order is random in Go; sort the topics so the
		// report is deterministic between requests.
		topics := make([]string, 0, len(stats))
		for topic := range stats {
			topics = append(topics, topic)
		}
		sort.Strings(topics)
		for _, topic := range topics {
			response += fmt.Sprintf(" %s : %d\n", topic, stats[topic])
		}
		valMutex.Unlock()
		w.Write([]byte(response))
	})

	fmt.Print("Listening on port 8080\n")
	// FIX: ListenAndServe's error (e.g. port already in use) was silently
	// dropped; fail loudly instead.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
package cls

import (
	"testing"
	"time"
)

// TestClSCleint_UploadLog exercises UploadLog with a table of client
// configurations and log payloads.
// NOTE(review): "ClSCleint" and "DowloadLog" look like typos for
// "CLSClient"/"DownloadLog"; renaming would touch the implementation, so
// they are only flagged here.
func TestClSCleint_UploadLog(t *testing.T) {
	k1, v1, k2, v2 := "key1", "value1", "key2", "value2"
	t1 := time.Now().Unix()
	type fields struct {
		SecretId  string
		SecretKey string
		Host      string
	}
	type args struct {
		logTopicID   string
		logGroupList LogGroupList
		hash         string
		compress     bool
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			// test01: one log group with two key/value contents, no hash,
			// no compression; expected to succeed.
			name: "test01",
			fields: fields{
				SecretId:  "secret-id01",
				SecretKey: "secret-key01",
				Host:      "127.0.0.1:8080",
			},
			args: args{
				logTopicID: "topic01",
				logGroupList: LogGroupList{
					LogGroupList: []*LogGroup{
						&LogGroup{
							Logs: []*Log{
								&Log{
									Time: &t1,
									Contents: []*Log_Content{
										&Log_Content{
											Key:   &k1,
											Value: &v1,
										},
										&Log_Content{
											Key:   &k2,
											Value: &v2,
										},
									},
								},
							},
						},
					},
				},
				hash:     "",
				compress: false,
			},
			wantErr: false,
		},
		{
			// test02: same payload but hash/compress left at their zero
			// values; expected to fail.
			name: "test02",
			fields: fields{
				SecretId:  "secret-id01",
				SecretKey: "secret-key01",
				Host:      "127.0.0.1:8080",
			},
			args: args{
				logTopicID: "topic02",
				logGroupList: LogGroupList{
					LogGroupList: []*LogGroup{
						&LogGroup{
							Logs: []*Log{
								&Log{
									Time: &t1,
									Contents: []*Log_Content{
										&Log_Content{
											Key:   &k1,
											Value: &v1,
										},
										&Log_Content{
											Key:   &k2,
											Value: &v2,
										},
									},
								},
							},
						},
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cls := &ClSCleint{
				SecretId:  tt.fields.SecretId,
				SecretKey: tt.fields.SecretKey,
				Host:      tt.fields.Host,
			}
			if err := cls.UploadLog(tt.args.logTopicID, tt.args.logGroupList, tt.args.hash, tt.args.compress); (err != nil) != tt.wantErr {
				t.Errorf("ClSCleint.UploadLog() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

// TestClSCleint_GetLogStart verifies cursor retrieval for a topic/start
// time. The table is currently empty.
func TestClSCleint_GetLogStart(t *testing.T) {
	type fields struct {
		SecretId  string
		SecretKey string
		Host      string
	}
	type args struct {
		logTopicID string
		start      string
	}
	tests := []struct {
		name       string
		fields     fields
		args       args
		wantCursor string
		wantErr    bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cls := &ClSCleint{
				SecretId:  tt.fields.SecretId,
				SecretKey: tt.fields.SecretKey,
				Host:      tt.fields.Host,
			}
			gotCursor, err := cls.GetLogStart(tt.args.logTopicID, tt.args.start)
			if (err != nil) != tt.wantErr {
				t.Errorf("ClSCleint.GetLogStart() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if gotCursor != tt.wantCursor {
				t.Errorf("ClSCleint.GetLogStart() = %v, want %v", gotCursor, tt.wantCursor)
			}
		})
	}
}

// TestClSCleint_SearchLog verifies log search with a request-parameter
// map. The table is currently empty.
func TestClSCleint_SearchLog(t *testing.T) {
	type fields struct {
		SecretId  string
		SecretKey string
		Host      string
	}
	type args struct {
		requestDataMap map[string]string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    string
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cls := &ClSCleint{
				SecretId:  tt.fields.SecretId,
				SecretKey: tt.fields.SecretKey,
				Host:      tt.fields.Host,
			}
			got, err := cls.SearchLog(tt.args.requestDataMap)
			if (err != nil) != tt.wantErr {
				t.Errorf("ClSCleint.SearchLog() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("ClSCleint.SearchLog() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestClSCleint_DowloadLog verifies log download by cursor/count. The
// table is currently empty.
func TestClSCleint_DowloadLog(t *testing.T) {
	type fields struct {
		SecretId  string
		SecretKey string
		Host      string
	}
	type args struct {
		logTopicID string
		cursor     string
		count      string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cls := &ClSCleint{
				SecretId:  tt.fields.SecretId,
				SecretKey: tt.fields.SecretKey,
				Host:      tt.fields.Host,
			}
			if err := cls.DowloadLog(tt.args.logTopicID, tt.args.cursor, tt.args.count); (err != nil) != tt.wantErr {
				t.Errorf("ClSCleint.DowloadLog() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package executor

import (
	"bytes"
	"cmp"
	"context"
	"fmt"
	"math"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/pingcap/errors"
	"github.com/pingcap/failpoint"
	"github.com/pingcap/kvproto/pkg/diagnosticspb"
	"github.com/pingcap/kvproto/pkg/metapb"
	"github.com/pingcap/tidb/config"
	"github.com/pingcap/tidb/ddl"
	"github.com/pingcap/tidb/ddl/placement"
	"github.com/pingcap/tidb/distsql"
	"github.com/pingcap/tidb/domain"
	"github.com/pingcap/tidb/executor/aggfuncs"
	"github.com/pingcap/tidb/executor/internal/builder"
	"github.com/pingcap/tidb/executor/internal/calibrateresource"
	"github.com/pingcap/tidb/executor/internal/exec"
	"github.com/pingcap/tidb/executor/internal/pdhelper"
	"github.com/pingcap/tidb/executor/internal/querywatch"
	"github.com/pingcap/tidb/executor/internal/vecgroupchecker"
	"github.com/pingcap/tidb/executor/lockstats"
	executor_metrics "github.com/pingcap/tidb/executor/metrics"
	"github.com/pingcap/tidb/expression"
	"github.com/pingcap/tidb/expression/aggregation"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/parser/ast"
	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/parser/mysql"
	plannercore "github.com/pingcap/tidb/planner/core"
	plannerutil "github.com/pingcap/tidb/planner/util"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/sessiontxn"
	"github.com/pingcap/tidb/sessiontxn/staleread"
	"github.com/pingcap/tidb/statistics"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/table/tables"
	"github.com/pingcap/tidb/table/temptable"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/collate"
	"github.com/pingcap/tidb/util/cteutil"
	"github.com/pingcap/tidb/util/dbterror/exeerrors"
	"github.com/pingcap/tidb/util/execdetails"
	"github.com/pingcap/tidb/util/mathutil"
	"github.com/pingcap/tidb/util/memory"
	"github.com/pingcap/tidb/util/ranger"
	"github.com/pingcap/tidb/util/rowcodec"
	"github.com/pingcap/tidb/util/tiflash"
	"github.com/pingcap/tidb/util/timeutil"
	"github.com/pingcap/tipb/go-tipb"
	clientkv "github.com/tikv/client-go/v2/kv"
	"github.com/tikv/client-go/v2/tikv"
	"github.com/tikv/client-go/v2/txnkv"
	"github.com/tikv/client-go/v2/txnkv/txnsnapshot"
	clientutil "github.com/tikv/client-go/v2/util"
)

// executorBuilder builds an Executor from a Plan.
// The InfoSchema must not change during execution.
type executorBuilder struct {
	ctx     sessionctx.Context
	is      infoschema.InfoSchema
	err     error // err is set when there is error happened during Executor building process.
	hasLock bool
	Ti      *TelemetryInfo
	// isStaleness means whether this statement use stale read.
	isStaleness      bool
	txnScope         string
	readReplicaScope string
	inUpdateStmt     bool
	inDeleteStmt     bool
	inInsertStmt     bool
	inSelectLockStmt bool

	// forDataReaderBuilder indicates whether the builder is used by a dataReaderBuilder.
	// When forDataReader is true, the builder should use the dataReaderTS as the executor read ts. This is because
	// dataReaderBuilder can be used in concurrent goroutines, so we must ensure that getting the ts should be thread safe and
	// can return a correct value even if the session context has already been destroyed
	forDataReaderBuilder bool
	dataReaderTS         uint64

	// Used when building MPPGather.
	encounterUnionScan bool
}

// CTEStorages stores resTbl and iterInTbl for CTEExec.
// There will be a map[CTEStorageID]*CTEStorages in StmtCtx,
// which will store all CTEStorages to make all shared CTEs use the same CTEStorages.
type CTEStorages struct {
	ResTbl    cteutil.Storage
	IterInTbl cteutil.Storage
	Producer  *cteProducer
}

// newExecutorBuilder creates a builder bound to the given session context
// and info schema, seeding the staleness/scope fields from the session's
// transaction manager.
func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo) *executorBuilder {
	txnManager := sessiontxn.GetTxnManager(ctx)
	return &executorBuilder{
		ctx:              ctx,
		is:               is,
		Ti:               ti,
		isStaleness:      staleread.IsStmtStaleness(ctx),
		txnScope:         txnManager.GetTxnScope(),
		readReplicaScope: txnManager.GetReadReplicaScope(),
	}
}

// MockPhysicalPlan is used to return a specified executor in when build.
// It is mainly used for testing.
type MockPhysicalPlan interface {
	plannercore.PhysicalPlan
	GetExecutor() exec.Executor
}

// MockExecutorBuilder is a wrapper for executorBuilder.
// ONLY used in test.
type MockExecutorBuilder struct {
	*executorBuilder
}

// NewMockExecutorBuilderForTest is ONLY used in test.
func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo) *MockExecutorBuilder {
	return &MockExecutorBuilder{
		executorBuilder: newExecutorBuilder(ctx, is, ti)}
}

// Build builds an executor tree according to `p`.
func (b *MockExecutorBuilder) Build(p plannercore.Plan) exec.Executor {
	return b.build(p)
}

// build dispatches a plan node to its matching build* constructor. An unknown
// plan type falls through to the MockPhysicalPlan test hook; otherwise b.err
// is set and nil is returned.
func (b *executorBuilder) build(p plannercore.Plan) exec.Executor {
	switch v := p.(type) {
	case nil:
		return nil
	case *plannercore.Change:
		return b.buildChange(v)
	case *plannercore.CheckTable:
		return b.buildCheckTable(v)
	case *plannercore.RecoverIndex:
		return b.buildRecoverIndex(v)
	case *plannercore.CleanupIndex:
		return b.buildCleanupIndex(v)
	case *plannercore.CheckIndexRange:
		return b.buildCheckIndexRange(v)
	case *plannercore.ChecksumTable:
		return b.buildChecksumTable(v)
	case *plannercore.ReloadExprPushdownBlacklist:
		return b.buildReloadExprPushdownBlacklist(v)
	case *plannercore.ReloadOptRuleBlacklist:
		return b.buildReloadOptRuleBlacklist(v)
	case *plannercore.AdminPlugins:
		return b.buildAdminPlugins(v)
	case *plannercore.DDL:
		return b.buildDDL(v)
	case *plannercore.Deallocate:
		return b.buildDeallocate(v)
	case *plannercore.Delete:
		return b.buildDelete(v)
	case *plannercore.Execute:
		return b.buildExecute(v)
	case *plannercore.Trace:
		return b.buildTrace(v)
	case *plannercore.Explain:
		return b.buildExplain(v)
	case *plannercore.PointGetPlan:
		return b.buildPointGet(v)
	case *plannercore.BatchPointGetPlan:
		return b.buildBatchPointGet(v)
	case *plannercore.Insert:
		return b.buildInsert(v)
	case *plannercore.ImportInto:
		return b.buildImportInto(v)
	case *plannercore.LoadData:
		return b.buildLoadData(v)
	case *plannercore.LoadStats:
		return b.buildLoadStats(v)
	case *plannercore.LockStats:
		return b.buildLockStats(v)
	case *plannercore.UnlockStats:
		return b.buildUnlockStats(v)
	case *plannercore.IndexAdvise:
		return b.buildIndexAdvise(v)
	case *plannercore.PlanReplayer:
		return b.buildPlanReplayer(v)
	case *plannercore.PhysicalLimit:
		return b.buildLimit(v)
	case *plannercore.Prepare:
		return b.buildPrepare(v)
	case *plannercore.PhysicalLock:
		return b.buildSelectLock(v)
	case *plannercore.CancelDDLJobs:
		return b.buildCancelDDLJobs(v)
	case *plannercore.PauseDDLJobs:
		return b.buildPauseDDLJobs(v)
	case *plannercore.ResumeDDLJobs:
		return b.buildResumeDDLJobs(v)
	case *plannercore.ShowNextRowID:
		return b.buildShowNextRowID(v)
	case *plannercore.ShowDDL:
		return b.buildShowDDL(v)
	case *plannercore.PhysicalShowDDLJobs:
		return b.buildShowDDLJobs(v)
	case *plannercore.ShowDDLJobQueries:
		return b.buildShowDDLJobQueries(v)
	case *plannercore.ShowDDLJobQueriesWithRange:
		return b.buildShowDDLJobQueriesWithRange(v)
	case *plannercore.ShowSlow:
		return b.buildShowSlow(v)
	case *plannercore.PhysicalShow:
		return b.buildShow(v)
	case *plannercore.Simple:
		return b.buildSimple(v)
	case *plannercore.PhysicalSimpleWrapper:
		return b.buildSimple(&v.Inner)
	case *plannercore.Set:
		return b.buildSet(v)
	case *plannercore.SetConfig:
		return b.buildSetConfig(v)
	case *plannercore.PhysicalSort:
		return b.buildSort(v)
	case *plannercore.PhysicalTopN:
		return b.buildTopN(v)
	case *plannercore.PhysicalUnionAll:
		return b.buildUnionAll(v)
	case *plannercore.Update:
		return b.buildUpdate(v)
	case *plannercore.PhysicalUnionScan:
		return b.buildUnionScanExec(v)
	case *plannercore.PhysicalHashJoin:
		return b.buildHashJoin(v)
	case *plannercore.PhysicalMergeJoin:
		return b.buildMergeJoin(v)
	case *plannercore.PhysicalIndexJoin:
		return b.buildIndexLookUpJoin(v)
	case *plannercore.PhysicalIndexMergeJoin:
		return b.buildIndexLookUpMergeJoin(v)
	case *plannercore.PhysicalIndexHashJoin:
		return b.buildIndexNestedLoopHashJoin(v)
	case *plannercore.PhysicalSelection:
		return b.buildSelection(v)
	case *plannercore.PhysicalHashAgg:
		return b.buildHashAgg(v)
	case *plannercore.PhysicalStreamAgg:
		return b.buildStreamAgg(v)
	case *plannercore.PhysicalProjection:
		return b.buildProjection(v)
	case *plannercore.PhysicalMemTable:
		return b.buildMemTable(v)
	case *plannercore.PhysicalTableDual:
		return b.buildTableDual(v)
	case *plannercore.PhysicalApply:
		return b.buildApply(v)
	case *plannercore.PhysicalMaxOneRow:
		return b.buildMaxOneRow(v)
	case *plannercore.Analyze:
		return b.buildAnalyze(v)
	case *plannercore.PhysicalTableReader:
		return b.buildTableReader(v)
	case *plannercore.PhysicalTableSample:
		return b.buildTableSample(v)
	case *plannercore.PhysicalIndexReader:
		return b.buildIndexReader(v)
	case *plannercore.PhysicalIndexLookUpReader:
		return b.buildIndexLookUpReader(v)
	case *plannercore.PhysicalWindow:
		return b.buildWindow(v)
	case *plannercore.PhysicalShuffle:
		return b.buildShuffle(v)
	case *plannercore.PhysicalShuffleReceiverStub:
		return b.buildShuffleReceiverStub(v)
	case *plannercore.SQLBindPlan:
		return b.buildSQLBindExec(v)
	case *plannercore.SplitRegion:
		return b.buildSplitRegion(v)
	case *plannercore.PhysicalIndexMergeReader:
		return b.buildIndexMergeReader(v)
	case *plannercore.SelectInto:
		return b.buildSelectInto(v)
	case *plannercore.AdminShowTelemetry:
		return b.buildAdminShowTelemetry(v)
	case *plannercore.AdminResetTelemetryID:
		return b.buildAdminResetTelemetryID(v)
	case *plannercore.PhysicalCTE:
		return b.buildCTE(v)
	case *plannercore.PhysicalCTETable:
		return b.buildCTETableReader(v)
	case *plannercore.CompactTable:
		return b.buildCompactTable(v)
	default:
		// Test hook: a MockPhysicalPlan supplies its own executor.
		if mp, ok := p.(MockPhysicalPlan); ok {
			return mp.GetExecutor()
		}
		b.err = exeerrors.ErrUnknownPlan.GenWithStack("Unknown Plan %T", p)
		return nil
	}
}

// buildCancelDDLJobs builds an executor that cancels the given DDL job IDs.
func (b *executorBuilder) buildCancelDDLJobs(v *plannercore.CancelDDLJobs) exec.Executor {
	e := &CancelDDLJobsExec{
		CommandDDLJobsExec: &CommandDDLJobsExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
			jobIDs:       v.JobIDs,
			execute:      ddl.CancelJobs,
		},
	}
	return e
}

// buildPauseDDLJobs builds an executor that pauses the given DDL job IDs.
func (b *executorBuilder) buildPauseDDLJobs(v *plannercore.PauseDDLJobs) exec.Executor {
	e := &PauseDDLJobsExec{
		CommandDDLJobsExec: &CommandDDLJobsExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
			jobIDs:       v.JobIDs,
			execute:      ddl.PauseJobs,
		},
	}
	return e
}

// buildResumeDDLJobs builds an executor that resumes the given DDL job IDs.
func (b *executorBuilder) buildResumeDDLJobs(v *plannercore.ResumeDDLJobs) exec.Executor {
	e := &ResumeDDLJobsExec{
		CommandDDLJobsExec: &CommandDDLJobsExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
			jobIDs:       v.JobIDs,
			execute:      ddl.ResumeJobs,
		},
	}
	return e
}

// buildChange builds an executor for the CHANGE statement.
func (b *executorBuilder) buildChange(v *plannercore.Change) exec.Executor {
	return &ChangeExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		ChangeStmt:   v.ChangeStmt,
	}
}

// buildShowNextRowID builds an executor for SHOW NEXT ROW ID.
func (b *executorBuilder) buildShowNextRowID(v *plannercore.ShowNextRowID) exec.Executor {
	e := &ShowNextRowIDExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		tblName:      v.TableName,
	}
	return e
}

// buildShowDDL builds an executor for ADMIN SHOW DDL. It eagerly fetches the
// DDL owner ID and DDL info at build time.
func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) exec.Executor {
	// We get Info here because for Executors that returns result set,
	// next will be called after transaction has been committed.
	// We need the transaction to get Info.
	e := &ShowDDLExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
	}

	var err error
	ownerManager := domain.GetDomain(e.Ctx()).DDL().OwnerManager()
	// Bound the owner lookup so building cannot hang indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	e.ddlOwnerID, err = ownerManager.GetOwnerID(ctx)
	cancel()
	if err != nil {
		b.err = err
		return nil
	}

	session, err := e.GetSysSession()
	if err != nil {
		b.err = err
		return nil
	}
	ddlInfo, err := ddl.GetDDLInfoWithNewTxn(session)
	// Release the system session before inspecting err so it is not leaked on
	// the error path.
	e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
	if err != nil {
		b.err = err
		return nil
	}
	e.ddlInfo = ddlInfo
	e.selfID = ownerManager.ID()
	return e
}

// buildShowDDLJobs builds an executor for ADMIN SHOW DDL JOBS, rendering job
// times in the session's time zone.
func (b *executorBuilder) buildShowDDLJobs(v *plannercore.PhysicalShowDDLJobs) exec.Executor {
	loc := b.ctx.GetSessionVars().Location()
	ddlJobRetriever := DDLJobRetriever{TZLoc: loc}
	e := &ShowDDLJobsExec{
		jobNumber:       int(v.JobNumber),
		is:              b.is,
		BaseExecutor:    exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		DDLJobRetriever: ddlJobRetriever,
	}
	return e
}

// buildShowDDLJobQueries builds an executor listing the SQL of the given DDL jobs.
func (b *executorBuilder) buildShowDDLJobQueries(v *plannercore.ShowDDLJobQueries) exec.Executor {
	e := &ShowDDLJobQueriesExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		jobIDs:       v.JobIDs,
	}
	return e
}

// buildShowDDLJobQueriesWithRange is the LIMIT/OFFSET variant of
// buildShowDDLJobQueries.
func (b *executorBuilder) buildShowDDLJobQueriesWithRange(v *plannercore.ShowDDLJobQueriesWithRange) exec.Executor {
	e := &ShowDDLJobQueriesWithRangeExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		offset:       v.Offset,
		limit:        v.Limit,
	}
	return e
}

// buildShowSlow builds an executor for ADMIN SHOW SLOW.
func (b *executorBuilder) buildShowSlow(v *plannercore.ShowSlow) exec.Executor {
	e := &ShowSlowExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		ShowSlow:     v.ShowSlow,
	}
	return e
}

// buildIndexLookUpChecker builds check information to IndexLookUpReader.
func buildIndexLookUpChecker(b *executorBuilder, p *plannercore.PhysicalIndexLookUpReader,
	e *IndexLookUpExecutor) {
	is := p.IndexPlans[0].(*plannercore.PhysicalIndexScan)
	fullColLen := len(is.Index.Columns) + len(p.CommonHandleCols)
	if !e.isCommonHandle() {
		// Reserve one extra output slot for the int handle column.
		fullColLen++
	}
	e.dagPB.OutputOffsets = make([]uint32, fullColLen)
	for i := 0; i < fullColLen; i++ {
		e.dagPB.OutputOffsets[i] = uint32(i)
	}

	ts := p.TablePlans[0].(*plannercore.PhysicalTableScan)
	e.handleIdx = ts.HandleIdx

	e.ranges = ranger.FullRange()

	tps := make([]*types.FieldType, 0, fullColLen)
	for _, col := range is.Columns {
		// tps is used to decode the index, we should use the element type of the array if any.
		tps = append(tps, col.FieldType.ArrayType())
	}
	if !e.isCommonHandle() {
		tps = append(tps, types.NewFieldType(mysql.TypeLonglong))
	}
	e.checkIndexValue = &checkIndexValue{idxColTps: tps}

	colNames := make([]string, 0, len(is.IdxCols))
	for i := range is.IdxCols {
		colNames = append(colNames, is.Columns[i].Name.L)
	}
	if cols, missingColOffset := table.FindColumns(e.table.Cols(), colNames, true); missingColOffset >= 0 {
		b.err = plannercore.ErrUnknownColumn.GenWithStack("Unknown column %s", is.Columns[missingColOffset].Name.O)
	} else {
		e.idxTblCols = cols
	}
}

// buildCheckTable builds the executor for ADMIN CHECK TABLE. When the fast
// check path is enabled and no index is a multi-valued or prefix index, a
// FastCheckTableExec is returned; otherwise one IndexLookUpExecutor is built
// per index and checked by CheckTableExec.
func (b *executorBuilder) buildCheckTable(v *plannercore.CheckTable) exec.Executor {
	// The fast path cannot handle MV indexes or prefix (length-limited) index
	// columns, so scan the index metadata first.
	noMVIndexOrPrefixIndex := true
	for _, idx := range v.IndexInfos {
		if idx.MVIndex {
			noMVIndexOrPrefixIndex = false
			break
		}
		for _, col := range idx.Columns {
			if col.Length != types.UnspecifiedLength {
				noMVIndexOrPrefixIndex = false
				break
			}
		}
		if !noMVIndexOrPrefixIndex {
			break
		}
	}
	if b.ctx.GetSessionVars().FastCheckTable && noMVIndexOrPrefixIndex {
		e := &FastCheckTableExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
			dbName:       v.DBName,
			table:        v.Table,
			indexInfos:   v.IndexInfos,
			is:           b.is,
			err:          &atomic.Pointer[error]{},
		}
		return e
	}

	readerExecs := make([]*IndexLookUpExecutor, 0, len(v.IndexLookUpReaders))
	for _, readerPlan := range v.IndexLookUpReaders {
		readerExec, err := buildNoRangeIndexLookUpReader(b, readerPlan)
		if err != nil {
			b.err = errors.Trace(err)
			return nil
		}
		buildIndexLookUpChecker(b, readerPlan, readerExec)
		readerExecs = append(readerExecs, readerExec)
	}

	e := &CheckTableExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		dbName:       v.DBName,
		table:        v.Table,
		indexInfos:   v.IndexInfos,
		is:           b.is,
		srcs:         readerExecs,
		exitCh:       make(chan struct{}),
		retCh:        make(chan error, len(readerExecs)),
		checkIndex:   v.CheckIndex,
	}
	return e
}

// buildIdxColsConcatHandleCols returns the column infos of the index columns
// concatenated with the handle column(s): the common-handle PK columns, the
// int PK column when PKIsHandle, or a synthesized extra-handle column.
func buildIdxColsConcatHandleCols(tblInfo *model.TableInfo, indexInfo *model.IndexInfo, hasGenedCol bool) []*model.ColumnInfo {
	var pkCols []*model.IndexColumn
	if tblInfo.IsCommonHandle {
		pkIdx := tables.FindPrimaryIndex(tblInfo)
		pkCols = pkIdx.Columns
	}

	columns := make([]*model.ColumnInfo, 0, len(indexInfo.Columns)+len(pkCols))
	if hasGenedCol {
		// Generated columns may depend on any column, so keep all of them.
		columns = tblInfo.Columns
	} else {
		for _, idxCol := range indexInfo.Columns {
			if tblInfo.PKIsHandle && tblInfo.GetPkColInfo().Offset == idxCol.Offset {
				// The int handle column is appended below; skip it here.
				continue
			}
			columns = append(columns, tblInfo.Columns[idxCol.Offset])
		}
	}

	if tblInfo.IsCommonHandle {
		for _, c := range pkCols {
			columns = append(columns, tblInfo.Columns[c.Offset])
		}
		return columns
	}
	if tblInfo.PKIsHandle {
		columns = append(columns, tblInfo.Columns[tblInfo.GetPkColInfo().Offset])
		return columns
	}

	// No usable PK: synthesize the hidden extra handle column (int64).
	handleOffset := len(columns)
	handleColsInfo := &model.ColumnInfo{
		ID:     model.ExtraHandleID,
		Name:   model.ExtraHandleName,
		Offset: handleOffset,
	}
	handleColsInfo.FieldType = *types.NewFieldType(mysql.TypeLonglong)
	columns = append(columns, handleColsInfo)
	return columns
}

// buildRecoverIndex builds the executor for ADMIN RECOVER INDEX.
func (b *executorBuilder) buildRecoverIndex(v *plannercore.RecoverIndex) exec.Executor {
	tblInfo := v.Table.TableInfo
	t, err := b.is.TableByName(v.Table.Schema, tblInfo.Name)
	if err != nil {
		b.err = err
		return nil
	}
	idxName := strings.ToLower(v.IndexName)
	index := tables.GetWritableIndexByName(idxName, t)
	if index == nil {
		b.err = errors.Errorf("secondary index `%v` is not found in table `%v`", v.IndexName, v.Table.Name.O)
		return nil
	}
	var hasGenedCol bool
	for _, iCol := range index.Meta().Columns {
		if tblInfo.Columns[iCol.Offset].IsGenerated() {
			hasGenedCol = true
		}
	}
	cols := buildIdxColsConcatHandleCols(tblInfo, index.Meta(), hasGenedCol)
	e := &RecoverIndexExec{
		BaseExecutor:     exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		columns:          cols,
		containsGenedCol: hasGenedCol,
		index:            index,
		table:            t,
		physicalID:       t.Meta().ID,
	}
	sessCtx := e.Ctx().GetSessionVars().StmtCtx
	e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, index.Meta(), e.columns)
	return e
}

// buildHandleColsForExec constructs the HandleCols for an index executor:
// an int handle referencing the trailing extra column, or common-handle
// columns mapped past the index columns.
func buildHandleColsForExec(sctx *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo, allColInfo []*model.ColumnInfo) plannercore.HandleCols {
	if !tblInfo.IsCommonHandle {
		// The handle column is always the last entry of allColInfo here.
		extraColPos := len(allColInfo) - 1
		intCol := &expression.Column{
			Index:   extraColPos,
			RetType: types.NewFieldType(mysql.TypeLonglong),
		}
		return plannercore.NewIntHandleCols(intCol)
	}
	tblCols := make([]*expression.Column, len(tblInfo.Columns))
	for i := 0; i < len(tblInfo.Columns); i++ {
		c := tblInfo.Columns[i]
		tblCols[i] = &expression.Column{
			RetType: &c.FieldType,
			ID:      c.ID,
		}
	}
	pkIdx := tables.FindPrimaryIndex(tblInfo)
	for i, c := range pkIdx.Columns {
		// PK columns are laid out right after the index columns.
		tblCols[c.Offset].Index = len(idxInfo.Columns) + i
	}
	return plannercore.NewCommonHandleCols(sctx, tblInfo, pkIdx, tblCols)
}

// buildCleanupIndex builds the executor for ADMIN CLEANUP INDEX. Only public
// indexes are eligible.
func (b *executorBuilder) buildCleanupIndex(v *plannercore.CleanupIndex) exec.Executor {
	tblInfo := v.Table.TableInfo
	t, err := b.is.TableByName(v.Table.Schema, tblInfo.Name)
	if err != nil {
		b.err = err
		return nil
	}
	idxName := strings.ToLower(v.IndexName)
	var index table.Index
	for _, idx := range t.Indices() {
		if idx.Meta().State != model.StatePublic {
			continue
		}
		if idxName == idx.Meta().Name.L {
			index = idx
			break
		}
	}
	if index == nil {
		b.err = errors.Errorf("secondary index `%v` is not found in table `%v`", v.IndexName, v.Table.Name.O)
		return nil
	}
	e := &CleanupIndexExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		columns:      buildIdxColsConcatHandleCols(tblInfo, index.Meta(), false),
		index:        index,
		table:        t,
		physicalID:   t.Meta().ID,
		batchSize:    20000,
	}
	sessCtx := e.Ctx().GetSessionVars().StmtCtx
	e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, index.Meta(), e.columns)
	return e
}

// buildCheckIndexRange builds the executor for ADMIN CHECK INDEX ... RANGE.
func (b *executorBuilder) buildCheckIndexRange(v *plannercore.CheckIndexRange) exec.Executor {
	tb, err := b.is.TableByName(v.Table.Schema, v.Table.Name)
	if err != nil {
		b.err = err
		return nil
	}
	e := &CheckIndexRangeExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		handleRanges: v.HandleRanges,
		table:        tb.Meta(),
		is:           b.is,
	}
	idxName := strings.ToLower(v.IndexName)
	for _, idx := range tb.Indices() {
		if idx.Meta().Name.L == idxName {
			e.index = idx.Meta()
			e.startKey = make([]types.Datum, len(e.index.Columns))
			break
		}
	}
	return e
}

// buildChecksumTable builds the executor for ADMIN CHECKSUM TABLE, preparing
// one checksum context per table at the current snapshot ts.
func (b *executorBuilder) buildChecksumTable(v *plannercore.ChecksumTable) exec.Executor {
	e := &ChecksumTableExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		tables:       make(map[int64]*checksumContext),
		done:         false,
	}
	startTs, err := b.getSnapshotTS()
	if err != nil {
		b.err = err
		return nil
	}
	for _, t := range v.Tables {
		e.tables[t.TableInfo.ID] = newChecksumContext(t.DBInfo, t.TableInfo, startTs)
	}
	return e
}

// buildReloadExprPushdownBlacklist builds the executor reloading the
// expression-pushdown blacklist.
func (b *executorBuilder) buildReloadExprPushdownBlacklist(_ *plannercore.ReloadExprPushdownBlacklist) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, nil, 0)
	return &ReloadExprPushdownBlacklistExec{base}
}

// buildReloadOptRuleBlacklist builds the executor reloading the optimizer rule
// blacklist.
func (b *executorBuilder) buildReloadOptRuleBlacklist(_ *plannercore.ReloadOptRuleBlacklist) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, nil, 0)
	return &ReloadOptRuleBlacklistExec{BaseExecutor: base}
}

// buildAdminPlugins builds the executor for ADMIN PLUGINS.
func (b *executorBuilder) buildAdminPlugins(v *plannercore.AdminPlugins) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, nil, 0)
	return &AdminPluginsExec{BaseExecutor: base, Action: v.Action, Plugins: v.Plugins}
}

// buildDeallocate builds the executor for DEALLOCATE PREPARE.
func (b *executorBuilder) buildDeallocate(v *plannercore.Deallocate) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, nil, v.ID())
	base.SetInitCap(chunk.ZeroCapacity)
	e := &DeallocateExec{
		BaseExecutor: base,
		Name:         v.Name,
	}
	return e
}

// buildSelectLock builds the executor for SELECT ... FOR UPDATE / LOCK IN
// SHARE MODE. Outside an explicit transaction the child is returned unwrapped
// (no locking).
func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) exec.Executor {
	if !b.inSelectLockStmt {
		b.inSelectLockStmt = true
		defer func() { b.inSelectLockStmt = false }()
	}
	b.hasLock = true
	if b.err = b.updateForUpdateTS(); b.err != nil {
		return nil
	}

	src := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	if !b.ctx.GetSessionVars().InTxn() {
		// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
		// is disabled (either by beginning transaction with START TRANSACTION or by setting
		// autocommit to 0. If autocommit is enabled, the rows matching the specification are not locked.
		// See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
		return src
	}
	e := &SelectLockExec{
		BaseExecutor:       exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), src),
		Lock:               v.Lock,
		tblID2Handle:       v.TblID2Handle,
		tblID2PhysTblIDCol: v.TblID2PhysTblIDCol,
	}

	// filter out temporary tables because they do not store any record in tikv and should not write any lock
	is := e.Ctx().GetInfoSchema().(infoschema.InfoSchema)
	for tblID := range e.tblID2Handle {
		tblInfo, ok := is.TableByID(tblID)
		if !ok {
			// NOTE(review): b.err is set but the loop continues and tblInfo is
			// dereferenced below — likely a missing `return nil` here; confirm.
			b.err = errors.Errorf("Can not get table %d", tblID)
		}
		if tblInfo.Meta().TempTableType != model.TempTableNone {
			delete(e.tblID2Handle, tblID)
		}
	}
	return e
}

// buildLimit builds the executor for a physical LIMIT, tracking which child
// columns the parent actually uses.
func (b *executorBuilder) buildLimit(v *plannercore.PhysicalLimit) exec.Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	// Initial chunk capacity is bounded by both the LIMIT count and the
	// session's max chunk size.
	n := int(mathutil.Min(v.Count, uint64(b.ctx.GetSessionVars().MaxChunkSize)))
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec)
	base.SetInitCap(n)
	e := &LimitExec{
		BaseExecutor: base,
		begin:        v.Offset,
		end:          v.Offset + v.Count,
	}

	childUsedSchema := markChildrenUsedCols(v.Schema().Columns, v.Children()[0].Schema())[0]
	e.columnIdxsUsedByChild = make([]int, 0, len(childUsedSchema))
	for i, used := range childUsedSchema {
		if used {
			e.columnIdxsUsedByChild = append(e.columnIdxsUsedByChild, i)
		}
	}
	if len(e.columnIdxsUsedByChild) == len(childUsedSchema) {
		e.columnIdxsUsedByChild = nil // indicates that all columns are used. LimitExec will improve performance for this condition.
	}
	return e
}

// buildPrepare builds the executor for PREPARE.
func (b *executorBuilder) buildPrepare(v *plannercore.Prepare) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	base.SetInitCap(chunk.ZeroCapacity)
	return &PrepareExec{
		BaseExecutor: base,
		name:         v.Name,
		sqlText:      v.SQLText,
	}
}

// buildExecute builds the executor for EXECUTE of a prepared statement.
func (b *executorBuilder) buildExecute(v *plannercore.Execute) exec.Executor {
	e := &ExecuteExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		is:           b.is,
		name:         v.Name,
		usingVars:    v.Params,
		stmt:         v.Stmt,
		plan:         v.Plan,
		outputNames:  v.OutputNames(),
	}

	// Test-only failpoint: assert the staleness read ts and read-replica scope
	// of this EXECUTE match the injected "<ts>_<scope>" value.
	failpoint.Inject("assertExecutePrepareStatementStalenessOption", func(val failpoint.Value) {
		vs := strings.Split(val.(string), "_")
		assertTS, assertReadReplicaScope := vs[0], vs[1]
		staleread.AssertStmtStaleness(b.ctx, true)
		ts, err := sessiontxn.GetTxnManager(b.ctx).GetStmtReadTS()
		if err != nil {
			// NOTE(review): panics with the executor `e` rather than `err` —
			// looks like it was meant to be panic(err); confirm.
			panic(e)
		}
		if strconv.FormatUint(ts, 10) != assertTS ||
			assertReadReplicaScope != b.readReplicaScope {
			panic("execute prepare statement have wrong staleness option")
		}
	})

	return e
}

// buildShow builds the executor for SHOW statements.
func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) exec.Executor {
	e := &ShowExec{
		BaseExecutor:          exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		Tp:                    v.Tp,
		CountWarningsOrErrors: v.CountWarningsOrErrors,
		DBName:                model.NewCIStr(v.DBName),
		Table:                 v.Table,
		Partition:             v.Partition,
		Column:                v.Column,
		IndexName:             v.IndexName,
		ResourceGroupName:     model.NewCIStr(v.ResourceGroupName),
		Flag:                  v.Flag,
		Roles:                 v.Roles,
		User:                  v.User,
		is:                    b.is,
		Full:                  v.Full,
		IfNotExists:           v.IfNotExists,
		GlobalScope:           v.GlobalScope,
		Extended:              v.Extended,
		Extractor:             v.Extractor,
		ImportJobID:           v.ImportJobID,
	}
	if e.Tp == ast.ShowMasterStatus {
		// show master status need start ts.
		if _, err := e.Ctx().Txn(true); err != nil {
			b.err = err
		}
	}
	return e
}

// buildSimple builds executors for "simple" statements. Some statement types
// (GRANT/REVOKE/BRIE/resource-control/load-data actions) get dedicated
// executors; the rest fall through to SimpleExec. Account lock/unlock
// telemetry is recorded for CREATE/ALTER USER.
func (b *executorBuilder) buildSimple(v *plannercore.Simple) exec.Executor {
	switch s := v.Statement.(type) {
	case *ast.GrantStmt:
		return b.buildGrant(s)
	case *ast.RevokeStmt:
		return b.buildRevoke(s)
	case *ast.BRIEStmt:
		return b.buildBRIE(s, v.Schema())
	case *ast.CreateUserStmt, *ast.AlterUserStmt:
		var lockOptions []*ast.PasswordOrLockOption
		if b.Ti.AccountLockTelemetry == nil {
			b.Ti.AccountLockTelemetry = &AccountLockTelemetryInfo{}
		}
		b.Ti.AccountLockTelemetry.CreateOrAlterUser++
		if stmt, ok := v.Statement.(*ast.CreateUserStmt); ok {
			lockOptions = stmt.PasswordOrLockOptions
		} else if stmt, ok := v.Statement.(*ast.AlterUserStmt); ok {
			lockOptions = stmt.PasswordOrLockOptions
		}
		if len(lockOptions) > 0 {
			// Multiple lock options are supported for the parser, but only the last one option takes effect.
			for i := len(lockOptions) - 1; i >= 0; i-- {
				if lockOptions[i].Type == ast.Lock {
					b.Ti.AccountLockTelemetry.LockUser++
					break
				} else if lockOptions[i].Type == ast.Unlock {
					b.Ti.AccountLockTelemetry.UnlockUser++
					break
				}
			}
		}
	case *ast.CalibrateResourceStmt:
		return &calibrateresource.Executor{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), 0),
			WorkloadType: s.Tp,
			OptionList:   s.DynamicCalibrateResourceOptionList,
		}
	case *ast.AddQueryWatchStmt:
		return &querywatch.AddExecutor{
			BaseExecutor:         exec.NewBaseExecutor(b.ctx, v.Schema(), 0),
			QueryWatchOptionList: s.QueryWatchOptionList,
		}
	case *ast.LoadDataActionStmt:
		return &LoadDataActionExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0),
			tp:           s.Tp,
			jobID:        s.JobID,
		}
	case *ast.ImportIntoActionStmt:
		return &ImportIntoActionExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0),
			tp:           s.Tp,
			jobID:        s.JobID,
		}
	}
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	base.SetInitCap(chunk.ZeroCapacity)
	e := &SimpleExec{
		BaseExecutor:    base,
		Statement:       v.Statement,
		IsFromRemote:    v.IsFromRemote,
		is:              b.is,
		staleTxnStartTS: v.StaleTxnStartTS,
	}
	return e
}

// buildSet builds the executor for SET variable assignments.
func (b *executorBuilder) buildSet(v *plannercore.Set) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	base.SetInitCap(chunk.ZeroCapacity)
	e := &SetExecutor{
		BaseExecutor: base,
		vars:         v.VarAssigns,
	}
	return e
}

// buildSetConfig builds the executor for SET CONFIG.
func (b *executorBuilder) buildSetConfig(v *plannercore.SetConfig) exec.Executor {
	return &SetConfigExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		p:            v,
	}
}

// buildInsert builds the executor for INSERT / REPLACE, including foreign-key
// check and cascade sub-executors.
func (b *executorBuilder) buildInsert(v *plannercore.Insert) exec.Executor {
	b.inInsertStmt = true
	if b.err = b.updateForUpdateTS(); b.err != nil {
		return nil
	}
	selectExec := b.build(v.SelectPlan)
	if b.err != nil {
		return nil
	}
	var baseExec exec.BaseExecutor
	if selectExec != nil {
		baseExec = exec.NewBaseExecutor(b.ctx, nil, v.ID(), selectExec)
	} else {
		baseExec = exec.NewBaseExecutor(b.ctx, nil, v.ID())
	}
	baseExec.SetInitCap(chunk.ZeroCapacity)

	ivs := &InsertValues{
		BaseExecutor:              baseExec,
		Table:                     v.Table,
		Columns:                   v.Columns,
		Lists:                     v.Lists,
		GenExprs:                  v.GenCols.Exprs,
		allAssignmentsAreConstant: v.AllAssignmentsAreConstant,
		hasRefCols:                v.NeedFillDefaultValue,
		SelectExec:                selectExec,
		rowLen:                    v.RowLen,
	}
	err := ivs.initInsertColumns()
	if err != nil {
		b.err = err
		return nil
	}
	ivs.fkChecks, b.err = buildFKCheckExecs(b.ctx, ivs.Table, v.FKChecks)
	if b.err != nil {
		return nil
	}
	ivs.fkCascades, b.err = b.buildFKCascadeExecs(ivs.Table, v.FKCascades)
	if b.err != nil {
		return nil
	}
	if v.IsReplace {
		return b.buildReplace(ivs)
	}
	insert := &InsertExec{
		InsertValues: ivs,
		OnDuplicate:  append(v.OnDuplicate, v.GenCols.OnDuplicates...),
	}
	return insert
}

// buildImportInto builds the executor for IMPORT INTO; only base tables are
// accepted.
func (b *executorBuilder) buildImportInto(v *plannercore.ImportInto) exec.Executor {
	tbl, ok := b.is.TableByID(v.Table.TableInfo.ID)
	if !ok {
		b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID)
		return nil
	}
	if !tbl.Meta().IsBaseTable() {
		b.err = plannercore.ErrNonUpdatableTable.GenWithStackByArgs(tbl.Meta().Name.O, "LOAD")
		return nil
	}

	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	exec, err := newImportIntoExec(base, b.ctx, v, tbl)
	if err != nil {
		b.err = err
		return nil
	}

	return exec
}

// buildLoadData builds the executor for LOAD DATA; only base tables are
// accepted.
func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) exec.Executor {
	tbl, ok := b.is.TableByID(v.Table.TableInfo.ID)
	if !ok {
		b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID)
		return nil
	}
	if !tbl.Meta().IsBaseTable() {
		b.err = plannercore.ErrNonUpdatableTable.GenWithStackByArgs(tbl.Meta().Name.O, "LOAD")
		return nil
	}

	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	worker, err := NewLoadDataWorker(b.ctx, v, tbl)
	if err != nil {
		b.err = err
		return nil
	}

	return &LoadDataExec{
		BaseExecutor:   base,
		loadDataWorker: worker,
		FileLocRef:     v.FileLocRef,
	}
}

// buildLoadStats builds the executor for LOAD STATS.
func (b *executorBuilder) buildLoadStats(v *plannercore.LoadStats) exec.Executor {
	e := &LoadStatsExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
		info:         &LoadStatsInfo{v.Path, b.ctx},
	}
	return e
}

// buildLockStats builds the executor for LOCK STATS.
func (b *executorBuilder) buildLockStats(v *plannercore.LockStats) exec.Executor {
	e := &lockstats.LockExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
		Tables:       v.Tables,
	}
	return e
}

// buildUnlockStats builds the executor for UNLOCK STATS.
func (b *executorBuilder) buildUnlockStats(v *plannercore.UnlockStats) exec.Executor {
	e := &lockstats.UnlockExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
		Tables:       v.Tables,
	}
	return e
}

// buildIndexAdvise builds the executor for INDEX ADVISE.
func (b *executorBuilder) buildIndexAdvise(v *plannercore.IndexAdvise) exec.Executor {
	e := &IndexAdviseExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
		IsLocal:      v.IsLocal,
		indexAdviseInfo: &IndexAdviseInfo{
			Path:           v.Path,
			MaxMinutes:     v.MaxMinutes,
			MaxIndexNum:    v.MaxIndexNum,
			LineFieldsInfo: v.LineFieldsInfo,
			Ctx:            b.ctx,
		},
	}
	return e
}

// buildPlanReplayer builds the executor for PLAN REPLAYER, choosing between
// its load / capture / remove / dump forms.
func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) exec.Executor {
	if v.Load {
		e := &PlanReplayerLoadExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
			info:         &PlanReplayerLoadInfo{Path: v.File, Ctx: b.ctx},
		}
		return e
	}
	if v.Capture {
		e := &PlanReplayerExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
			CaptureInfo: &PlanReplayerCaptureInfo{
				SQLDigest:  v.SQLDigest,
				PlanDigest: v.PlanDigest,
			},
		}
		return e
	}
	if v.Remove {
		e := &PlanReplayerExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()),
			CaptureInfo: &PlanReplayerCaptureInfo{
				SQLDigest:  v.SQLDigest,
				PlanDigest: v.PlanDigest,
				Remove:     true,
			},
		}
		return e
	}

	// Default: dump form.
	e := &PlanReplayerExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		DumpInfo: &PlanReplayerDumpInfo{
			Analyze:           v.Analyze,
			Path:              v.File,
			ctx:               b.ctx,
			HistoricalStatsTS: v.HistoricalStatsTS,
		},
	}
	if v.ExecStmt != nil {
		e.DumpInfo.ExecStmts = []ast.StmtNode{v.ExecStmt}
	} else {
		e.BaseExecutor = exec.NewBaseExecutor(b.ctx, nil, v.ID())
	}
	return e
}

// buildReplace wraps prepared InsertValues into a ReplaceExec.
func (*executorBuilder) buildReplace(vals *InsertValues) exec.Executor {
	replaceExec := &ReplaceExec{
		InsertValues: vals,
	}
	return replaceExec
}

// buildGrant builds the executor for GRANT.
func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) exec.Executor {
	e := &GrantExec{
		BaseExecutor:          exec.NewBaseExecutor(b.ctx, nil, 0),
		Privs:                 grant.Privs,
		ObjectType:            grant.ObjectType,
		Level:                 grant.Level,
		Users:                 grant.Users,
		WithGrant:             grant.WithGrant,
		AuthTokenOrTLSOptions: grant.AuthTokenOrTLSOptions,
		is:                    b.is,
	}
	return e
}

// buildRevoke builds the executor for REVOKE.
func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) exec.Executor {
	e := &RevokeExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0),
		ctx:          b.ctx,
		Privs:        revoke.Privs,
		ObjectType:   revoke.ObjectType,
		Level:        revoke.Level,
		Users:        revoke.Users,
		is:           b.is,
	}
	return e
}

// setTelemetryInfo records feature-usage telemetry (multi-schema change,
// partitioning variants, flashback) for a DDL statement on b.Ti. No-op when
// the plan or the telemetry sink is nil.
func (b *executorBuilder) setTelemetryInfo(v *plannercore.DDL) {
	if v == nil || b.Ti == nil {
		return
	}
	switch s := v.Statement.(type) {
	case *ast.AlterTableStmt:
		if len(s.Specs) > 1 {
			b.Ti.UseMultiSchemaChange = true
		}
		for _, spec := range s.Specs {
			switch spec.Tp {
			case ast.AlterTableDropFirstPartition:
				if b.Ti.PartitionTelemetry == nil {
					b.Ti.PartitionTelemetry = &PartitionTelemetryInfo{}
				}
				b.Ti.PartitionTelemetry.UseDropIntervalPartition = true
			case ast.AlterTableAddLastPartition:
				if b.Ti.PartitionTelemetry == nil {
					b.Ti.PartitionTelemetry = &PartitionTelemetryInfo{}
				}
				b.Ti.PartitionTelemetry.UseAddIntervalPartition = true
			case ast.AlterTableExchangePartition:
				b.Ti.UseExchangePartition = true
			case ast.AlterTableReorganizePartition:
				if b.Ti.PartitionTelemetry == nil {
					b.Ti.PartitionTelemetry = &PartitionTelemetryInfo{}
				}
				b.Ti.PartitionTelemetry.UseReorganizePartition = true
			}
		}
	case *ast.CreateTableStmt:
		if s.Partition == nil || strings.EqualFold(b.ctx.GetSessionVars().EnableTablePartition, "OFF") {
			break
		}
		p := s.Partition
		if b.Ti.PartitionTelemetry == nil {
			b.Ti.PartitionTelemetry = &PartitionTelemetryInfo{}
		}
		b.Ti.PartitionTelemetry.TablePartitionMaxPartitionsNum = mathutil.Max(p.Num, uint64(len(p.Definitions)))
		b.Ti.PartitionTelemetry.UseTablePartition = true

		switch p.Tp {
		case model.PartitionTypeRange:
			if p.Sub == nil {
				if len(p.ColumnNames) > 0 {
					b.Ti.PartitionTelemetry.UseTablePartitionRangeColumns = true
					if len(p.ColumnNames) > 1 {
						b.Ti.PartitionTelemetry.UseTablePartitionRangeColumnsGt1 = true
					}
					if len(p.ColumnNames) > 2 {
						b.Ti.PartitionTelemetry.UseTablePartitionRangeColumnsGt2 = true
					}
					if len(p.ColumnNames) > 3 {
						b.Ti.PartitionTelemetry.UseTablePartitionRangeColumnsGt3 = true
					}
				} else {
					b.Ti.PartitionTelemetry.UseTablePartitionRange = true
				}
				if p.Interval != nil {
					b.Ti.PartitionTelemetry.UseCreateIntervalPartition = true
				}
			}
		case model.PartitionTypeHash:
			if p.Sub == nil {
				b.Ti.PartitionTelemetry.UseTablePartitionHash = true
			}
		case model.PartitionTypeList:
			enable := b.ctx.GetSessionVars().EnableListTablePartition
			if p.Sub == nil && enable {
				if len(p.ColumnNames) > 0 {
					b.Ti.PartitionTelemetry.UseTablePartitionListColumns = true
				} else {
					b.Ti.PartitionTelemetry.UseTablePartitionList = true
				}
			}
		}
	case *ast.FlashBackToTimestampStmt:
		b.Ti.UseFlashbackToCluster = true
	}
}

// buildDDL builds the executor for DDL statements and records telemetry.
func (b *executorBuilder) buildDDL(v *plannercore.DDL) exec.Executor {
	b.setTelemetryInfo(v)

	e := &DDLExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		stmt:         v.Statement,
		is:           b.is,
		tempTableDDL: temptable.GetTemporaryTableDDL(b.ctx),
	}
	return e
}

// buildTrace builds a TraceExec for future executing. This method will be called
// at build().
func (b *executorBuilder) buildTrace(v *plannercore.Trace) exec.Executor {
	t := &TraceExec{
		BaseExecutor:         exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		stmtNode:             v.StmtNode,
		builder:              b,
		format:               v.Format,
		optimizerTrace:       v.OptimizerTrace,
		optimizerTraceTarget: v.OptimizerTraceTarget,
	}
	if t.format == plannercore.TraceFormatLog && !t.optimizerTrace {
		// Log-format traces are sorted by the timestamp column.
		return &SortExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), t),
			ByItems: []*plannerutil.ByItems{
				{Expr: &expression.Column{
					Index:   0,
					RetType: types.NewFieldType(mysql.TypeTimestamp),
				}},
			},
			schema: v.Schema(),
		}
	}
	return t
}

// buildExplain builds a explain executor. `e.rows` collects final result to `ExplainExec`.
func (b *executorBuilder) buildExplain(v *plannercore.Explain) exec.Executor {
	explainExec := &ExplainExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		explain:      v,
	}
	if v.Analyze {
		if b.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl == nil {
			b.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl = execdetails.NewRuntimeStatsColl(nil)
		}
		// If the resource group name is not empty, we could collect and display the RU
		// runtime stats for analyze executor.
		resourceGroupName := b.ctx.GetSessionVars().ResourceGroupName
		// Try to register the RU runtime stats for analyze executor.
		if store, ok := b.ctx.GetStore().(interface {
			CreateRURuntimeStats(uint64) *clientutil.RURuntimeStats
		}); len(resourceGroupName) > 0 && ok {
			// StartTS will be used to identify this SQL, so that the runtime stats could
			// aggregate the RU stats beneath the KV storage client.
startTS, err := b.getSnapshotTS() if err != nil { b.err = err return nil } explainExec.ruRuntimeStats = store.CreateRURuntimeStats(startTS) } explainExec.analyzeExec = b.build(v.TargetPlan) } return explainExec } func (b *executorBuilder) buildSelectInto(v *plannercore.SelectInto) exec.Executor { child := b.build(v.TargetPlan) if b.err != nil { return nil } return &SelectIntoExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), child), intoOpt: v.IntoOpt, LineFieldsInfo: v.LineFieldsInfo, } } func (b *executorBuilder) buildUnionScanExec(v *plannercore.PhysicalUnionScan) exec.Executor { oriEncounterUnionScan := b.encounterUnionScan b.encounterUnionScan = true defer func() { b.encounterUnionScan = oriEncounterUnionScan }() reader := b.build(v.Children()[0]) if b.err != nil { return nil } return b.buildUnionScanFromReader(reader, v) } // buildUnionScanFromReader builds union scan executor from child executor. // Note that this function may be called by inner workers of index lookup join concurrently. // Be careful to avoid data race. func (b *executorBuilder) buildUnionScanFromReader(reader exec.Executor, v *plannercore.PhysicalUnionScan) exec.Executor { // If reader is union, it means a partition table and we should transfer as above. if x, ok := reader.(*UnionExec); ok { for i, child := range x.AllChildren() { x.SetChildren(i, b.buildUnionScanFromReader(child, v)) if b.err != nil { return nil } } return x } us := &UnionScanExec{BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), reader)} // Get the handle column index of the below Plan. 
us.handleCols = v.HandleCols us.mutableRow = chunk.MutRowFromTypes(retTypes(us)) // If the push-downed condition contains virtual column, we may build a selection upon reader originReader := reader if sel, ok := reader.(*SelectionExec); ok { reader = sel.Children(0) } us.collators = make([]collate.Collator, 0, len(us.columns)) for _, tp := range retTypes(us) { us.collators = append(us.collators, collate.GetCollator(tp.GetCollate())) } startTS, err := b.getSnapshotTS() sessionVars := b.ctx.GetSessionVars() if err != nil { b.err = err return nil } switch x := reader.(type) { case *MPPGather: us.desc = false us.keepOrder = false us.conditions, us.conditionsWithVirCol = plannercore.SplitSelCondsWithVirtualColumn(v.Conditions) us.columns = x.columns us.table = x.table us.virtualColumnIndex = x.virtualColumnIndex us.handleCachedTable(b, x, sessionVars, startTS) case *TableReaderExecutor: us.desc = x.desc us.keepOrder = x.keepOrder us.conditions, us.conditionsWithVirCol = plannercore.SplitSelCondsWithVirtualColumn(v.Conditions) us.columns = x.columns us.table = x.table us.virtualColumnIndex = x.virtualColumnIndex us.handleCachedTable(b, x, sessionVars, startTS) case *IndexReaderExecutor: us.desc = x.desc us.keepOrder = x.keepOrder for _, ic := range x.index.Columns { for i, col := range x.columns { if col.Name.L == ic.Name.L { us.usedIndex = append(us.usedIndex, i) break } } } us.conditions, us.conditionsWithVirCol = plannercore.SplitSelCondsWithVirtualColumn(v.Conditions) us.columns = x.columns us.table = x.table us.handleCachedTable(b, x, sessionVars, startTS) case *IndexLookUpExecutor: us.desc = x.desc us.keepOrder = x.keepOrder for _, ic := range x.index.Columns { for i, col := range x.columns { if col.Name.L == ic.Name.L { us.usedIndex = append(us.usedIndex, i) break } } } us.conditions, us.conditionsWithVirCol = plannercore.SplitSelCondsWithVirtualColumn(v.Conditions) us.columns = x.columns us.table = x.table us.virtualColumnIndex = 
buildVirtualColumnIndex(us.Schema(), us.columns) us.handleCachedTable(b, x, sessionVars, startTS) case *IndexMergeReaderExecutor: if len(x.byItems) != 0 { us.keepOrder = x.keepOrder us.desc = x.byItems[0].Desc for _, item := range x.byItems { c, ok := item.Expr.(*expression.Column) if !ok { b.err = errors.Errorf("Not support non-column in orderBy pushed down") return nil } for i, col := range x.columns { if col.ID == c.ID { us.usedIndex = append(us.usedIndex, i) break } } } } us.conditions, us.conditionsWithVirCol = plannercore.SplitSelCondsWithVirtualColumn(v.Conditions) us.columns = x.columns us.table = x.table us.virtualColumnIndex = buildVirtualColumnIndex(us.Schema(), us.columns) default: // The mem table will not be written by sql directly, so we can omit the union scan to avoid err reporting. return originReader } return us } type bypassDataSourceExecutor interface { dataSourceExecutor setDummy() } func (us *UnionScanExec) handleCachedTable(b *executorBuilder, x bypassDataSourceExecutor, vars *variable.SessionVars, startTS uint64) { tbl := x.Table() if tbl.Meta().TableCacheStatusType == model.TableCacheStatusEnable { cachedTable := tbl.(table.CachedTable) // Determine whether the cache can be used. leaseDuration := time.Duration(variable.TableCacheLease.Load()) * time.Second cacheData, loading := cachedTable.TryReadFromCache(startTS, leaseDuration) if cacheData != nil { vars.StmtCtx.ReadFromTableCache = true x.setDummy() us.cacheTable = cacheData } else if loading { return } else { if !b.inUpdateStmt && !b.inDeleteStmt && !b.inInsertStmt && !vars.StmtCtx.InExplainStmt { store := b.ctx.GetStore() cachedTable.UpdateLockForRead(context.Background(), store, startTS, leaseDuration) } } } } // buildMergeJoin builds MergeJoinExec executor. 
func (b *executorBuilder) buildMergeJoin(v *plannercore.PhysicalMergeJoin) exec.Executor {
	// Build both children first; the builder reports failures via b.err
	// rather than a return value, so check it after each child build.
	leftExec := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	rightExec := b.build(v.Children()[1])
	if b.err != nil {
		return nil
	}
	// defaultValues pads unmatched rows; it is sized to the schema of the
	// side chosen as the inner table below (left for right-outer join,
	// right otherwise).
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		if v.JoinType == plannercore.RightOuterJoin {
			defaultValues = make([]types.Datum, leftExec.Schema().Len())
		} else {
			defaultValues = make([]types.Datum, rightExec.Schema().Len())
		}
	}
	colsFromChildren := v.Schema().Columns
	if v.JoinType == plannercore.LeftOuterSemiJoin || v.JoinType == plannercore.AntiLeftOuterSemiJoin {
		// For (anti) left-outer semi join the last schema column is produced
		// by the join itself rather than coming from a child — presumably the
		// semi-join mark column; drop it before mapping child columns.
		colsFromChildren = colsFromChildren[:len(colsFromChildren)-1]
	}
	e := &MergeJoinExec{
		stmtCtx:      b.ctx.GetSessionVars().StmtCtx,
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), leftExec, rightExec),
		compareFuncs: v.CompareFuncs,
		joiner: newJoiner(
			b.ctx, v.JoinType,
			v.JoinType == plannercore.RightOuterJoin,
			defaultValues, v.OtherConditions,
			retTypes(leftExec), retTypes(rightExec),
			markChildrenUsedCols(colsFromChildren, v.Children()[0].Schema(), v.Children()[1].Schema()),
			false,
		),
		isOuterJoin: v.JoinType.IsOuterJoin(),
		desc:        v.Desc,
	}
	leftTable := &mergeJoinTable{
		childIndex: 0,
		joinKeys:   v.LeftJoinKeys,
		filters:    v.LeftConditions,
	}
	rightTable := &mergeJoinTable{
		childIndex: 1,
		joinKeys:   v.RightJoinKeys,
		filters:    v.RightConditions,
	}
	// For right-outer join the left child plays the inner role; in all
	// other cases the right child is the inner table.
	if v.JoinType == plannercore.RightOuterJoin {
		e.innerTable = leftTable
		e.outerTable = rightTable
	} else {
		e.innerTable = rightTable
		e.outerTable = leftTable
	}
	e.innerTable.isInner = true

	// optimizer should guarantee that filters on inner table are pushed down
	// to tikv or extracted to a Selection.
	if len(e.innerTable.filters) != 0 {
		b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "merge join's inner filter should be empty.")
		return nil
	}

	executor_metrics.ExecutorCounterMergeJoinExec.Inc()
	return e
}

// buildHashJoin builds a HashJoinExec from a PhysicalHashJoin plan. It decides
// which child is the build side and which is the probe side, creates one
// probe worker per unit of concurrency, and prepares the key/type/collation
// bookkeeping used for hashing.
func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) exec.Executor {
	leftExec := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	rightExec := b.build(v.Children()[1])
	if b.err != nil {
		return nil
	}
	e := &HashJoinExec{
		BaseExecutor:          exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), leftExec, rightExec),
		probeSideTupleFetcher: &probeSideTupleFetcher{},
		probeWorkers:          make([]*probeWorker, v.Concurrency),
		buildWorker:           &buildWorker{},
		hashJoinCtx: &hashJoinCtx{
			sessCtx:         b.ctx,
			isOuterJoin:     v.JoinType.IsOuterJoin(),
			useOuterToBuild: v.UseOuterToBuild,
			joinType:        v.JoinType,
			concurrency:     v.Concurrency,
		},
	}
	e.hashJoinCtx.allocPool = e.AllocPool
	defaultValues := v.DefaultValues
	lhsTypes, rhsTypes := retTypes(leftExec), retTypes(rightExec)
	// The inner child must not carry residual filter conditions; the
	// optimizer is expected to have pushed them down or extracted them.
	if v.InnerChildIdx == 1 {
		if len(v.RightConditions) > 0 {
			b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "join's inner condition should be empty")
			return nil
		}
	} else {
		if len(v.LeftConditions) > 0 {
			b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "join's inner condition should be empty")
			return nil
		}
	}
	leftIsBuildSide := true
	e.isNullEQ = v.IsNullEQ
	var probeKeys, probeNAKeys, buildKeys, buildNAKeys []*expression.Column
	var buildSideExec exec.Executor
	// Pick build/probe sides. With UseOuterToBuild the OUTER child becomes
	// the build side (so InnerChildIdx points at the probe side); otherwise
	// the inner child is the build side. The four branches are symmetric:
	// they only differ in which child/keys/filters go where.
	if v.UseOuterToBuild {
		// update the buildSideEstCount due to changing the build side
		if v.InnerChildIdx == 1 {
			buildSideExec, buildKeys, buildNAKeys = leftExec, v.LeftJoinKeys, v.LeftNAJoinKeys
			e.probeSideTupleFetcher.probeSideExec, probeKeys, probeNAKeys = rightExec, v.RightJoinKeys, v.RightNAJoinKeys
			e.outerFilter = v.LeftConditions
		} else {
			buildSideExec, buildKeys, buildNAKeys = rightExec, v.RightJoinKeys, v.RightNAJoinKeys
			e.probeSideTupleFetcher.probeSideExec, probeKeys, probeNAKeys = leftExec, v.LeftJoinKeys, v.LeftNAJoinKeys
			e.outerFilter = v.RightConditions
			leftIsBuildSide = false
		}
		if defaultValues == nil {
			defaultValues = make([]types.Datum, e.probeSideTupleFetcher.probeSideExec.Schema().Len())
		}
	} else {
		if v.InnerChildIdx == 0 {
			buildSideExec, buildKeys, buildNAKeys = leftExec, v.LeftJoinKeys, v.LeftNAJoinKeys
			e.probeSideTupleFetcher.probeSideExec, probeKeys, probeNAKeys = rightExec, v.RightJoinKeys, v.RightNAJoinKeys
			e.outerFilter = v.RightConditions
		} else {
			buildSideExec, buildKeys, buildNAKeys = rightExec, v.RightJoinKeys, v.RightNAJoinKeys
			e.probeSideTupleFetcher.probeSideExec, probeKeys, probeNAKeys = leftExec, v.LeftJoinKeys, v.LeftNAJoinKeys
			e.outerFilter = v.LeftConditions
			leftIsBuildSide = false
		}
		if defaultValues == nil {
			defaultValues = make([]types.Datum, buildSideExec.Schema().Len())
		}
	}
	// Translate key columns into ordinal indexes within each side's output row.
	probeKeyColIdx := make([]int, len(probeKeys))
	probeNAKeColIdx := make([]int, len(probeNAKeys))
	buildKeyColIdx := make([]int, len(buildKeys))
	buildNAKeyColIdx := make([]int, len(buildNAKeys))
	for i := range buildKeys {
		buildKeyColIdx[i] = buildKeys[i].Index
	}
	for i := range buildNAKeys {
		buildNAKeyColIdx[i] = buildNAKeys[i].Index
	}
	for i := range probeKeys {
		probeKeyColIdx[i] = probeKeys[i].Index
	}
	for i := range probeNAKeys {
		probeNAKeColIdx[i] = probeNAKeys[i].Index
	}
	// Presence of null-aware join keys marks this as a null-aware (NA) join.
	isNAJoin := len(v.LeftNAJoinKeys) > 0
	colsFromChildren := v.Schema().Columns
	if v.JoinType == plannercore.LeftOuterSemiJoin || v.JoinType == plannercore.AntiLeftOuterSemiJoin {
		// Drop the join-produced trailing column (see buildMergeJoin) before
		// mapping child columns.
		colsFromChildren = colsFromChildren[:len(colsFromChildren)-1]
	}
	childrenUsedSchema := markChildrenUsedCols(colsFromChildren, v.Children()[0].Schema(), v.Children()[1].Schema())
	// One probe worker (with its own joiner) per unit of concurrency; the
	// key-index slices are shared read-only across workers.
	for i := uint(0); i < e.concurrency; i++ {
		e.probeWorkers[i] = &probeWorker{
			hashJoinCtx:      e.hashJoinCtx,
			workerID:         i,
			joiner:           newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, lhsTypes, rhsTypes, childrenUsedSchema, isNAJoin),
			probeKeyColIdx:   probeKeyColIdx,
			probeNAKeyColIdx: probeNAKeColIdx,
		}
	}
	e.buildWorker.buildKeyColIdx, e.buildWorker.buildNAKeyColIdx, e.buildWorker.buildSideExec, e.buildWorker.hashJoinCtx = buildKeyColIdx, buildNAKeyColIdx, buildSideExec, e.hashJoinCtx
	e.hashJoinCtx.isNullAware = isNAJoin
	executor_metrics.ExecutorCountHashJoinExec.Inc()

	// We should use JoinKey to construct the type information using by hashing, instead of using the child's schema directly.
	// When a hybrid type column is hashed multiple times, we need to distinguish what field types are used.
	// For example, the condition `enum = int and enum = string`, we should use ETInt to hash the first column,
	// and use ETString to hash the second column, although they may be the same column.
	leftExecTypes, rightExecTypes := retTypes(leftExec), retTypes(rightExec)
	leftTypes, rightTypes := make([]*types.FieldType, 0, len(v.LeftJoinKeys)+len(v.LeftNAJoinKeys)), make([]*types.FieldType, 0, len(v.RightJoinKeys)+len(v.RightNAJoinKeys))
	// set left types and right types for joiner.
	// Regular join keys come first, then the null-aware keys at `offset`.
	for i, col := range v.LeftJoinKeys {
		leftTypes = append(leftTypes, leftExecTypes[col.Index].Clone())
		leftTypes[i].SetFlag(col.RetType.GetFlag())
	}
	offset := len(v.LeftJoinKeys)
	for i, col := range v.LeftNAJoinKeys {
		leftTypes = append(leftTypes, leftExecTypes[col.Index].Clone())
		leftTypes[i+offset].SetFlag(col.RetType.GetFlag())
	}
	for i, col := range v.RightJoinKeys {
		rightTypes = append(rightTypes, rightExecTypes[col.Index].Clone())
		rightTypes[i].SetFlag(col.RetType.GetFlag())
	}
	offset = len(v.RightJoinKeys)
	for i, col := range v.RightNAJoinKeys {
		rightTypes = append(rightTypes, rightExecTypes[col.Index].Clone())
		rightTypes[i+offset].SetFlag(col.RetType.GetFlag())
	}

	// consider collations
	// Both sides of each equality must hash with the same charset/collation,
	// so copy the condition's charset/collation onto both key types.
	for i := range v.EqualConditions {
		chs, coll := v.EqualConditions[i].CharsetAndCollation()
		leftTypes[i].SetCharset(chs)
		leftTypes[i].SetCollate(coll)
		rightTypes[i].SetCharset(chs)
		rightTypes[i].SetCollate(coll)
	}
	offset = len(v.EqualConditions)
	for i := range v.NAEqualConditions {
		chs, coll := v.NAEqualConditions[i].CharsetAndCollation()
		leftTypes[i+offset].SetCharset(chs)
		leftTypes[i+offset].SetCollate(coll)
		rightTypes[i+offset].SetCharset(chs)
		rightTypes[i+offset].SetCollate(coll)
	}
	if leftIsBuildSide {
		e.buildTypes, e.probeTypes = leftTypes, rightTypes
	} else {
		e.buildTypes, e.probeTypes = rightTypes, leftTypes
	}
	return e
}

// buildHashAgg builds a HashAggExec from a PhysicalHashAgg plan. Depending on
// session concurrency settings and the aggregate functions involved, it either
// prepares an unparallel execution (single set of agg funcs) or splits each
// aggregate into partial/final pairs for parallel execution.
func (b *executorBuilder) buildHashAgg(v *plannercore.PhysicalHashAgg) exec.Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	sessionVars := b.ctx.GetSessionVars()
	e := &HashAggExec{
		BaseExecutor:    exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), src),
		sc:              sessionVars.StmtCtx,
		PartialAggFuncs: make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)),
		GroupByItems:    v.GroupByItems,
	}
	// We take `create table t(a int, b int);` as example.
	//
	// 1. If all the aggregation functions are FIRST_ROW, we do not need to set the defaultVal for them:
	// e.g.
	// mysql> select distinct a, b from t;
	// 0 rows in set (0.00 sec)
	//
	// 2. If there exists group by items, we do not need to set the defaultVal for them either:
	// e.g.
	// mysql> select avg(a) from t group by b;
	// Empty set (0.00 sec)
	//
	// mysql> select avg(a) from t group by a;
	// +--------+
	// | avg(a) |
	// +--------+
	// |   NULL |
	// +--------+
	// 1 row in set (0.00 sec)
	if len(v.GroupByItems) != 0 || aggregation.IsAllFirstRow(v.AggFuncs) {
		e.defaultVal = nil
	} else {
		if v.IsFinalAgg() {
			e.defaultVal = e.Ctx().GetSessionVars().GetNewChunkWithCapacity(retTypes(e), 1, 1, e.AllocPool)
		}
	}
	// DISTINCT aggregates and aggregates with ORDER BY force unparallel execution.
	for _, aggDesc := range v.AggFuncs {
		if aggDesc.HasDistinct || len(aggDesc.OrderByItems) > 0 {
			e.isUnparallelExec = true
		}
	}
	// When we set both tidb_hashagg_final_concurrency and tidb_hashagg_partial_concurrency to 1,
	// we do not need to parallelly execute hash agg,
	// and this action can be a workaround when meeting some unexpected situation using parallelExec.
	if finalCon, partialCon := sessionVars.HashAggFinalConcurrency(), sessionVars.HashAggPartialConcurrency(); finalCon <= 0 || partialCon <= 0 || finalCon == 1 && partialCon == 1 {
		e.isUnparallelExec = true
	}
	partialOrdinal := 0
	for i, aggDesc := range v.AggFuncs {
		if e.isUnparallelExec {
			e.PartialAggFuncs = append(e.PartialAggFuncs, aggfuncs.Build(b.ctx, aggDesc, i))
		} else {
			// Parallel mode: split each aggregate into a partial and a final
			// function. AVG consumes two partial ordinals (sum and count).
			ordinal := []int{partialOrdinal}
			partialOrdinal++
			if aggDesc.Name == ast.AggFuncAvg {
				ordinal = append(ordinal, partialOrdinal+1)
				partialOrdinal++
			}
			partialAggDesc, finalDesc := aggDesc.Split(ordinal)
			partialAggFunc := aggfuncs.Build(b.ctx, partialAggDesc, i)
			finalAggFunc := aggfuncs.Build(b.ctx, finalDesc, i)
			e.PartialAggFuncs = append(e.PartialAggFuncs, partialAggFunc)
			e.FinalAggFuncs = append(e.FinalAggFuncs, finalAggFunc)
			if partialAggDesc.Name == ast.AggFuncGroupConcat {
				// For group_concat, finalAggFunc and partialAggFunc need shared `truncate` flag to do duplicate.
				finalAggFunc.(interface{ SetTruncated(t *int32) }).SetTruncated(
					partialAggFunc.(interface{ GetTruncated() *int32 }).GetTruncated(),
				)
			}
		}
		if e.defaultVal != nil {
			value := aggDesc.GetDefaultValue()
			e.defaultVal.AppendDatum(i, &value)
		}
	}

	executor_metrics.ExecutorCounterHashAggExec.Inc()
	return e
}

// buildStreamAgg builds a StreamAggExec from a PhysicalStreamAgg plan. Stream
// aggregation relies on the child producing rows grouped in order, detected
// via a VecGroupChecker over the group-by items.
func (b *executorBuilder) buildStreamAgg(v *plannercore.PhysicalStreamAgg) exec.Executor {
	src := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	e := &StreamAggExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), src),
		groupChecker: vecgroupchecker.NewVecGroupChecker(b.ctx, v.GroupByItems),
		aggFuncs:     make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)),
	}
	// Same defaultVal rules as buildHashAgg: only a final aggregation with no
	// group-by and at least one non-FIRST_ROW aggregate emits a default row.
	if len(v.GroupByItems) != 0 || aggregation.IsAllFirstRow(v.AggFuncs) {
		e.defaultVal = nil
	} else {
		// Only do this for final agg, see issue #35295, #30923
		if v.IsFinalAgg() {
			e.defaultVal = e.Ctx().GetSessionVars().GetNewChunkWithCapacity(retTypes(e), 1, 1, e.AllocPool)
		}
	}
	for i, aggDesc := range v.AggFuncs {
		aggFunc := aggfuncs.Build(b.ctx, aggDesc, i)
		e.aggFuncs = append(e.aggFuncs, aggFunc)
		if e.defaultVal != nil {
			value := aggDesc.GetDefaultValue()
			e.defaultVal.AppendDatum(i, &value)
		}
	}

	executor_metrics.ExecutorStreamAggExec.Inc()
	return e
}

// buildSelection builds a SelectionExec (filter) over the single child of a
// PhysicalSelection plan.
func (b *executorBuilder) buildSelection(v *plannercore.PhysicalSelection) exec.Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	e := &SelectionExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec),
		filters:      v.Conditions,
	}
	return e
}

// buildProjection builds a ProjectionExec over the single child of a
// PhysicalProjection plan. Parallelism (numWorkers) is disabled for small
// inputs and for statements that write to the memdb.
func (b *executorBuilder) buildProjection(v *plannercore.PhysicalProjection) exec.Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	e := &ProjectionExec{
		BaseExecutor:     exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec),
		numWorkers:       int64(b.ctx.GetSessionVars().ProjectionConcurrency()),
		evaluatorSuit:    expression.NewEvaluatorSuite(v.Exprs, v.AvoidColumnEvaluator),
		calculateNoDelay: v.CalculateNoDelay,
	}

	// If the calculation row count for this Projection operator is smaller
	// than a Chunk size, we turn back to the un-parallel Projection
	// implementation to reduce the goroutine overhead.
	if int64(v.StatsCount()) < int64(b.ctx.GetSessionVars().MaxChunkSize) {
		e.numWorkers = 0
	}

	// Use un-parallel projection for query that write on memdb to avoid data race.
	// See also https://github.com/pingcap/tidb/issues/26832
	if b.inUpdateStmt || b.inDeleteStmt || b.inInsertStmt || b.hasLock {
		e.numWorkers = 0
	}
	return e
}

// buildTableDual builds a TableDualExec. A dual table can only produce zero
// or one row; any other row count is a builder error.
func (b *executorBuilder) buildTableDual(v *plannercore.PhysicalTableDual) exec.Executor {
	if v.RowCount != 0 && v.RowCount != 1 {
		b.err = errors.Errorf("buildTableDual failed, invalid row count for dual table: %v", v.RowCount)
		return nil
	}
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	base.SetInitCap(v.RowCount)
	e := &TableDualExec{
		BaseExecutor: base,
		numDualRows:  v.RowCount,
	}
	return e
}

// getSnapshotTS returns the for-update ts if in an insert/update/delete/lock
// statement, otherwise the isolation read ts.
// Please notice that in RC isolation, the above two ts are the same.
// When building for a data reader, the pre-recorded dataReaderTS is used instead.
func (b *executorBuilder) getSnapshotTS() (ts uint64, err error) {
	if b.forDataReaderBuilder {
		return b.dataReaderTS, nil
	}

	txnManager := sessiontxn.GetTxnManager(b.ctx)
	if b.inInsertStmt || b.inUpdateStmt || b.inDeleteStmt || b.inSelectLockStmt {
		return txnManager.GetStmtForUpdateTS()
	}
	return txnManager.GetStmtReadTS()
}

// getSnapshot gets the appropriate snapshot from txnManager and sets
// the relevant snapshot options before returning.
func (b *executorBuilder) getSnapshot() (kv.Snapshot, error) { var snapshot kv.Snapshot var err error txnManager := sessiontxn.GetTxnManager(b.ctx) if b.inInsertStmt || b.inUpdateStmt || b.inDeleteStmt || b.inSelectLockStmt { snapshot, err = txnManager.GetSnapshotWithStmtForUpdateTS() } else { snapshot, err = txnManager.GetSnapshotWithStmtReadTS() } if err != nil { return nil, err } sessVars := b.ctx.GetSessionVars() replicaReadType := sessVars.GetReplicaRead() snapshot.SetOption(kv.ReadReplicaScope, b.readReplicaScope) snapshot.SetOption(kv.TaskID, sessVars.StmtCtx.TaskID) snapshot.SetOption(kv.TidbKvReadTimeout, sessVars.GetTidbKvReadTimeout()) snapshot.SetOption(kv.ResourceGroupName, sessVars.ResourceGroupName) snapshot.SetOption(kv.ExplicitRequestSourceType, sessVars.ExplicitRequestSourceType) if replicaReadType.IsClosestRead() && b.readReplicaScope != kv.GlobalTxnScope { snapshot.SetOption(kv.MatchStoreLabels, []*metapb.StoreLabel{ { Key: placement.DCLabelKey, Value: b.readReplicaScope, }, }) } return snapshot, nil } func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) exec.Executor { switch v.DBName.L { case util.MetricSchemaName.L: return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &MetricRetriever{ table: v.Table, extractor: v.Extractor.(*plannercore.MetricTableExtractor), }, } case util.InformationSchemaName.L: switch v.Table.Name.L { case strings.ToLower(infoschema.TableClusterConfig): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterConfigRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), }, } case strings.ToLower(infoschema.TableClusterLoad): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterServerInfoRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), serverInfoType: 
diagnosticspb.ServerInfoType_LoadInfo, }, } case strings.ToLower(infoschema.TableClusterHardware): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterServerInfoRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), serverInfoType: diagnosticspb.ServerInfoType_HardwareInfo, }, } case strings.ToLower(infoschema.TableClusterSystemInfo): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterServerInfoRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), serverInfoType: diagnosticspb.ServerInfoType_SystemInfo, }, } case strings.ToLower(infoschema.TableClusterLog): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterLogRetriever{ extractor: v.Extractor.(*plannercore.ClusterLogTableExtractor), }, } case strings.ToLower(infoschema.TableTiDBHotRegionsHistory): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &hotRegionsHistoryRetriver{ extractor: v.Extractor.(*plannercore.HotRegionsHistoryTableExtractor), }, } case strings.ToLower(infoschema.TableInspectionResult): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &inspectionResultRetriever{ extractor: v.Extractor.(*plannercore.InspectionResultTableExtractor), timeRange: v.QueryTimeRange, }, } case strings.ToLower(infoschema.TableInspectionSummary): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &inspectionSummaryRetriever{ table: v.Table, extractor: v.Extractor.(*plannercore.InspectionSummaryTableExtractor), timeRange: v.QueryTimeRange, }, } case strings.ToLower(infoschema.TableInspectionRules): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, 
v.Schema(), v.ID()), table: v.Table, retriever: &inspectionRuleRetriever{ extractor: v.Extractor.(*plannercore.InspectionRuleTableExtractor), }, } case strings.ToLower(infoschema.TableMetricSummary): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &MetricsSummaryRetriever{ table: v.Table, extractor: v.Extractor.(*plannercore.MetricSummaryTableExtractor), timeRange: v.QueryTimeRange, }, } case strings.ToLower(infoschema.TableMetricSummaryByLabel): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &MetricsSummaryByLabelRetriever{ table: v.Table, extractor: v.Extractor.(*plannercore.MetricSummaryTableExtractor), timeRange: v.QueryTimeRange, }, } case strings.ToLower(infoschema.TableTiKVRegionPeers): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &tikvRegionPeersRetriever{ extractor: v.Extractor.(*plannercore.TikvRegionPeersExtractor), }, } case strings.ToLower(infoschema.TableSchemata), strings.ToLower(infoschema.TableStatistics), strings.ToLower(infoschema.TableTiDBIndexes), strings.ToLower(infoschema.TableViews), strings.ToLower(infoschema.TableTables), strings.ToLower(infoschema.TableReferConst), strings.ToLower(infoschema.TableSequences), strings.ToLower(infoschema.TablePartitions), strings.ToLower(infoschema.TableEngines), strings.ToLower(infoschema.TableCollations), strings.ToLower(infoschema.TableAnalyzeStatus), strings.ToLower(infoschema.TableClusterInfo), strings.ToLower(infoschema.TableProfiling), strings.ToLower(infoschema.TableCharacterSets), strings.ToLower(infoschema.TableKeyColumn), strings.ToLower(infoschema.TableUserPrivileges), strings.ToLower(infoschema.TableMetricTables), strings.ToLower(infoschema.TableCollationCharacterSetApplicability), strings.ToLower(infoschema.TableProcesslist), strings.ToLower(infoschema.ClusterTableProcesslist), 
strings.ToLower(infoschema.TableTiKVRegionStatus), strings.ToLower(infoschema.TableTiDBHotRegions), strings.ToLower(infoschema.TableSessionVar), strings.ToLower(infoschema.TableConstraints), strings.ToLower(infoschema.TableTiFlashReplica), strings.ToLower(infoschema.TableTiDBServersInfo), strings.ToLower(infoschema.TableTiKVStoreStatus), strings.ToLower(infoschema.TableClientErrorsSummaryGlobal), strings.ToLower(infoschema.TableClientErrorsSummaryByUser), strings.ToLower(infoschema.TableClientErrorsSummaryByHost), strings.ToLower(infoschema.TableAttributes), strings.ToLower(infoschema.TablePlacementPolicies), strings.ToLower(infoschema.TableTrxSummary), strings.ToLower(infoschema.TableVariablesInfo), strings.ToLower(infoschema.TableUserAttributes), strings.ToLower(infoschema.ClusterTableTrxSummary), strings.ToLower(infoschema.TableMemoryUsage), strings.ToLower(infoschema.TableMemoryUsageOpsHistory), strings.ToLower(infoschema.ClusterTableMemoryUsage), strings.ToLower(infoschema.ClusterTableMemoryUsageOpsHistory), strings.ToLower(infoschema.TableResourceGroups), strings.ToLower(infoschema.TableRunawayWatches): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &memtableRetriever{ table: v.Table, columns: v.Columns, extractor: v.Extractor, }, } case strings.ToLower(infoschema.TableTiDBTrx), strings.ToLower(infoschema.ClusterTableTiDBTrx): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &tidbTrxTableRetriever{ table: v.Table, columns: v.Columns, }, } case strings.ToLower(infoschema.TableDataLockWaits): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &dataLockWaitsTableRetriever{ table: v.Table, columns: v.Columns, }, } case strings.ToLower(infoschema.TableDeadlocks), strings.ToLower(infoschema.ClusterTableDeadlocks): return &MemTableReaderExec{ BaseExecutor: 
exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &deadlocksTableRetriever{ table: v.Table, columns: v.Columns, }, } case strings.ToLower(infoschema.TableStatementsSummary), strings.ToLower(infoschema.TableStatementsSummaryHistory), strings.ToLower(infoschema.TableStatementsSummaryEvicted), strings.ToLower(infoschema.ClusterTableStatementsSummary), strings.ToLower(infoschema.ClusterTableStatementsSummaryHistory), strings.ToLower(infoschema.ClusterTableStatementsSummaryEvicted): var extractor *plannercore.StatementsSummaryExtractor if v.Extractor != nil { extractor = v.Extractor.(*plannercore.StatementsSummaryExtractor) } return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: buildStmtSummaryRetriever(v.Table, v.Columns, extractor), } case strings.ToLower(infoschema.TableColumns): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &hugeMemTableRetriever{ table: v.Table, columns: v.Columns, extractor: v.Extractor.(*plannercore.ColumnsTableExtractor), viewSchemaMap: make(map[int64]*expression.Schema), viewOutputNamesMap: make(map[int64]types.NameSlice), }, } case strings.ToLower(infoschema.TableSlowQuery), strings.ToLower(infoschema.ClusterTableSlowLog): memTracker := memory.NewTracker(v.ID(), -1) memTracker.AttachTo(b.ctx.GetSessionVars().StmtCtx.MemTracker) return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &slowQueryRetriever{ table: v.Table, outputCols: v.Columns, extractor: v.Extractor.(*plannercore.SlowQueryExtractor), memTracker: memTracker, }, } case strings.ToLower(infoschema.TableStorageStats): return &MemTableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &tableStorageStatsRetriever{ table: v.Table, outputCols: v.Columns, extractor: 
// buildApply builds the executor for a correlated apply: the inner child is
// re-evaluated once per outer row. It first extracts the correlated columns
// of the inner plan against the outer schema, then builds a serial
// NestedLoopApplyExec; when v.Concurrency > 1 it additionally tries to build
// a ParallelNestedLoopApplyExec, cloning the inner plan per worker, and
// silently falls back to the serial executor if any clone/build step fails.
func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) exec.Executor {
	var (
		innerPlan plannercore.PhysicalPlan
		outerPlan plannercore.PhysicalPlan
	)
	// v.InnerChildIdx selects which child is the correlated (inner) side.
	if v.InnerChildIdx == 0 {
		innerPlan = v.Children()[0]
		outerPlan = v.Children()[1]
	} else {
		innerPlan = v.Children()[1]
		outerPlan = v.Children()[0]
	}
	// Record which outer columns the inner plan is correlated with; the apply
	// executor fills these in before each inner evaluation.
	v.OuterSchema = plannercore.ExtractCorColumnsBySchema4PhysicalPlan(innerPlan, outerPlan.Schema())
	leftChild := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	rightChild := b.build(v.Children()[1])
	if b.err != nil {
		return nil
	}
	// test is in the explain/naaj.test#part5.
	// although we prepared the NAEqualConditions, but for Apply mode, we still need move it to other conditions like eq condition did here.
	otherConditions := append(expression.ScalarFuncs2Exprs(v.EqualConditions), expression.ScalarFuncs2Exprs(v.NAEqualConditions)...)
	otherConditions = append(otherConditions, v.OtherConditions...)
	defaultValues := v.DefaultValues
	if defaultValues == nil {
		// Default to one zero datum per inner column (used to pad unmatched
		// rows for outer joins).
		defaultValues = make([]types.Datum, v.Children()[v.InnerChildIdx].Schema().Len())
	}
	// Map left/right children onto outer/inner roles according to InnerChildIdx.
	outerExec, innerExec := leftChild, rightChild
	outerFilter, innerFilter := v.LeftConditions, v.RightConditions
	if v.InnerChildIdx == 0 {
		outerExec, innerExec = rightChild, leftChild
		outerFilter, innerFilter = v.RightConditions, v.LeftConditions
	}
	tupleJoiner := newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues,
		otherConditions, retTypes(leftChild), retTypes(rightChild), nil, false)
	serialExec := &NestedLoopApplyExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec, innerExec),
		innerExec:    innerExec,
		outerExec:    outerExec,
		outerFilter:  outerFilter,
		innerFilter:  innerFilter,
		outer:        v.JoinType != plannercore.InnerJoin,
		joiner:       tupleJoiner,
		outerSchema:  v.OuterSchema,
		ctx:          b.ctx,
		canUseCache:  v.CanUseCache,
	}
	executor_metrics.ExecutorCounterNestedLoopApplyExec.Inc()
	// try parallel mode
	if v.Concurrency > 1 {
		innerExecs := make([]exec.Executor, 0, v.Concurrency)
		innerFilters := make([]expression.CNFExprs, 0, v.Concurrency)
		corCols := make([][]*expression.CorrelatedColumn, 0, v.Concurrency)
		joiners := make([]joiner, 0, v.Concurrency)
		for i := 0; i < v.Concurrency; i++ {
			// Every worker gets its own clone of the inner plan so the
			// correlated columns can be filled in concurrently.
			clonedInnerPlan, err := plannercore.SafeClone(innerPlan)
			if err != nil {
				// The error is deliberately discarded: failing to go
				// parallel degrades to the already-built serial executor.
				b.err = nil
				return serialExec
			}
			corCol := plannercore.ExtractCorColumnsBySchema4PhysicalPlan(clonedInnerPlan, outerPlan.Schema())
			clonedInnerExec := b.build(clonedInnerPlan)
			if b.err != nil {
				// Same fallback as above: swallow the error, use serial mode.
				b.err = nil
				return serialExec
			}
			innerExecs = append(innerExecs, clonedInnerExec)
			corCols = append(corCols, corCol)
			innerFilters = append(innerFilters, innerFilter.Clone())
			joiners = append(joiners, newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues,
				otherConditions, retTypes(leftChild), retTypes(rightChild), nil, false))
		}
		allExecs := append([]exec.Executor{outerExec}, innerExecs...)
		return &ParallelNestedLoopApplyExec{
			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), allExecs...),
			innerExecs:   innerExecs,
			outerExec:    outerExec,
			outerFilter:  outerFilter,
			innerFilter:  innerFilters,
			outer:        v.JoinType != plannercore.InnerJoin,
			joiners:      joiners,
			corCols:      corCols,
			concurrency:  v.Concurrency,
			useCache:     v.CanUseCache,
		}
	}
	return serialExec
}
&col.FieldType, } } for i, pkCol := range primaryIdx.Columns { tableCols[pkCol.Offset].Index = i } return plannercore.NewCommonHandleCols(sc, tbInfo, primaryIdx, tableCols) } intCol := &expression.Column{ RetType: types.NewFieldType(mysql.TypeLonglong), } return plannercore.NewIntHandleCols(intCol) } func (b *executorBuilder) buildSplitRegion(v *plannercore.SplitRegion) exec.Executor { base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) base.SetInitCap(1) base.SetMaxChunkSize(1) if v.IndexInfo != nil { return &SplitIndexRegionExec{ BaseExecutor: base, tableInfo: v.TableInfo, partitionNames: v.PartitionNames, indexInfo: v.IndexInfo, lower: v.Lower, upper: v.Upper, num: v.Num, valueLists: v.ValueLists, } } handleCols := buildHandleColsForSplit(b.ctx.GetSessionVars().StmtCtx, v.TableInfo) if len(v.ValueLists) > 0 { return &SplitTableRegionExec{ BaseExecutor: base, tableInfo: v.TableInfo, partitionNames: v.PartitionNames, handleCols: handleCols, valueLists: v.ValueLists, } } return &SplitTableRegionExec{ BaseExecutor: base, tableInfo: v.TableInfo, partitionNames: v.PartitionNames, handleCols: handleCols, lower: v.Lower, upper: v.Upper, num: v.Num, } } func (b *executorBuilder) buildUpdate(v *plannercore.Update) exec.Executor { b.inUpdateStmt = true tblID2table := make(map[int64]table.Table, len(v.TblColPosInfos)) multiUpdateOnSameTable := make(map[int64]bool) for _, info := range v.TblColPosInfos { tbl, _ := b.is.TableByID(info.TblID) if _, ok := tblID2table[info.TblID]; ok { multiUpdateOnSameTable[info.TblID] = true } tblID2table[info.TblID] = tbl if len(v.PartitionedTable) > 0 { // The v.PartitionedTable collects the partitioned table. // Replace the original table with the partitioned table to support partition selection. // e.g. update t partition (p0, p1), the new values are not belong to the given set p0, p1 // Using the table in v.PartitionedTable returns a proper error, while using the original table can't. 
for _, p := range v.PartitionedTable { if info.TblID == p.Meta().ID { tblID2table[info.TblID] = p } } } } if b.err = b.updateForUpdateTS(); b.err != nil { return nil } selExec := b.build(v.SelectPlan) if b.err != nil { return nil } base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), selExec) base.SetInitCap(chunk.ZeroCapacity) var assignFlag []int assignFlag, b.err = getAssignFlag(b.ctx, v, selExec.Schema().Len()) if b.err != nil { return nil } // should use the new tblID2table, since the update's schema may have been changed in Execstmt. b.err = plannercore.CheckUpdateList(assignFlag, v, tblID2table) if b.err != nil { return nil } updateExec := &UpdateExec{ BaseExecutor: base, OrderedList: v.OrderedList, allAssignmentsAreConstant: v.AllAssignmentsAreConstant, virtualAssignmentsOffset: v.VirtualAssignmentsOffset, multiUpdateOnSameTable: multiUpdateOnSameTable, tblID2table: tblID2table, tblColPosInfos: v.TblColPosInfos, assignFlag: assignFlag, } updateExec.fkChecks, b.err = buildTblID2FKCheckExecs(b.ctx, tblID2table, v.FKChecks) if b.err != nil { return nil } updateExec.fkCascades, b.err = b.buildTblID2FKCascadeExecs(tblID2table, v.FKCascades) if b.err != nil { return nil } return updateExec } func getAssignFlag(ctx sessionctx.Context, v *plannercore.Update, schemaLen int) ([]int, error) { assignFlag := make([]int, schemaLen) for i := range assignFlag { assignFlag[i] = -1 } for _, assign := range v.OrderedList { if !ctx.GetSessionVars().AllowWriteRowID && assign.Col.ID == model.ExtraHandleID { return nil, errors.Errorf("insert, update and replace statements for _tidb_rowid are not supported") } tblIdx, found := v.TblColPosInfos.FindTblIdx(assign.Col.Index) if found { colIdx := assign.Col.Index assignFlag[colIdx] = tblIdx } } return assignFlag, nil } func (b *executorBuilder) buildDelete(v *plannercore.Delete) exec.Executor { b.inDeleteStmt = true tblID2table := make(map[int64]table.Table, len(v.TblColPosInfos)) for _, info := range v.TblColPosInfos { 
// updateForUpdateTS refreshes the "for update" timestamp of the current
// statement when the transaction manager decides one is needed. The returned
// timestamp itself is discarded; only the side effect (and error) matters.
func (b *executorBuilder) updateForUpdateTS() error {
	// GetStmtForUpdateTS will auto update the for update ts if it is necessary
	_, err := sessiontxn.GetTxnManager(b.ctx).GetStmtForUpdateTS()
	return err
}
// buildAnalyzeIndexIncremental builds an incremental index-analyze task on
// top of the regular pushdown task: it reuses the index's existing histogram
// (truncated at the last analyzed position) so only newer data is scanned.
// It falls back to the plain pushdown task whenever incremental analysis
// would be inaccurate (pseudo stats, empty/evicted index, null analyze pos).
func (b *executorBuilder) buildAnalyzeIndexIncremental(task plannercore.AnalyzeIndexTask, opts map[ast.AnalyzeOptionType]uint64) *analyzeTask {
	h := domain.GetDomain(b.ctx).StatsHandle()
	statsTbl := h.GetPartitionStats(&model.TableInfo{}, task.TableID.GetStatisticsID())
	analyzeTask := b.buildAnalyzeIndexPushdown(task, opts, "")
	if statsTbl.Pseudo {
		return analyzeTask
	}
	idx, ok := statsTbl.Indices[task.IndexInfo.ID]
	if !ok || idx.Len() == 0 || idx.LastAnalyzePos.IsNull() {
		return analyzeTask
	}
	// If idx got evicted previously, we directly use IndexPushDown task as incremental analyze task will cause inaccuracy
	if idx.IsEvicted() {
		return analyzeTask
	}
	failpoint.Inject("assertEvictIndex", func() {
		if idx.IsEvicted() {
			panic("evicted index shouldn't use analyze incremental task")
		}
	})
	var oldHist *statistics.Histogram
	if statistics.IsAnalyzed(idx.Flag) {
		// Fully analyzed before: keep the whole old histogram and reuse the
		// CM sketch dimensions so the new sketch can be merged into the old.
		exec := analyzeTask.idxExec
		if idx.CMSketch != nil {
			width, depth := idx.CMSketch.GetWidthAndDepth()
			exec.analyzePB.IdxReq.CmsketchWidth = &width
			exec.analyzePB.IdxReq.CmsketchDepth = &depth
		}
		oldHist = idx.Histogram.Copy()
	} else {
		// Partially analyzed: truncate the histogram at the bucket holding
		// the last analyzed position.
		_, bktID := idx.LessRowCountWithBktIdx(nil, idx.LastAnalyzePos)
		if bktID == 0 {
			return analyzeTask
		}
		oldHist = idx.TruncateHistogram(bktID)
	}
	var oldTopN *statistics.TopN
	if analyzeTask.idxExec.analyzePB.IdxReq.GetVersion() >= statistics.Version2 {
		// V2 stats: drop the old histogram's upper bound value from the
		// retained top-n, since that range will be re-analyzed.
		oldTopN = idx.TopN.Copy()
		oldTopN.RemoveVal(oldHist.Bounds.GetRow(len(oldHist.Buckets)*2 - 1).GetBytes(0))
	}
	oldHist = oldHist.RemoveUpperBound()
	job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: "analyze incremental index " + task.IndexInfo.Name.O}
	exec := analyzeTask.idxExec
	exec.job = job
	analyzeTask.taskType = idxIncrementalTask
	analyzeTask.idxIncrementalExec = &analyzeIndexIncrementalExec{AnalyzeIndexExec: *exec, oldHist: oldHist, oldCMS: idx.CMSketch, oldTopN: oldTopN}
	analyzeTask.job = job
	return analyzeTask
}
sampleRateReason string if opts[ast.AnalyzeOptNumSamples] == 0 { *sampleRate = math.Float64frombits(opts[ast.AnalyzeOptSampleRate]) if *sampleRate < 0 { *sampleRate, sampleRateReason = b.getAdjustedSampleRate(task) if task.PartitionName != "" { sc.AppendNote(errors.Errorf( `Analyze use auto adjusted sample rate %f for table %s.%s's partition %s, reason to use this rate is "%s"`, *sampleRate, task.DBName, task.TableName, task.PartitionName, sampleRateReason, )) } else { sc.AppendNote(errors.Errorf( `Analyze use auto adjusted sample rate %f for table %s.%s, reason to use this rate is "%s"`, *sampleRate, task.DBName, task.TableName, sampleRateReason, )) } } } job := &statistics.AnalyzeJob{ DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, SampleRateReason: sampleRateReason, } base := baseAnalyzeExec{ ctx: b.ctx, tableID: task.TableID, concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency(), analyzePB: &tipb.AnalyzeReq{ Tp: tipb.AnalyzeType_TypeFullSampling, Flags: sc.PushDownFlags(), TimeZoneOffset: offset, }, opts: opts, job: job, snapshot: startTS, } e := &AnalyzeColumnsExec{ baseAnalyzeExec: base, tableInfo: task.TblInfo, colsInfo: task.ColsInfo, handleCols: task.HandleCols, indexes: availableIdx, AnalyzeInfo: task.AnalyzeInfo, schemaForVirtualColEval: schemaForVirtualColEval, baseCount: count, baseModifyCnt: modifyCount, } e.analyzePB.ColReq = &tipb.AnalyzeColumnsReq{ BucketSize: int64(opts[ast.AnalyzeOptNumBuckets]), SampleSize: int64(opts[ast.AnalyzeOptNumSamples]), SampleRate: sampleRate, SketchSize: maxSketchSize, ColumnsInfo: util.ColumnsToProto(task.ColsInfo, task.TblInfo.PKIsHandle, false), ColumnGroups: colGroups, } if task.TblInfo != nil { e.analyzePB.ColReq.PrimaryColumnIds = tables.TryGetCommonPkColumnIds(task.TblInfo) if task.TblInfo.IsCommonHandle { e.analyzePB.ColReq.PrimaryPrefixColumnIds = tables.PrimaryPrefixColumnIDs(task.TblInfo) } } b.err = tables.SetPBColumnsDefaultValue(b.ctx, 
e.analyzePB.ColReq.ColumnsInfo, task.ColsInfo) return &analyzeTask{taskType: colTask, colExec: e, job: job} } // getAdjustedSampleRate calculate the sample rate by the table size. If we cannot get the table size. We use the 0.001 as the default sample rate. // From the paper "Random sampling for histogram construction: how much is enough?"'s Corollary 1 to Theorem 5, // for a table size n, histogram size k, maximum relative error in bin size f, and error probability gamma, // the minimum random sample size is // // r = 4 * k * ln(2*n/gamma) / f^2 // // If we take f = 0.5, gamma = 0.01, n =1e6, we would got r = 305.82* k. // Since the there's log function over the table size n, the r grows slowly when the n increases. // If we take n = 1e12, a 300*k sample still gives <= 0.66 bin size error with probability 0.99. // So if we don't consider the top-n values, we can keep the sample size at 300*256. // But we may take some top-n before building the histogram, so we increase the sample a little. func (b *executorBuilder) getAdjustedSampleRate(task plannercore.AnalyzeColumnsTask) (sampleRate float64, reason string) { statsHandle := domain.GetDomain(b.ctx).StatsHandle() defaultRate := 0.001 if statsHandle == nil { return defaultRate, fmt.Sprintf("statsHandler is nil, use the default-rate=%v", defaultRate) } var statsTbl *statistics.Table tid := task.TableID.GetStatisticsID() if tid == task.TblInfo.ID { statsTbl = statsHandle.GetTableStats(task.TblInfo) } else { statsTbl = statsHandle.GetPartitionStats(task.TblInfo, tid) } approxiCount, hasPD := b.getApproximateTableCountFromStorage(tid, task) // If there's no stats meta and no pd, return the default rate. if statsTbl == nil && !hasPD { return defaultRate, fmt.Sprintf("TiDB cannot get the row count of the table, use the default-rate=%v", defaultRate) } // If the count in stats_meta is still 0 and there's no information from pd side, we scan all rows. 
if statsTbl.RealtimeCount == 0 && !hasPD { return 1, "TiDB assumes that the table is empty and cannot get row count from PD, use sample-rate=1" } // we have issue https://github.com/pingcap/tidb/issues/29216. // To do a workaround for this issue, we check the approxiCount from the pd side to do a comparison. // If the count from the stats_meta is extremely smaller than the approximate count from the pd, // we think that we meet this issue and use the approximate count to calculate the sample rate. if float64(statsTbl.RealtimeCount*5) < approxiCount { // Confirmed by TiKV side, the experience error rate of the approximate count is about 20%. // So we increase the number to 150000 to reduce this error rate. sampleRate = math.Min(1, 150000/approxiCount) return sampleRate, fmt.Sprintf("Row count in stats_meta is much smaller compared with the row count got by PD, use min(1, 15000/%v) as the sample-rate=%v", approxiCount, sampleRate) } // If we don't go into the above if branch and we still detect the count is zero. Return 1 to prevent the dividing zero. if statsTbl.RealtimeCount == 0 { return 1, "TiDB assumes that the table is empty, use sample-rate=1" } // We are expected to scan about 100000 rows or so. 
// Since there's tiny error rate around the count from the stats meta, we use 110000 to get a little big result sampleRate = math.Min(1, config.DefRowsForSampleRate/float64(statsTbl.RealtimeCount)) return sampleRate, fmt.Sprintf("use min(1, %v/%v) as the sample-rate=%v", config.DefRowsForSampleRate, statsTbl.RealtimeCount, sampleRate) } func (b *executorBuilder) getApproximateTableCountFromStorage(tid int64, task plannercore.AnalyzeColumnsTask) (float64, bool) { return pdhelper.GlobalPDHelper.GetApproximateTableCountFromStorage(b.ctx, tid, task.DBName, task.TableName, task.PartitionName) } func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plannercore.AnalyzeColumnsTask, opts map[ast.AnalyzeOptionType]uint64, autoAnalyze string, schemaForVirtualColEval *expression.Schema) *analyzeTask { if task.StatsVersion == statistics.Version2 { return b.buildAnalyzeSamplingPushdown(task, opts, schemaForVirtualColEval) } job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: autoAnalyze + "analyze columns"} cols := task.ColsInfo if hasPkHist(task.HandleCols) { colInfo := task.TblInfo.Columns[task.HandleCols.GetCol(0).Index] cols = append([]*model.ColumnInfo{colInfo}, cols...) } else if task.HandleCols != nil && !task.HandleCols.IsInt() { cols = make([]*model.ColumnInfo, 0, len(task.ColsInfo)+task.HandleCols.NumCols()) for i := 0; i < task.HandleCols.NumCols(); i++ { cols = append(cols, task.TblInfo.Columns[task.HandleCols.GetCol(i).Index]) } cols = append(cols, task.ColsInfo...) 
task.ColsInfo = cols } _, offset := timeutil.Zone(b.ctx.GetSessionVars().Location()) sc := b.ctx.GetSessionVars().StmtCtx startTS, err := b.getSnapshotTS() if err != nil { b.err = err return nil } failpoint.Inject("injectAnalyzeSnapshot", func(val failpoint.Value) { startTS = uint64(val.(int)) }) base := baseAnalyzeExec{ ctx: b.ctx, tableID: task.TableID, concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency(), analyzePB: &tipb.AnalyzeReq{ Tp: tipb.AnalyzeType_TypeColumn, Flags: sc.PushDownFlags(), TimeZoneOffset: offset, }, opts: opts, job: job, snapshot: startTS, } e := &AnalyzeColumnsExec{ baseAnalyzeExec: base, colsInfo: task.ColsInfo, handleCols: task.HandleCols, AnalyzeInfo: task.AnalyzeInfo, } depth := int32(opts[ast.AnalyzeOptCMSketchDepth]) width := int32(opts[ast.AnalyzeOptCMSketchWidth]) e.analyzePB.ColReq = &tipb.AnalyzeColumnsReq{ BucketSize: int64(opts[ast.AnalyzeOptNumBuckets]), SampleSize: MaxRegionSampleSize, SketchSize: maxSketchSize, ColumnsInfo: util.ColumnsToProto(cols, task.HandleCols != nil && task.HandleCols.IsInt(), false), CmsketchDepth: &depth, CmsketchWidth: &width, } if task.TblInfo != nil { e.analyzePB.ColReq.PrimaryColumnIds = tables.TryGetCommonPkColumnIds(task.TblInfo) if task.TblInfo.IsCommonHandle { e.analyzePB.ColReq.PrimaryPrefixColumnIds = tables.PrimaryPrefixColumnIDs(task.TblInfo) } } if task.CommonHandleInfo != nil { topNSize := new(int32) *topNSize = int32(opts[ast.AnalyzeOptNumTopN]) statsVersion := new(int32) *statsVersion = int32(task.StatsVersion) e.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{ BucketSize: int64(opts[ast.AnalyzeOptNumBuckets]), NumColumns: int32(len(task.CommonHandleInfo.Columns)), TopNSize: topNSize, Version: statsVersion, } depth := int32(opts[ast.AnalyzeOptCMSketchDepth]) width := int32(opts[ast.AnalyzeOptCMSketchWidth]) e.analyzePB.IdxReq.CmsketchDepth = &depth e.analyzePB.IdxReq.CmsketchWidth = &width e.analyzePB.IdxReq.SketchSize = maxSketchSize e.analyzePB.ColReq.PrimaryColumnIds = 
// buildAnalyzePKIncremental builds an incremental analyze task for an integer
// primary key on top of the regular columns-pushdown task: the existing
// column histogram is kept (truncated at the last analyzed position) so only
// newer rows are scanned. It falls back to the plain pushdown task when
// incremental analysis is not applicable (pseudo stats, non-int handle,
// missing/empty column stats, null analyze position).
func (b *executorBuilder) buildAnalyzePKIncremental(task plannercore.AnalyzeColumnsTask, opts map[ast.AnalyzeOptionType]uint64) *analyzeTask {
	h := domain.GetDomain(b.ctx).StatsHandle()
	statsTbl := h.GetPartitionStats(&model.TableInfo{}, task.TableID.GetStatisticsID())
	analyzeTask := b.buildAnalyzeColumnsPushdown(task, opts, "", nil)
	if statsTbl.Pseudo {
		return analyzeTask
	}
	if task.HandleCols == nil || !task.HandleCols.IsInt() {
		return analyzeTask
	}
	col, ok := statsTbl.Columns[task.HandleCols.GetCol(0).ID]
	if !ok || col.Len() == 0 || col.LastAnalyzePos.IsNull() {
		return analyzeTask
	}
	var oldHist *statistics.Histogram
	if statistics.IsAnalyzed(col.Flag) {
		// Fully analyzed before: reuse the whole old histogram.
		oldHist = col.Histogram.Copy()
	} else {
		// Partially analyzed: truncate the histogram at the bucket holding
		// the last analyzed position.
		d, err := col.LastAnalyzePos.ConvertTo(b.ctx.GetSessionVars().StmtCtx, col.Tp)
		if err != nil {
			b.err = err
			return nil
		}
		_, bktID := col.LessRowCountWithBktIdx(nil, d)
		if bktID == 0 {
			return analyzeTask
		}
		oldHist = col.TruncateHistogram(bktID)
		// NOTE(review): NDV is reset to the truncated row count here —
		// presumably a conservative estimate for the retained range.
		oldHist.NDV = int64(oldHist.TotalRowCount())
	}
	job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: "analyze incremental primary key"}
	exec := analyzeTask.colExec
	exec.job = job
	analyzeTask.taskType = pkIncrementalTask
	analyzeTask.colIncrementalExec = &analyzePKIncrementalExec{AnalyzeColumnsExec: *exec, oldHist: oldHist}
	analyzeTask.job = job
	return analyzeTask
}
task.ColsInfo...) findTask = true break } } if !findTask { job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: "fast analyze columns"} var concurrency int concurrency, b.err = getBuildStatsConcurrency(e.Ctx()) if b.err != nil { return } startTS, err := b.getSnapshotTS() if err != nil { b.err = err return } base := baseAnalyzeExec{ ctx: b.ctx, tableID: task.TableID, opts: opts, concurrency: concurrency, job: job, snapshot: startTS, } fastExec := &AnalyzeFastExec{ baseAnalyzeExec: base, colsInfo: task.ColsInfo, handleCols: task.HandleCols, tblInfo: task.TblInfo, wg: &sync.WaitGroup{}, } b.err = fastExec.calculateEstimateSampleStep() if b.err != nil { return } e.tasks = append(e.tasks, &analyzeTask{ taskType: fastTask, fastExec: fastExec, job: job, }) } } func (b *executorBuilder) buildAnalyzeFastIndex(e *AnalyzeExec, task plannercore.AnalyzeIndexTask, opts map[ast.AnalyzeOptionType]uint64) { findTask := false for _, eTask := range e.tasks { if eTask.fastExec != nil && eTask.fastExec.tableID.Equals(&task.TableID) { eTask.fastExec.idxsInfo = append(eTask.fastExec.idxsInfo, task.IndexInfo) findTask = true break } } if !findTask { job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: "fast analyze index " + task.IndexInfo.Name.O} var concurrency int concurrency, b.err = getBuildStatsConcurrency(e.Ctx()) if b.err != nil { return } startTS, err := b.getSnapshotTS() if err != nil { b.err = err return } base := baseAnalyzeExec{ ctx: b.ctx, tableID: task.TableID, opts: opts, concurrency: concurrency, job: job, snapshot: startTS, } fastExec := &AnalyzeFastExec{ baseAnalyzeExec: base, idxsInfo: []*model.IndexInfo{task.IndexInfo}, tblInfo: task.TblInfo, wg: &sync.WaitGroup{}, } b.err = fastExec.calculateEstimateSampleStep() if b.err != nil { return } e.tasks = append(e.tasks, &analyzeTask{ taskType: fastTask, fastExec: fastExec, job: job, }) } } func (b *executorBuilder) 
// buildAnalyze builds the top-level ANALYZE executor, dispatching each column
// and index task to the incremental, fast, or pushdown builder depending on
// the task flags and the session's fast-analyze setting. Builder errors are
// propagated through b.err and checked after every task.
func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) exec.Executor {
	e := &AnalyzeExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
		tasks:        make([]*analyzeTask, 0, len(v.ColTasks)+len(v.IdxTasks)),
		opts:         v.Opts,
		OptionsMap:   v.OptionsMap,
	}
	enableFastAnalyze := b.ctx.GetSessionVars().EnableFastAnalyze
	// Restricted (internal) SQL indicates an auto-analyze; the prefix is
	// recorded in the job info strings of the pushdown tasks.
	autoAnalyze := ""
	if b.ctx.GetSessionVars().InRestrictedSQL {
		autoAnalyze = "auto "
	}
	for _, task := range v.ColTasks {
		if task.Incremental {
			e.tasks = append(e.tasks, b.buildAnalyzePKIncremental(task, v.Opts))
		} else {
			if enableFastAnalyze {
				b.buildAnalyzeFastColumn(e, task, v.Opts)
			} else {
				// The pushdown path needs an expression schema to evaluate
				// virtual columns on the TiDB side.
				columns, _, err := expression.ColumnInfos2ColumnsAndNames(b.ctx, model.NewCIStr(task.AnalyzeInfo.DBName), task.TblInfo.Name, task.ColsInfo, task.TblInfo)
				if err != nil {
					b.err = err
					return nil
				}
				schema := expression.NewSchema(columns...)
				e.tasks = append(e.tasks, b.buildAnalyzeColumnsPushdown(task, v.Opts, autoAnalyze, schema))
			}
		}
		// The fast/pushdown builders report failures through b.err.
		if b.err != nil {
			return nil
		}
	}
	for _, task := range v.IdxTasks {
		if task.Incremental {
			e.tasks = append(e.tasks, b.buildAnalyzeIndexIncremental(task, v.Opts))
		} else {
			if enableFastAnalyze {
				b.buildAnalyzeFastIndex(e, task, v.Opts)
			} else {
				e.tasks = append(e.tasks, b.buildAnalyzeIndexPushdown(task, v.Opts, autoAnalyze))
			}
		}
		if b.err != nil {
			return nil
		}
	}
	return e
}
func markChildrenUsedCols(outputCols []*expression.Column, childSchemas ...*expression.Schema) (childrenUsed [][]bool) {
	childrenUsed = make([][]bool, 0, len(childSchemas))
	// Column.Index addresses the concatenation of all child schemas, so record
	// which concatenated offsets appear in the output.
	markedOffsets := make(map[int]struct{})
	for _, col := range outputCols {
		markedOffsets[col.Index] = struct{}{}
	}
	prefixLen := 0
	for _, childSchema := range childSchemas {
		used := make([]bool, len(childSchema.Columns))
		for i := range childSchema.Columns {
			// Offset i of this child sits at prefixLen+i in the concatenation.
			if _, ok := markedOffsets[prefixLen+i]; ok {
				used[i] = true
			}
		}
		childrenUsed = append(childrenUsed, used)
		prefixLen += childSchema.Len()
	}
	return
}

// corColInDistPlan reports whether any selection condition in the distributed
// plan list contains a correlated column (which would require re-building the
// pushed-down request per outer row).
func (*executorBuilder) corColInDistPlan(plans []plannercore.PhysicalPlan) bool {
	for _, p := range plans {
		x, ok := p.(*plannercore.PhysicalSelection)
		if !ok {
			continue
		}
		for _, cond := range x.Conditions {
			if len(expression.ExtractCorColumns(cond)) > 0 {
				return true
			}
		}
	}
	return false
}

// corColInAccess checks whether there's correlated column in access conditions.
func (*executorBuilder) corColInAccess(p plannercore.PhysicalPlan) bool {
	var access []expression.Expression
	switch x := p.(type) {
	case *plannercore.PhysicalTableScan:
		access = x.AccessCondition
	case *plannercore.PhysicalIndexScan:
		access = x.AccessCondition
	}
	for _, cond := range access {
		if len(expression.ExtractCorColumns(cond)) > 0 {
			return true
		}
	}
	return false
}

// newDataReaderBuilder clones the current builder (flagged for data-reader use
// and pinned to the current snapshot TS) and wraps it for index-join inner-side
// reader construction.
func (b *executorBuilder) newDataReaderBuilder(p plannercore.PhysicalPlan) (*dataReaderBuilder, error) {
	ts, err := b.getSnapshotTS()
	if err != nil {
		return nil, err
	}
	// Shallow copy so the data reader can carry its own flags/TS without
	// mutating the original builder.
	builderForDataReader := *b
	builderForDataReader.forDataReaderBuilder = true
	builderForDataReader.dataReaderTS = ts
	return &dataReaderBuilder{
		Plan:            p,
		executorBuilder: &builderForDataReader,
	}, nil
}

func (b *executorBuilder) buildIndexLookUpJoin(v *plannercore.PhysicalIndexJoin) exec.Executor {
	// Build the outer (probe) side first; b.err carries any failure.
	outerExec := b.build(v.Children()[1-v.InnerChildIdx])
	if b.err != nil {
		return nil
	}
	outerTypes := retTypes(outerExec)
	innerPlan := v.Children()[v.InnerChildIdx]
	innerTypes := make([]*types.FieldType, 
innerPlan.Schema().Len()) for i, col := range innerPlan.Schema().Columns { innerTypes[i] = col.RetType.Clone() // The `innerTypes` would be called for `Datum.ConvertTo` when converting the columns from outer table // to build hash map or construct lookup keys. So we need to modify its flen otherwise there would be // truncate error. See issue https://github.com/pingcap/tidb/issues/21232 for example. if innerTypes[i].EvalType() == types.ETString { innerTypes[i].SetFlen(types.UnspecifiedLength) } } // Use the probe table's collation. for i, col := range v.OuterHashKeys { outerTypes[col.Index] = outerTypes[col.Index].Clone() outerTypes[col.Index].SetCollate(innerTypes[v.InnerHashKeys[i].Index].GetCollate()) outerTypes[col.Index].SetFlag(col.RetType.GetFlag()) } // We should use JoinKey to construct the type information using by hashing, instead of using the child's schema directly. // When a hybrid type column is hashed multiple times, we need to distinguish what field types are used. // For example, the condition `enum = int and enum = string`, we should use ETInt to hash the first column, // and use ETString to hash the second column, although they may be the same column. 
innerHashTypes := make([]*types.FieldType, len(v.InnerHashKeys)) outerHashTypes := make([]*types.FieldType, len(v.OuterHashKeys)) for i, col := range v.InnerHashKeys { innerHashTypes[i] = innerTypes[col.Index].Clone() innerHashTypes[i].SetFlag(col.RetType.GetFlag()) } for i, col := range v.OuterHashKeys { outerHashTypes[i] = outerTypes[col.Index].Clone() outerHashTypes[i].SetFlag(col.RetType.GetFlag()) } var ( outerFilter []expression.Expression leftTypes, rightTypes []*types.FieldType ) if v.InnerChildIdx == 0 { leftTypes, rightTypes = innerTypes, outerTypes outerFilter = v.RightConditions if len(v.LeftConditions) > 0 { b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "join's inner condition should be empty") return nil } } else { leftTypes, rightTypes = outerTypes, innerTypes outerFilter = v.LeftConditions if len(v.RightConditions) > 0 { b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "join's inner condition should be empty") return nil } } defaultValues := v.DefaultValues if defaultValues == nil { defaultValues = make([]types.Datum, len(innerTypes)) } hasPrefixCol := false for _, l := range v.IdxColLens { if l != types.UnspecifiedLength { hasPrefixCol = true break } } readerBuilder, err := b.newDataReaderBuilder(innerPlan) if err != nil { b.err = err return nil } e := &IndexLookUpJoin{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec), outerCtx: outerCtx{ rowTypes: outerTypes, hashTypes: outerHashTypes, filter: outerFilter, }, innerCtx: innerCtx{ readerBuilder: readerBuilder, rowTypes: innerTypes, hashTypes: innerHashTypes, colLens: v.IdxColLens, hasPrefixCol: hasPrefixCol, }, workerWg: new(sync.WaitGroup), isOuterJoin: v.JoinType.IsOuterJoin(), indexRanges: v.Ranges, keyOff2IdxOff: v.KeyOff2IdxOff, lastColHelper: v.CompareFilters, finished: &atomic.Value{}, } colsFromChildren := v.Schema().Columns if v.JoinType == plannercore.LeftOuterSemiJoin || v.JoinType == plannercore.AntiLeftOuterSemiJoin { colsFromChildren = 
colsFromChildren[:len(colsFromChildren)-1] } childrenUsedSchema := markChildrenUsedCols(colsFromChildren, v.Children()[0].Schema(), v.Children()[1].Schema()) e.joiner = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, leftTypes, rightTypes, childrenUsedSchema, false) outerKeyCols := make([]int, len(v.OuterJoinKeys)) for i := 0; i < len(v.OuterJoinKeys); i++ { outerKeyCols[i] = v.OuterJoinKeys[i].Index } innerKeyCols := make([]int, len(v.InnerJoinKeys)) innerKeyColIDs := make([]int64, len(v.InnerJoinKeys)) keyCollators := make([]collate.Collator, 0, len(v.InnerJoinKeys)) for i := 0; i < len(v.InnerJoinKeys); i++ { innerKeyCols[i] = v.InnerJoinKeys[i].Index innerKeyColIDs[i] = v.InnerJoinKeys[i].ID keyCollators = append(keyCollators, collate.GetCollator(v.InnerJoinKeys[i].RetType.GetCollate())) } e.outerCtx.keyCols = outerKeyCols e.innerCtx.keyCols = innerKeyCols e.innerCtx.keyColIDs = innerKeyColIDs e.innerCtx.keyCollators = keyCollators outerHashCols, innerHashCols := make([]int, len(v.OuterHashKeys)), make([]int, len(v.InnerHashKeys)) hashCollators := make([]collate.Collator, 0, len(v.InnerHashKeys)) for i := 0; i < len(v.OuterHashKeys); i++ { outerHashCols[i] = v.OuterHashKeys[i].Index } for i := 0; i < len(v.InnerHashKeys); i++ { innerHashCols[i] = v.InnerHashKeys[i].Index hashCollators = append(hashCollators, collate.GetCollator(v.InnerHashKeys[i].RetType.GetCollate())) } e.outerCtx.hashCols = outerHashCols e.innerCtx.hashCols = innerHashCols e.innerCtx.hashCollators = hashCollators e.joinResult = tryNewCacheChunk(e) executor_metrics.ExecutorCounterIndexLookUpJoin.Inc() return e } func (b *executorBuilder) buildIndexLookUpMergeJoin(v *plannercore.PhysicalIndexMergeJoin) exec.Executor { outerExec := b.build(v.Children()[1-v.InnerChildIdx]) if b.err != nil { return nil } outerTypes := retTypes(outerExec) innerPlan := v.Children()[v.InnerChildIdx] innerTypes := make([]*types.FieldType, innerPlan.Schema().Len()) for i, col := 
range innerPlan.Schema().Columns { innerTypes[i] = col.RetType.Clone() // The `innerTypes` would be called for `Datum.ConvertTo` when converting the columns from outer table // to build hash map or construct lookup keys. So we need to modify its flen otherwise there would be // truncate error. See issue https://github.com/pingcap/tidb/issues/21232 for example. if innerTypes[i].EvalType() == types.ETString { innerTypes[i].SetFlen(types.UnspecifiedLength) } } var ( outerFilter []expression.Expression leftTypes, rightTypes []*types.FieldType ) if v.InnerChildIdx == 0 { leftTypes, rightTypes = innerTypes, outerTypes outerFilter = v.RightConditions if len(v.LeftConditions) > 0 { b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "join's inner condition should be empty") return nil } } else { leftTypes, rightTypes = outerTypes, innerTypes outerFilter = v.LeftConditions if len(v.RightConditions) > 0 { b.err = errors.Annotate(exeerrors.ErrBuildExecutor, "join's inner condition should be empty") return nil } } defaultValues := v.DefaultValues if defaultValues == nil { defaultValues = make([]types.Datum, len(innerTypes)) } outerKeyCols := make([]int, len(v.OuterJoinKeys)) for i := 0; i < len(v.OuterJoinKeys); i++ { outerKeyCols[i] = v.OuterJoinKeys[i].Index } innerKeyCols := make([]int, len(v.InnerJoinKeys)) keyCollators := make([]collate.Collator, 0, len(v.InnerJoinKeys)) for i := 0; i < len(v.InnerJoinKeys); i++ { innerKeyCols[i] = v.InnerJoinKeys[i].Index keyCollators = append(keyCollators, collate.GetCollator(v.InnerJoinKeys[i].RetType.GetCollate())) } executor_metrics.ExecutorCounterIndexLookUpJoin.Inc() readerBuilder, err := b.newDataReaderBuilder(innerPlan) if err != nil { b.err = err return nil } e := &IndexLookUpMergeJoin{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec), outerMergeCtx: outerMergeCtx{ rowTypes: outerTypes, filter: outerFilter, joinKeys: v.OuterJoinKeys, keyCols: outerKeyCols, needOuterSort: v.NeedOuterSort, compareFuncs: 
v.OuterCompareFuncs, }, innerMergeCtx: innerMergeCtx{ readerBuilder: readerBuilder, rowTypes: innerTypes, joinKeys: v.InnerJoinKeys, keyCols: innerKeyCols, keyCollators: keyCollators, compareFuncs: v.CompareFuncs, colLens: v.IdxColLens, desc: v.Desc, keyOff2KeyOffOrderByIdx: v.KeyOff2KeyOffOrderByIdx, }, workerWg: new(sync.WaitGroup), isOuterJoin: v.JoinType.IsOuterJoin(), indexRanges: v.Ranges, keyOff2IdxOff: v.KeyOff2IdxOff, lastColHelper: v.CompareFilters, } colsFromChildren := v.Schema().Columns if v.JoinType == plannercore.LeftOuterSemiJoin || v.JoinType == plannercore.AntiLeftOuterSemiJoin { colsFromChildren = colsFromChildren[:len(colsFromChildren)-1] } childrenUsedSchema := markChildrenUsedCols(colsFromChildren, v.Children()[0].Schema(), v.Children()[1].Schema()) joiners := make([]joiner, e.Ctx().GetSessionVars().IndexLookupJoinConcurrency()) for i := 0; i < len(joiners); i++ { joiners[i] = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, leftTypes, rightTypes, childrenUsedSchema, false) } e.joiners = joiners return e } func (b *executorBuilder) buildIndexNestedLoopHashJoin(v *plannercore.PhysicalIndexHashJoin) exec.Executor { join := b.buildIndexLookUpJoin(&(v.PhysicalIndexJoin)) if b.err != nil { return nil } e := join.(*IndexLookUpJoin) idxHash := &IndexNestedLoopHashJoin{ IndexLookUpJoin: *e, keepOuterOrder: v.KeepOuterOrder, } concurrency := e.Ctx().GetSessionVars().IndexLookupJoinConcurrency() idxHash.joiners = make([]joiner, concurrency) for i := 0; i < concurrency; i++ { idxHash.joiners[i] = e.joiner.Clone() } return idxHash } func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableReader) (*TableReaderExecutor, error) { tablePlans := v.TablePlans if v.StoreType == kv.TiFlash { tablePlans = []plannercore.PhysicalPlan{v.GetTablePlan()} } dagReq, err := builder.ConstructDAGReq(b.ctx, tablePlans, v.StoreType) if err != nil { return nil, err } ts, err := v.GetTableScan() if err != nil { return 
nil, err } if err = b.validCanReadTemporaryOrCacheTable(ts.Table); err != nil { return nil, err } tbl, _ := b.is.TableByID(ts.Table.ID) isPartition, physicalTableID := ts.IsPartition() if isPartition { pt := tbl.(table.PartitionedTable) tbl = pt.GetPartition(physicalTableID) } startTS, err := b.getSnapshotTS() if err != nil { return nil, err } paging := b.ctx.GetSessionVars().EnablePaging e := &TableReaderExecutor{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, netDataSize: v.GetNetDataSize(), table: tbl, keepOrder: ts.KeepOrder, desc: ts.Desc, byItems: ts.ByItems, columns: ts.Columns, paging: paging, corColInFilter: b.corColInDistPlan(v.TablePlans), corColInAccess: b.corColInAccess(v.TablePlans[0]), plans: v.TablePlans, tablePlan: v.GetTablePlan(), storeType: v.StoreType, batchCop: v.ReadReqType == plannercore.BatchCop, } e.buildVirtualColumnInfo() if v.StoreType == kv.TiDB && b.ctx.GetSessionVars().User != nil { // User info is used to do privilege check. It is only used in TiDB cluster memory table. 
e.dagPB.User = &tipb.UserIdentity{ UserName: b.ctx.GetSessionVars().User.Username, UserHost: b.ctx.GetSessionVars().User.Hostname, } } for i := range v.Schema().Columns { dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i)) } return e, nil } func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) exec.Executor { startTs, err := b.getSnapshotTS() if err != nil { b.err = err return nil } gather := &MPPGather{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), is: b.is, originalPlan: v.GetTablePlan(), startTS: startTs, mppQueryID: kv.MPPQueryID{QueryTs: getMPPQueryTS(b.ctx), LocalQueryID: getMPPQueryID(b.ctx), ServerID: domain.GetDomain(b.ctx).ServerID()}, memTracker: memory.NewTracker(v.ID(), -1), columns: []*model.ColumnInfo{}, virtualColumnIndex: []int{}, virtualColumnRetFieldTypes: []*types.FieldType{}, } gather.memTracker.AttachTo(b.ctx.GetSessionVars().StmtCtx.MemTracker) var hasVirtualCol bool for _, col := range v.Schema().Columns { if col.VirtualExpr != nil { hasVirtualCol = true break } } var isSingleDataSource bool tableScans := v.GetTableScans() if len(tableScans) == 1 { isSingleDataSource = true } // 1. hasVirtualCol: when got virtual column in TableScan, will generate plan like the following, // and there will be no other operators in the MPP fragment. // MPPGather // ExchangeSender // PhysicalTableScan // 2. UnionScan: there won't be any operators like Join between UnionScan and TableScan. // and UnionScan cannot push down to tiflash. if !isSingleDataSource { if hasVirtualCol || b.encounterUnionScan { b.err = errors.Errorf("should only have one TableScan in MPP fragment(hasVirtualCol: %v, encounterUnionScan: %v)", hasVirtualCol, b.encounterUnionScan) return nil } return gather } // Setup MPPGather.table if isSingleDataSource. // Virtual Column or UnionScan need to use it. 
ts := tableScans[0] gather.columns = ts.Columns if hasVirtualCol { gather.virtualColumnIndex, gather.virtualColumnRetFieldTypes = buildVirtualColumnInfo(gather.Schema(), gather.columns) } tbl, _ := b.is.TableByID(ts.Table.ID) isPartition, physicalTableID := ts.IsPartition() if isPartition { // Only for static pruning partition table. pt := tbl.(table.PartitionedTable) tbl = pt.GetPartition(physicalTableID) } gather.table = tbl return gather } // buildTableReader builds a table reader executor. It first build a no range table reader, // and then update it ranges from table scan plan. func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) exec.Executor { failpoint.Inject("checkUseMPP", func(val failpoint.Value) { if !b.ctx.GetSessionVars().InRestrictedSQL && val.(bool) != useMPPExecution(b.ctx, v) { if val.(bool) { b.err = errors.New("expect mpp but not used") } else { b.err = errors.New("don't expect mpp but we used it") } failpoint.Return(nil) } }) useMPP := useMPPExecution(b.ctx, v) useTiFlashBatchCop := v.ReadReqType == plannercore.BatchCop useTiFlash := useMPP || useTiFlashBatchCop if useTiFlash { if _, isTiDBZoneLabelSet := config.GetGlobalConfig().Labels[placement.DCLabelKey]; b.ctx.GetSessionVars().TiFlashReplicaRead != tiflash.AllReplicas && !isTiDBZoneLabelSet { b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("the variable tiflash_replica_read is ignored, because the entry TiDB[%s] does not set the zone attribute and tiflash_replica_read is '%s'", config.GetGlobalConfig().AdvertiseAddress, tiflash.GetTiFlashReplicaRead(b.ctx.GetSessionVars().TiFlashReplicaRead))) } } if useMPP { return b.buildMPPGather(v) } ts, err := v.GetTableScan() if err != nil { b.err = err return nil } ret, err := buildNoRangeTableReader(b, v) if err != nil { b.err = err return nil } if err = b.validCanReadTemporaryOrCacheTable(ts.Table); err != nil { b.err = err return nil } if ret.table.Meta().TempTableType != model.TempTableNone { ret.dummy = true 
} ret.ranges = ts.Ranges sctx := b.ctx.GetSessionVars().StmtCtx sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { return ret } // When isPartition is set, it means the union rewriting is done, so a partition reader is preferred. if ok, _ := ts.IsPartition(); ok { return ret } pi := ts.Table.GetPartitionInfo() if pi == nil { return ret } tmp, _ := b.is.TableByID(ts.Table.ID) tbl := tmp.(table.PartitionedTable) partitions, err := partitionPruning(b.ctx, tbl, v.PartitionInfo.PruningConds, v.PartitionInfo.PartitionNames, v.PartitionInfo.Columns, v.PartitionInfo.ColumnNames) if err != nil { b.err = err return nil } if v.StoreType == kv.TiFlash { sctx.IsTiFlash.Store(true) } if len(partitions) == 0 { return &TableDualExec{BaseExecutor: *ret.Base()} } // Sort the partition is necessary to make the final multiple partition key ranges ordered. slices.SortFunc(partitions, func(i, j table.PhysicalTable) int { return cmp.Compare(i.GetPhysicalID(), j.GetPhysicalID()) }) ret.kvRangeBuilder = kvRangeBuilderFromRangeAndPartition{ sctx: b.ctx, partitions: partitions, } return ret } func buildIndexRangeForEachPartition(ctx sessionctx.Context, usedPartitions []table.PhysicalTable, contentPos []int64, lookUpContent []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) (map[int64][]*ranger.Range, error) { contentBucket := make(map[int64][]*indexJoinLookUpContent) for _, p := range usedPartitions { contentBucket[p.GetPhysicalID()] = make([]*indexJoinLookUpContent, 0, 8) } for i, pos := range contentPos { if _, ok := contentBucket[pos]; ok { contentBucket[pos] = append(contentBucket[pos], lookUpContent[i]) } } nextRange := make(map[int64][]*ranger.Range) for _, p := range usedPartitions { ranges, err := buildRangesForIndexJoin(ctx, contentBucket[p.GetPhysicalID()], indexRanges, keyOff2IdxOff, cwc) if err != nil { return nil, err } nextRange[p.GetPhysicalID()] = 
ranges
	}
	return nextRange, nil
}

// getPartitionKeyColOffsets maps each join-key column ID to its offset in the
// partitioned table's column list. It returns nil (meaning "cannot prune by
// key") when any key column is missing from the table, when the table exposes
// no partition expression, or when the partition expression references a
// column that is not among the join keys.
func getPartitionKeyColOffsets(keyColIDs []int64, pt table.PartitionedTable) []int {
	keyColOffsets := make([]int, len(keyColIDs))
	for i, colID := range keyColIDs {
		offset := -1
		for j, col := range pt.Cols() {
			if colID == col.ID {
				offset = j
				break
			}
		}
		// A key column not present in the table makes key-based pruning impossible.
		if offset == -1 {
			return nil
		}
		keyColOffsets[i] = offset
	}
	// Structural assertion: only tables exposing PartitionExpr() can be pruned here.
	t, ok := pt.(interface {
		PartitionExpr() *tables.PartitionExpr
	})
	if !ok {
		return nil
	}
	pe := t.PartitionExpr()
	if pe == nil {
		return nil
	}
	offsetMap := make(map[int]struct{})
	for _, offset := range keyColOffsets {
		offsetMap[offset] = struct{}{}
	}
	// Every column used by the partition expression must be covered by the join
	// keys, otherwise a row's target partition cannot be computed from the keys.
	for _, offset := range pe.ColumnOffset {
		if _, ok := offsetMap[offset]; !ok {
			return nil
		}
	}
	return keyColOffsets
}

func (builder *dataReaderBuilder) prunePartitionForInnerExecutor(tbl table.Table, partitionInfo *plannercore.PartitionInfo, lookUpContent []*indexJoinLookUpContent) (usedPartition []table.PhysicalTable, canPrune bool, contentPos []int64, err error) {
	partitionTbl := tbl.(table.PartitionedTable)
	// In index join, this is called by multiple goroutines simultaneously, but partitionPruning is not thread-safe.
	// Use once.Do to avoid DATA RACE here.
	// TODO: condition based pruning can be do in advance.
condPruneResult, err := builder.partitionPruning(partitionTbl, partitionInfo.PruningConds, partitionInfo.PartitionNames, partitionInfo.Columns, partitionInfo.ColumnNames) if err != nil { return nil, false, nil, err } // recalculate key column offsets if len(lookUpContent) == 0 { return nil, false, nil, nil } if lookUpContent[0].keyColIDs == nil { return nil, false, nil, plannercore.ErrInternal.GenWithStack("cannot get column IDs when dynamic pruning") } keyColOffsets := getPartitionKeyColOffsets(lookUpContent[0].keyColIDs, partitionTbl) if len(keyColOffsets) == 0 { return condPruneResult, false, nil, nil } locateKey := make([]types.Datum, len(partitionTbl.Cols())) partitions := make(map[int64]table.PhysicalTable) contentPos = make([]int64, len(lookUpContent)) for idx, content := range lookUpContent { for i, data := range content.keys { locateKey[keyColOffsets[i]] = data } p, err := partitionTbl.GetPartitionByRow(builder.ctx, locateKey) if table.ErrNoPartitionForGivenValue.Equal(err) { continue } if err != nil { return nil, false, nil, err } if _, ok := partitions[p.GetPhysicalID()]; !ok { partitions[p.GetPhysicalID()] = p } contentPos[idx] = p.GetPhysicalID() } usedPartition = make([]table.PhysicalTable, 0, len(partitions)) for _, p := range condPruneResult { if _, ok := partitions[p.GetPhysicalID()]; ok { usedPartition = append(usedPartition, p) } } // To make the final key ranges involving multiple partitions ordered. 
slices.SortFunc(usedPartition, func(i, j table.PhysicalTable) int { return cmp.Compare(i.GetPhysicalID(), j.GetPhysicalID()) }) return usedPartition, true, contentPos, nil } func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexReader) (*IndexReaderExecutor, error) { dagReq, err := builder.ConstructDAGReq(b.ctx, v.IndexPlans, kv.TiKV) if err != nil { return nil, err } is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) tbl, _ := b.is.TableByID(is.Table.ID) isPartition, physicalTableID := is.IsPartition() if isPartition { pt := tbl.(table.PartitionedTable) tbl = pt.GetPartition(physicalTableID) } else { physicalTableID = is.Table.ID } startTS, err := b.getSnapshotTS() if err != nil { return nil, err } paging := b.ctx.GetSessionVars().EnablePaging e := &IndexReaderExecutor{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, netDataSize: v.GetNetDataSize(), physicalTableID: physicalTableID, table: tbl, index: is.Index, keepOrder: is.KeepOrder, desc: is.Desc, columns: is.Columns, byItems: is.ByItems, paging: paging, corColInFilter: b.corColInDistPlan(v.IndexPlans), corColInAccess: b.corColInAccess(v.IndexPlans[0]), idxCols: is.IdxCols, colLens: is.IdxColLens, plans: v.IndexPlans, outputColumns: v.OutputColumns, } for _, col := range v.OutputColumns { dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(col.Index)) } return e, nil } func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) exec.Executor { is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) if err := b.validCanReadTemporaryOrCacheTable(is.Table); err != nil { b.err = err return nil } ret, err := buildNoRangeIndexReader(b, v) if err != nil { b.err = err return nil } if ret.table.Meta().TempTableType != model.TempTableNone { ret.dummy = true } ret.ranges = is.Ranges sctx := b.ctx.GetSessionVars().StmtCtx sctx.IndexNames 
= append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { return ret } // When isPartition is set, it means the union rewriting is done, so a partition reader is preferred. if ok, _ := is.IsPartition(); ok { return ret } pi := is.Table.GetPartitionInfo() if pi == nil { return ret } if is.Index.Global { tmp, ok := b.is.TableByID(ret.table.Meta().ID) if !ok { b.err = infoschema.ErrTableNotExists return nil } tbl, ok := tmp.(table.PartitionedTable) if !ok { b.err = exeerrors.ErrBuildExecutor return nil } ret.partitionIDMap, err = getPartitionIdsAfterPruning(b.ctx, tbl, &v.PartitionInfo) if err != nil { b.err = err return nil } return ret } tmp, _ := b.is.TableByID(is.Table.ID) tbl := tmp.(table.PartitionedTable) partitions, err := partitionPruning(b.ctx, tbl, v.PartitionInfo.PruningConds, v.PartitionInfo.PartitionNames, v.PartitionInfo.Columns, v.PartitionInfo.ColumnNames) if err != nil { b.err = err return nil } ret.partitions = partitions return ret } func buildTableReq(b *executorBuilder, schemaLen int, plans []plannercore.PhysicalPlan) (dagReq *tipb.DAGRequest, val table.Table, err error) { tableReq, err := builder.ConstructDAGReq(b.ctx, plans, kv.TiKV) if err != nil { return nil, nil, err } for i := 0; i < schemaLen; i++ { tableReq.OutputOffsets = append(tableReq.OutputOffsets, uint32(i)) } ts := plans[0].(*plannercore.PhysicalTableScan) tbl, _ := b.is.TableByID(ts.Table.ID) isPartition, physicalTableID := ts.IsPartition() if isPartition { pt := tbl.(table.PartitionedTable) tbl = pt.GetPartition(physicalTableID) } return tableReq, tbl, err } // buildIndexReq is designed to create a DAG for index request. // If len(ByItems) != 0 means index request should return related columns // to sort result rows in TiDB side for parition tables. 
func buildIndexReq(ctx sessionctx.Context, columns []*model.IndexColumn, handleLen int, plans []plannercore.PhysicalPlan) (dagReq *tipb.DAGRequest, err error) { indexReq, err := builder.ConstructDAGReq(ctx, plans, kv.TiKV) if err != nil { return nil, err } indexReq.OutputOffsets = []uint32{} idxScan := plans[0].(*plannercore.PhysicalIndexScan) if len(idxScan.ByItems) != 0 { schema := idxScan.Schema() for _, item := range idxScan.ByItems { c, ok := item.Expr.(*expression.Column) if !ok { return nil, errors.Errorf("Not support non-column in orderBy pushed down") } find := false for i, schemaColumn := range schema.Columns { if schemaColumn.ID == c.ID { indexReq.OutputOffsets = append(indexReq.OutputOffsets, uint32(i)) find = true break } } if !find { return nil, errors.Errorf("Not found order by related columns in indexScan.schema") } } } for i := 0; i < handleLen; i++ { indexReq.OutputOffsets = append(indexReq.OutputOffsets, uint32(len(columns)+i)) } if idxScan.NeedExtraOutputCol() { // need add one more column for pid or physical table id indexReq.OutputOffsets = append(indexReq.OutputOffsets, uint32(len(columns)+handleLen)) } return indexReq, err } func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIndexLookUpReader) (*IndexLookUpExecutor, error) { is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) var handleLen int if len(v.CommonHandleCols) != 0 { handleLen = len(v.CommonHandleCols) } else { handleLen = 1 } indexReq, err := buildIndexReq(b.ctx, is.Index.Columns, handleLen, v.IndexPlans) if err != nil { return nil, err } indexPaging := false if v.Paging { indexPaging = true } tableReq, tbl, err := buildTableReq(b, v.Schema().Len(), v.TablePlans) if err != nil { return nil, err } ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) startTS, err := b.getSnapshotTS() if err != nil { return nil, err } readerBuilder, err := b.newDataReaderBuilder(nil) if err != nil { return nil, err } e := &IndexLookUpExecutor{ BaseExecutor: 
exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: indexReq, startTS: startTS, table: tbl, index: is.Index, keepOrder: is.KeepOrder, byItems: is.ByItems, desc: is.Desc, tableRequest: tableReq, columns: ts.Columns, indexPaging: indexPaging, dataReaderBuilder: readerBuilder, corColInIdxSide: b.corColInDistPlan(v.IndexPlans), corColInTblSide: b.corColInDistPlan(v.TablePlans), corColInAccess: b.corColInAccess(v.IndexPlans[0]), idxCols: is.IdxCols, colLens: is.IdxColLens, idxPlans: v.IndexPlans, tblPlans: v.TablePlans, PushedLimit: v.PushedLimit, idxNetDataSize: v.GetAvgTableRowSize(), avgRowSize: v.GetAvgTableRowSize(), } if v.ExtraHandleCol != nil { e.handleIdx = append(e.handleIdx, v.ExtraHandleCol.Index) e.handleCols = []*expression.Column{v.ExtraHandleCol} } else { for _, handleCol := range v.CommonHandleCols { e.handleIdx = append(e.handleIdx, handleCol.Index) } e.handleCols = v.CommonHandleCols e.primaryKeyIndex = tables.FindPrimaryIndex(tbl.Meta()) } return e, nil } func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLookUpReader) exec.Executor { if b.Ti != nil { b.Ti.UseTableLookUp.Store(true) } is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) if err := b.validCanReadTemporaryOrCacheTable(is.Table); err != nil { b.err = err return nil } ret, err := buildNoRangeIndexLookUpReader(b, v) if err != nil { b.err = err return nil } if ret.table.Meta().TempTableType != model.TempTableNone { ret.dummy = true } ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) ret.ranges = is.Ranges executor_metrics.ExecutorCounterIndexLookUpExecutor.Inc() sctx := b.ctx.GetSessionVars().StmtCtx sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { return ret } if pi := is.Table.GetPartitionInfo(); pi == nil { return ret } if is.Index.Global || len(is.ByItems) != 0 { tmp, ok := b.is.TableByID(ts.Table.ID) if !ok 
{ b.err = err return nil } tbl, ok := tmp.(table.PartitionedTable) if !ok { b.err = exeerrors.ErrBuildExecutor return nil } ret.partitionIDMap, err = getPartitionIdsAfterPruning(b.ctx, tbl, &v.PartitionInfo) if err != nil { b.err = err return nil } if is.Index.Global { return ret } } if ok, _ := is.IsPartition(); ok { // Already pruned when translated to logical union. return ret } tmp, _ := b.is.TableByID(is.Table.ID) tbl := tmp.(table.PartitionedTable) partitions, err := partitionPruning(b.ctx, tbl, v.PartitionInfo.PruningConds, v.PartitionInfo.PartitionNames, v.PartitionInfo.Columns, v.PartitionInfo.ColumnNames) if err != nil { b.err = err return nil } ret.partitionTableMode = true ret.prunedPartitions = partitions return ret } func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalIndexMergeReader) (*IndexMergeReaderExecutor, error) { partialPlanCount := len(v.PartialPlans) partialReqs := make([]*tipb.DAGRequest, 0, partialPlanCount) partialDataSizes := make([]float64, 0, partialPlanCount) indexes := make([]*model.IndexInfo, 0, partialPlanCount) descs := make([]bool, 0, partialPlanCount) ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) isCorColInPartialFilters := make([]bool, 0, partialPlanCount) isCorColInPartialAccess := make([]bool, 0, partialPlanCount) for i := 0; i < partialPlanCount; i++ { var tempReq *tipb.DAGRequest var err error if is, ok := v.PartialPlans[i][0].(*plannercore.PhysicalIndexScan); ok { tempReq, err = buildIndexReq(b.ctx, is.Index.Columns, ts.HandleCols.NumCols(), v.PartialPlans[i]) descs = append(descs, is.Desc) indexes = append(indexes, is.Index) } else { ts := v.PartialPlans[i][0].(*plannercore.PhysicalTableScan) tempReq, _, err = buildTableReq(b, len(ts.Columns), v.PartialPlans[i]) descs = append(descs, ts.Desc) indexes = append(indexes, nil) } if err != nil { return nil, err } collect := false tempReq.CollectRangeCounts = &collect partialReqs = append(partialReqs, tempReq) isCorColInPartialFilters = 
append(isCorColInPartialFilters, b.corColInDistPlan(v.PartialPlans[i])) isCorColInPartialAccess = append(isCorColInPartialAccess, b.corColInAccess(v.PartialPlans[i][0])) partialDataSizes = append(partialDataSizes, v.GetPartialReaderNetDataSize(v.PartialPlans[i][0])) } tableReq, tblInfo, err := buildTableReq(b, v.Schema().Len(), v.TablePlans) isCorColInTableFilter := b.corColInDistPlan(v.TablePlans) if err != nil { return nil, err } startTS, err := b.getSnapshotTS() if err != nil { return nil, err } readerBuilder, err := b.newDataReaderBuilder(nil) if err != nil { return nil, err } paging := b.ctx.GetSessionVars().EnablePaging e := &IndexMergeReaderExecutor{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPBs: partialReqs, startTS: startTS, table: tblInfo, indexes: indexes, descs: descs, tableRequest: tableReq, columns: ts.Columns, partialPlans: v.PartialPlans, tblPlans: v.TablePlans, partialNetDataSizes: partialDataSizes, dataAvgRowSize: v.GetAvgTableRowSize(), dataReaderBuilder: readerBuilder, paging: paging, handleCols: v.HandleCols, isCorColInPartialFilters: isCorColInPartialFilters, isCorColInTableFilter: isCorColInTableFilter, isCorColInPartialAccess: isCorColInPartialAccess, isIntersection: v.IsIntersectionType, byItems: v.ByItems, pushedLimit: v.PushedLimit, keepOrder: v.KeepOrder, } collectTable := false e.tableRequest.CollectRangeCounts = &collectTable return e, nil } func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMergeReader) exec.Executor { if b.Ti != nil { b.Ti.UseIndexMerge = true b.Ti.UseTableLookUp.Store(true) } ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) if err := b.validCanReadTemporaryOrCacheTable(ts.Table); err != nil { b.err = err return nil } ret, err := buildNoRangeIndexMergeReader(b, v) if err != nil { b.err = err return nil } ret.ranges = make([][]*ranger.Range, 0, len(v.PartialPlans)) sctx := b.ctx.GetSessionVars().StmtCtx for i := 0; i < len(v.PartialPlans); i++ { if is, ok := 
v.PartialPlans[i][0].(*plannercore.PhysicalIndexScan); ok { ret.ranges = append(ret.ranges, is.Ranges) sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) } else { ret.ranges = append(ret.ranges, v.PartialPlans[i][0].(*plannercore.PhysicalTableScan).Ranges) if ret.table.Meta().IsCommonHandle { tblInfo := ret.table.Meta() sctx.IndexNames = append(sctx.IndexNames, tblInfo.Name.O+":"+tables.FindPrimaryIndex(tblInfo).Name.O) } } } sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) executor_metrics.ExecutorCounterIndexMergeReaderExecutor.Inc() if !b.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { return ret } if pi := ts.Table.GetPartitionInfo(); pi == nil { return ret } tmp, _ := b.is.TableByID(ts.Table.ID) partitions, err := partitionPruning(b.ctx, tmp.(table.PartitionedTable), v.PartitionInfo.PruningConds, v.PartitionInfo.PartitionNames, v.PartitionInfo.Columns, v.PartitionInfo.ColumnNames) if err != nil { b.err = err return nil } ret.partitionTableMode, ret.prunedPartitions = true, partitions return ret } // dataReaderBuilder build an executor. // The executor can be used to read data in the ranges which are constructed by datums. // Differences from executorBuilder: // 1. dataReaderBuilder calculate data range from argument, rather than plan. // 2. the result executor is already opened. 
type dataReaderBuilder struct {
	plannercore.Plan
	*executorBuilder

	selectResultHook // for testing

	// once caches the partition-pruning result so pruning runs at most one
	// time per builder instance.
	once struct {
		sync.Once
		condPruneResult []table.PhysicalTable
		err             error
	}
}

// mockPhysicalIndexReader wraps a pre-built executor so tests can inject it
// through the plan-dispatch switch below.
type mockPhysicalIndexReader struct {
	plannercore.PhysicalPlan

	e exec.Executor
}

// MemoryUsage of mockPhysicalIndexReader is only for testing
func (*mockPhysicalIndexReader) MemoryUsage() (sum int64) {
	return
}

// buildExecutorForIndexJoin builds the inner-side executor of an index join,
// delegating to buildExecutorForIndexJoinInternal with the builder's own plan.
func (builder *dataReaderBuilder) buildExecutorForIndexJoin(ctx context.Context, lookUpContents []*indexJoinLookUpContent,
	indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
}

// buildExecutorForIndexJoinInternal dispatches on the concrete inner plan type
// and builds (and opens) the matching executor. Unsupported plan types yield
// an error.
func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.Context, plan plannercore.Plan, lookUpContents []*indexJoinLookUpContent,
	indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	switch v := plan.(type) {
	case *plannercore.PhysicalTableReader:
		return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
	case *plannercore.PhysicalIndexReader:
		return builder.buildIndexReaderForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
	case *plannercore.PhysicalIndexLookUpReader:
		return builder.buildIndexLookUpReaderForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
	case *plannercore.PhysicalUnionScan:
		return builder.buildUnionScanForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
	// The inner child of IndexJoin might be Projection when a combination of the following conditions is true:
	// 	1. The inner child fetch data using indexLookupReader
	// 	2. PK is not handle
	// 	3. The inner child needs to keep order
	// In this case, an extra column tidb_rowid will be appended in the output result of IndexLookupReader(see copTask.doubleReadNeedProj).
	// Then we need a Projection upon IndexLookupReader to prune the redundant column.
	case *plannercore.PhysicalProjection:
		return builder.buildProjectionForIndexJoin(ctx, v, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
	// Need to support physical selection because after PR 16389, TiDB will push down all the expr supported by TiKV or TiFlash
	// in predicate push down stage, so if there is an expr which only supported by TiFlash, a physical selection will be added after index read
	case *plannercore.PhysicalSelection:
		childExec, err := builder.buildExecutorForIndexJoinInternal(ctx, v.Children()[0], lookUpContents, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
		if err != nil {
			return nil, err
		}
		// Note: this local `exec` shadows the imported exec package inside
		// this case block.
		exec := &SelectionExec{
			BaseExecutor: exec.NewBaseExecutor(builder.ctx, v.Schema(), v.ID(), childExec),
			filters:      v.Conditions,
		}
		err = exec.open(ctx)
		return exec, err
	case *mockPhysicalIndexReader:
		return v.e, nil
	}
	return nil, errors.New("Wrong plan type for dataReaderBuilder")
}

// buildUnionScanForIndexJoin builds the child reader first, then wraps it in a
// UnionScanExec so uncommitted (dirty) rows of the current transaction are merged in.
func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context, v *plannercore.PhysicalUnionScan,
	values []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int,
	cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	childBuilder, err := builder.newDataReaderBuilder(v.Children()[0])
	if err != nil {
		return nil, err
	}
	reader, err := childBuilder.buildExecutorForIndexJoin(ctx, values, indexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
	if err !=
nil {
		return nil, err
	}
	ret := builder.buildUnionScanFromReader(reader, v)
	if us, ok := ret.(*UnionScanExec); ok {
		err = us.open(ctx)
	}
	return ret, err
}

// buildTableReaderForIndexJoin builds a table reader that fetches inner rows
// for an index join by handle / common-handle key. For non-partitioned tables
// (or static prune mode) it builds ranges directly; otherwise it prunes the
// partitions first and builds per-partition key ranges.
func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalTableReader,
	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int,
	cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	e, err := buildNoRangeTableReader(builder.executorBuilder, v)
	if !canReorderHandles {
		// `canReorderHandles` is set to false only in IndexMergeJoin. IndexMergeJoin will trigger a dead loop problem
		// when enabling paging(tidb/issues/35831). But IndexMergeJoin is not visible to the user and is deprecated
		// for now. Thus, we disable paging here.
		e.paging = false
	}
	if err != nil {
		return nil, err
	}
	tbInfo := e.table.Meta()
	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
		// Non-partitioned table, or static prune mode: no partition routing needed.
		if v.IsCommonHandle {
			kvRanges, err := buildKvRangesForIndexJoin(e.Ctx(), getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
			if err != nil {
				return nil, err
			}
			return builder.buildTableReaderFromKvRanges(ctx, e, kvRanges)
		}
		handles, _ := dedupHandles(lookUpContents)
		return builder.buildTableReaderFromHandles(ctx, e, handles, canReorderHandles)
	}
	tbl, _ := builder.is.TableByID(tbInfo.ID)
	pt := tbl.(table.PartitionedTable)
	partitionInfo := &v.PartitionInfo
	usedPartitionList, err := builder.partitionPruning(pt, partitionInfo.PruningConds, partitionInfo.PartitionNames, partitionInfo.Columns, partitionInfo.ColumnNames)
	if err != nil {
		return nil, err
	}
	usedPartitions := make(map[int64]table.PhysicalTable, len(usedPartitionList))
	for _, p := range usedPartitionList {
		usedPartitions[p.GetPhysicalID()] = p
	}
	var kvRanges []kv.KeyRange
	var keyColOffsets []int
	if len(lookUpContents) > 0 {
		keyColOffsets = getPartitionKeyColOffsets(lookUpContents[0].keyColIDs, pt)
	}
	if v.IsCommonHandle {
		if len(keyColOffsets) > 0 {
			// The join keys cover the partition key: route each lookup row to
			// its exact partition.
			locateKey := make([]types.Datum, len(pt.Cols()))
			kvRanges = make([]kv.KeyRange, 0, len(lookUpContents))
			// lookUpContentsByPID groups lookUpContents by pid(partition) so that kv ranges for same partition can be merged.
			lookUpContentsByPID := make(map[int64][]*indexJoinLookUpContent)
			for _, content := range lookUpContents {
				for i, data := range content.keys {
					locateKey[keyColOffsets[i]] = data
				}
				p, err := pt.GetPartitionByRow(e.Ctx(), locateKey)
				if table.ErrNoPartitionForGivenValue.Equal(err) {
					// Row maps to no partition: it cannot match, skip it.
					continue
				}
				if err != nil {
					return nil, err
				}
				pid := p.GetPhysicalID()
				if _, ok := usedPartitions[pid]; !ok {
					continue
				}
				lookUpContentsByPID[pid] = append(lookUpContentsByPID[pid], content)
			}
			for pid, contents := range lookUpContentsByPID {
				// buildKvRanges for each partition.
				tmp, err := buildKvRangesForIndexJoin(e.Ctx(), pid, -1, contents, indexRanges, keyOff2IdxOff, cwc, nil, interruptSignal)
				if err != nil {
					return nil, err
				}
				kvRanges = append(kvRanges, tmp...)
			}
		} else {
			// Partition key not covered: build ranges against every used partition.
			kvRanges = make([]kv.KeyRange, 0, len(usedPartitions)*len(lookUpContents))
			for _, p := range usedPartitionList {
				tmp, err := buildKvRangesForIndexJoin(e.Ctx(), p.GetPhysicalID(), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
				if err != nil {
					return nil, err
				}
				kvRanges = append(tmp, kvRanges...)
			}
		}
		// The key ranges should be ordered.
		slices.SortFunc(kvRanges, func(i, j kv.KeyRange) int {
			return bytes.Compare(i.StartKey, j.StartKey)
		})
		return builder.buildTableReaderFromKvRanges(ctx, e, kvRanges)
	}
	// Int-handle path: deduplicate handles, then route them to partitions.
	handles, lookUpContents := dedupHandles(lookUpContents)
	if len(keyColOffsets) > 0 {
		locateKey := make([]types.Datum, len(pt.Cols()))
		kvRanges = make([]kv.KeyRange, 0, len(lookUpContents))
		for _, content := range lookUpContents {
			for i, data := range content.keys {
				locateKey[keyColOffsets[i]] = data
			}
			p, err := pt.GetPartitionByRow(e.Ctx(), locateKey)
			if table.ErrNoPartitionForGivenValue.Equal(err) {
				continue
			}
			if err != nil {
				return nil, err
			}
			pid := p.GetPhysicalID()
			if _, ok := usedPartitions[pid]; !ok {
				continue
			}
			handle := kv.IntHandle(content.keys[0].GetInt64())
			ranges, _ := distsql.TableHandlesToKVRanges(pid, []kv.Handle{handle})
			kvRanges = append(kvRanges, ranges...)
		}
	} else {
		for _, p := range usedPartitionList {
			ranges, _ := distsql.TableHandlesToKVRanges(p.GetPhysicalID(), handles)
			kvRanges = append(kvRanges, ranges...)
		}
	}
	// The key ranges should be ordered.
	slices.SortFunc(kvRanges, func(i, j kv.KeyRange) int {
		return bytes.Compare(i.StartKey, j.StartKey)
	})
	return builder.buildTableReaderFromKvRanges(ctx, e, kvRanges)
}

// dedupHandles extracts an int handle from each lookup content and keeps only
// the contents whose key datums all carry the same int64 value as the first
// key. It returns the kept handles and the matching contents, in input order.
// NOTE(review): despite the name, this filters rather than removes duplicate
// handles across contents — confirm against callers.
func dedupHandles(lookUpContents []*indexJoinLookUpContent) ([]kv.Handle, []*indexJoinLookUpContent) {
	handles := make([]kv.Handle, 0, len(lookUpContents))
	validLookUpContents := make([]*indexJoinLookUpContent, 0, len(lookUpContents))
	for _, content := range lookUpContents {
		isValidHandle := true
		handle := kv.IntHandle(content.keys[0].GetInt64())
		for _, key := range content.keys {
			if handle.IntValue() != key.GetInt64() {
				isValidHandle = false
				break
			}
		}
		if isValidHandle {
			handles = append(handles, handle)
			validLookUpContents = append(validLookUpContents, content)
		}
	}
	return handles, validLookUpContents
}

// kvRangeBuilderFromRangeAndPartition builds kv ranges for a fixed set of
// physical partitions from datum ranges.
type kvRangeBuilderFromRangeAndPartition struct {
	sctx       sessionctx.Context
	partitions []table.PhysicalTable
}

// buildKeyRangeSeparately returns, per partition, the partition id and its kv
// ranges; a partition's slot stays nil when `ranges` is empty.
func (h kvRangeBuilderFromRangeAndPartition) buildKeyRangeSeparately(ranges []*ranger.Range) ([]int64, [][]kv.KeyRange, error) {
	ret := make([][]kv.KeyRange, len(h.partitions))
	pids := make([]int64, 0, len(h.partitions))
	for i, p := range h.partitions {
		pid := p.GetPhysicalID()
		pids = append(pids, pid)
		meta := p.Meta()
		if len(ranges) == 0 {
			continue
		}
		kvRange, err := distsql.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges)
		if err != nil {
			return nil, nil, err
		}
		ret[i] = kvRange.AppendSelfTo(ret[i])
	}
	return pids, ret, nil
}

// buildKeyRange returns the kv ranges of every partition (same order as
// h.partitions); all slots are nil when `ranges` is empty.
func (h kvRangeBuilderFromRangeAndPartition) buildKeyRange(ranges []*ranger.Range) ([][]kv.KeyRange, error) {
	ret := make([][]kv.KeyRange, len(h.partitions))
	if len(ranges) == 0 {
		return ret, nil
	}
	for i, p := range h.partitions {
		pid := p.GetPhysicalID()
		meta := p.Meta()
		kvRange, err := distsql.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges)
		if err != nil {
			return nil, err
		}
		ret[i] = kvRange.AppendSelfTo(ret[i])
	}
	return ret, nil
}

// newClosestReadAdjuster let the request be sent to closest replica(within the same zone)
// if response size exceeds certain threshold. Returns nil when the request is
// not in closest-adaptive replica-read mode.
func newClosestReadAdjuster(ctx sessionctx.Context, req *kv.Request, netDataSize float64) kv.CoprRequestAdjuster {
	if req.ReplicaRead != kv.ReplicaReadClosestAdaptive {
		return nil
	}
	return func(req *kv.Request, copTaskCount int) bool {
		// copTaskCount is the number of coprocessor requests
		if int64(netDataSize/float64(copTaskCount)) >= ctx.GetSessionVars().ReplicaClosestReadThreshold {
			req.MatchStoreLabels = append(req.MatchStoreLabels, &metapb.StoreLabel{
				Key:   placement.DCLabelKey,
				Value: config.GetTxnScopeFromConfig(),
			})
			return true
		}
		// reset to read from leader when the data size is small.
		req.ReplicaRead = kv.ReplicaReadLeader
		return false
	}
}

// buildTableReaderBase fills the request builder with the executor's settings,
// builds the kv request, opens a select result, and attaches it to e.
func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *TableReaderExecutor, reqBuilderWithRange distsql.RequestBuilder) (*TableReaderExecutor, error) {
	startTS, err := builder.getSnapshotTS()
	if err != nil {
		return nil, err
	}
	kvReq, err := reqBuilderWithRange.
		SetDAGRequest(e.dagPB).
		SetStartTS(startTS).
		SetDesc(e.desc).
		SetKeepOrder(e.keepOrder).
		SetTxnScope(e.txnScope).
		SetReadReplicaScope(e.readReplicaScope).
		SetIsStaleness(e.isStaleness).
		SetFromSessionVars(e.Ctx().GetSessionVars()).
		SetFromInfoSchema(e.Ctx().GetInfoSchema()).
		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &reqBuilderWithRange.Request, e.netDataSize)).
		SetPaging(e.paging).
		SetConnID(e.Ctx().GetSessionVars().ConnectionID).
		Build()
	if err != nil {
		return nil, err
	}
	e.kvRanges = kvReq.KeyRanges.AppendSelfTo(e.kvRanges)
	e.resultHandler = &tableResultHandler{}
	result, err := builder.SelectResult(ctx, builder.ctx, kvReq, retTypes(e), getPhysicalPlanIDs(e.plans), e.ID())
	if err != nil {
		return nil, err
	}
	e.resultHandler.open(nil, result)
	return e, nil
}

// buildTableReaderFromHandles builds a table reader over explicit row handles,
// optionally sorting them first so the scan proceeds in key order.
func (builder *dataReaderBuilder) buildTableReaderFromHandles(ctx context.Context, e *TableReaderExecutor, handles []kv.Handle, canReorderHandles bool) (*TableReaderExecutor, error) {
	if canReorderHandles {
		slices.SortFunc(handles, func(i, j kv.Handle) int {
			return i.Compare(j)
		})
	}
	var b distsql.RequestBuilder
	if len(handles) > 0 {
		if _, ok := handles[0].(kv.PartitionHandle); ok {
			b.SetPartitionsAndHandles(handles)
		} else {
			b.SetTableHandles(getPhysicalTableID(e.table), handles)
		}
	} else {
		// No handles: issue an empty-range request.
		b.SetKeyRanges(nil)
	}
	return builder.buildTableReaderBase(ctx, e, b)
}

// buildTableReaderFromKvRanges builds a table reader over pre-computed kv ranges.
func (builder *dataReaderBuilder) buildTableReaderFromKvRanges(ctx context.Context, e *TableReaderExecutor, ranges []kv.KeyRange) (exec.Executor, error) {
	var b distsql.RequestBuilder
	b.SetKeyRanges(ranges)
	return builder.buildTableReaderBase(ctx, e, b)
}

// buildIndexReaderForIndexJoin builds the inner index reader of an index join.
// It handles four cases: non-partitioned/static-prune tables, global indexes,
// partitioned tables with usable partitions, and the empty-partition case
// (which degenerates to a TableDualExec).
func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexReader,
	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memoryTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	e, err := buildNoRangeIndexReader(builder.executorBuilder, v)
	if err != nil {
		return nil, err
	}
	tbInfo := e.table.Meta()
	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
		kvRanges, err := buildKvRangesForIndexJoin(e.Ctx(), e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memoryTracker, interruptSignal)
		if err != nil {
			return nil, err
		}
		err = e.open(ctx, kvRanges)
		return e, err
	}
	is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan)
	if is.Index.Global {
		// Global index: one index covers all partitions; prune only to build
		// the partition-id filter map.
		tmp, ok := builder.is.TableByID(tbInfo.ID)
		if !ok {
			return nil, infoschema.ErrTableNotExists
		}
		tbl, ok := tmp.(table.PartitionedTable)
		if !ok {
			return nil, exeerrors.ErrBuildExecutor
		}
		e.partitionIDMap, err = getPartitionIdsAfterPruning(builder.ctx, tbl, &v.PartitionInfo)
		if err != nil {
			return nil, err
		}
		if e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil {
			return nil, err
		}
		if err := e.Open(ctx); err != nil {
			return nil, err
		}
		return e, nil
	}
	tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID)
	usedPartition, canPrune, contentPos, err := builder.prunePartitionForInnerExecutor(tbl, &v.PartitionInfo, lookUpContents)
	if err != nil {
		return nil, err
	}
	if len(usedPartition) != 0 {
		if canPrune {
			// Per-partition ranges can be computed; use a range map keyed by partition.
			rangeMap, err := buildIndexRangeForEachPartition(e.Ctx(), usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
			if err != nil {
				return nil, err
			}
			e.partitions = usedPartition
			e.ranges = indexRanges
			e.partRangeMap = rangeMap
		} else {
			e.partitions = usedPartition
			if e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil {
				return nil, err
			}
		}
		if err := e.Open(ctx); err != nil {
			return nil, err
		}
		return e, nil
	}
	// All partitions pruned away: nothing to read.
	ret := &TableDualExec{BaseExecutor: *e.Base()}
	err = ret.Open(ctx)
	return ret, err
}

// buildIndexLookUpReaderForIndexJoin builds the inner index-lookup reader of
// an index join, mirroring buildIndexReaderForIndexJoin's four cases.
func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexLookUpReader,
	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	if builder.Ti != nil {
		builder.Ti.UseTableLookUp.Store(true)
	}
	e, err := buildNoRangeIndexLookUpReader(builder.executorBuilder, v)
	if err != nil {
		return nil, err
	}
	tbInfo := e.table.Meta()
	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
		e.kvRanges, err = buildKvRangesForIndexJoin(e.Ctx(), getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
		if err != nil {
			return nil, err
		}
		err = e.open(ctx)
		return e, err
	}
	is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan)
	ts := v.TablePlans[0].(*plannercore.PhysicalTableScan)
	if is.Index.Global {
		tmp, ok := builder.is.TableByID(ts.Table.ID)
		if !ok {
			return nil, infoschema.ErrTableNotExists
		}
		tbl, ok := tmp.(table.PartitionedTable)
		if !ok {
			return nil, exeerrors.ErrBuildExecutor
		}
		e.partitionIDMap, err = getPartitionIdsAfterPruning(builder.ctx, tbl, &v.PartitionInfo)
		if err != nil {
			return nil, err
		}
		e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc)
		if err != nil {
			return nil, err
		}
		if err := e.Open(ctx); err != nil {
			return nil, err
		}
		return e, err
	}
	tbl, _ := builder.executorBuilder.is.TableByID(tbInfo.ID)
	usedPartition, canPrune, contentPos, err := builder.prunePartitionForInnerExecutor(tbl, &v.PartitionInfo, lookUpContents)
	if err != nil {
		return nil, err
	}
	if len(usedPartition) != 0 {
		if canPrune {
			rangeMap, err := buildIndexRangeForEachPartition(e.Ctx(), usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
			if err != nil {
				return nil, err
			}
			e.prunedPartitions = usedPartition
			e.ranges = indexRanges
			e.partitionRangeMap = rangeMap
		} else {
			e.prunedPartitions = usedPartition
			e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc)
			if err != nil {
				return nil, err
			}
		}
		e.partitionTableMode = true
		if err := e.Open(ctx); err != nil {
			return nil, err
		}
		return e, err
	}
	// All partitions pruned away: nothing to read.
	ret := &TableDualExec{BaseExecutor: *e.Base()}
	err = ret.Open(ctx)
	return ret, err
}

// buildProjectionForIndexJoin builds a ProjectionExec over the index join's
// inner reader; only IndexLookUpReader and TableReader children are supported.
func (builder *dataReaderBuilder) buildProjectionForIndexJoin(ctx context.Context, v *plannercore.PhysicalProjection,
	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int,
	cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
	var (
		childExec exec.Executor
		err       error
	)
	switch op := v.Children()[0].(type) {
	case *plannercore.PhysicalIndexLookUpReader:
		if childExec, err = builder.buildIndexLookUpReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal); err != nil {
			return nil, err
		}
	case *plannercore.PhysicalTableReader:
		if childExec, err = builder.buildTableReaderForIndexJoin(ctx, op, lookUpContents, indexRanges, keyOff2IdxOff, cwc, true, memTracker, interruptSignal); err != nil {
			return nil, err
		}
	default:
		return nil, errors.Errorf("inner child of Projection should be IndexLookupReader/TableReader, but got %T", v.Children()[0])
	}
	e := &ProjectionExec{
		BaseExecutor:     exec.NewBaseExecutor(builder.ctx, v.Schema(), v.ID(), childExec),
		numWorkers:       int64(builder.ctx.GetSessionVars().ProjectionConcurrency()),
		evaluatorSuit:    expression.NewEvaluatorSuite(v.Exprs, v.AvoidColumnEvaluator),
		calculateNoDelay: v.CalculateNoDelay,
	}
	// If the calculation row count for this Projection operator is smaller
	// than a Chunk size, we turn back to the un-parallel Projection
	// implementation to reduce the goroutine overhead.
	if int64(v.StatsCount()) < int64(builder.ctx.GetSessionVars().MaxChunkSize) {
		e.numWorkers = 0
	}
	err = e.open(ctx)
	return e, err
}

// buildRangesForIndexJoin builds kv ranges for index join when the inner plan is index scan plan.
func buildRangesForIndexJoin(ctx sessionctx.Context, lookUpContents []*indexJoinLookUpContent, ranges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager) ([]*ranger.Range, error) { retRanges := make([]*ranger.Range, 0, len(ranges)*len(lookUpContents)) lastPos := len(ranges[0].LowVal) - 1 tmpDatumRanges := make([]*ranger.Range, 0, len(lookUpContents)) for _, content := range lookUpContents { for _, ran := range ranges { for keyOff, idxOff := range keyOff2IdxOff { ran.LowVal[idxOff] = content.keys[keyOff] ran.HighVal[idxOff] = content.keys[keyOff] } } if cwc == nil { // A deep copy is need here because the old []*range.Range is overwriten for _, ran := range ranges { retRanges = append(retRanges, ran.Clone()) } continue } nextColRanges, err := cwc.BuildRangesByRow(ctx, content.row) if err != nil { return nil, err } for _, nextColRan := range nextColRanges { for _, ran := range ranges { ran.LowVal[lastPos] = nextColRan.LowVal[0] ran.HighVal[lastPos] = nextColRan.HighVal[0] ran.LowExclude = nextColRan.LowExclude ran.HighExclude = nextColRan.HighExclude ran.Collators = nextColRan.Collators tmpDatumRanges = append(tmpDatumRanges, ran.Clone()) } } } if cwc == nil { return retRanges, nil } return ranger.UnionRanges(ctx, tmpDatumRanges, true) } // buildKvRangesForIndexJoin builds kv ranges for index join when the inner plan is index scan plan. 
// buildKvRangesForIndexJoin builds kv ranges for an index join when the inner
// plan is an index (or common-handle) scan. indexID == -1 selects the
// common-handle encoding. Memory for the produced ranges is charged to
// memTracker when provided.
func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, lookUpContents []*indexJoinLookUpContent,
	ranges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (_ []kv.KeyRange, err error) {
	kvRanges := make([]kv.KeyRange, 0, len(ranges)*len(lookUpContents))
	if len(ranges) == 0 {
		return []kv.KeyRange{}, nil
	}
	lastPos := len(ranges[0].LowVal) - 1
	sc := ctx.GetSessionVars().StmtCtx
	tmpDatumRanges := make([]*ranger.Range, 0, len(lookUpContents))
	for _, content := range lookUpContents {
		// Fill the shared range template with this outer row's join-key values.
		for _, ran := range ranges {
			for keyOff, idxOff := range keyOff2IdxOff {
				ran.LowVal[idxOff] = content.keys[keyOff]
				ran.HighVal[idxOff] = content.keys[keyOff]
			}
		}
		if cwc == nil {
			// Index id is -1 means it's a common handle.
			var tmpKvRanges *kv.KeyRanges
			var err error
			if indexID == -1 {
				tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges)
			} else {
				tmpKvRanges, err = distsql.IndexRangesToKVRangesWithInterruptSignal(sc, tableID, indexID, ranges, memTracker, interruptSignal)
			}
			if err != nil {
				return nil, err
			}
			kvRanges = tmpKvRanges.AppendSelfTo(kvRanges)
			continue
		}
		nextColRanges, err := cwc.BuildRangesByRow(ctx, content.row)
		if err != nil {
			return nil, err
		}
		for _, nextColRan := range nextColRanges {
			for _, ran := range ranges {
				ran.LowVal[lastPos] = nextColRan.LowVal[0]
				ran.HighVal[lastPos] = nextColRan.HighVal[0]
				ran.LowExclude = nextColRan.LowExclude
				ran.HighExclude = nextColRan.HighExclude
				ran.Collators = nextColRan.Collators
				tmpDatumRanges = append(tmpDatumRanges, ran.Clone())
			}
		}
	}
	if len(kvRanges) != 0 && memTracker != nil {
		memTracker.Consume(int64(2 * cap(kvRanges[0].StartKey) * len(kvRanges)))
	}
	if len(tmpDatumRanges) != 0 && memTracker != nil {
		memTracker.Consume(2 * int64(len(tmpDatumRanges)) * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges)))
	}
	if cwc == nil {
		// The key ranges must be in key order for the scan.
		slices.SortFunc(kvRanges, func(i, j kv.KeyRange) int {
			return bytes.Compare(i.StartKey, j.StartKey)
		})
		return kvRanges, nil
	}
	tmpDatumRanges, err = ranger.UnionRanges(ctx, tmpDatumRanges, true)
	if err != nil {
		return nil, err
	}
	// Index id is -1 means it's a common handle.
	if indexID == -1 {
		tmpKeyRanges, err := distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges)
		return tmpKeyRanges.FirstPartitionRange(), err
	}
	tmpKeyRanges, err := distsql.IndexRangesToKVRangesWithInterruptSignal(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, memTracker, interruptSignal)
	return tmpKeyRanges.FirstPartitionRange(), err
}

// buildWindow builds a window-function executor: it collects partition-by and
// order-by columns, instantiates the window aggregate functions, and picks the
// pipelined implementation or a frame-specific processor.
func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) exec.Executor {
	childExec := b.build(v.Children()[0])
	if b.err != nil {
		return nil
	}
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec)
	groupByItems := make([]expression.Expression, 0, len(v.PartitionBy))
	for _, item := range v.PartitionBy {
		groupByItems = append(groupByItems, item.Col)
	}
	orderByCols := make([]*expression.Column, 0, len(v.OrderBy))
	for _, item := range v.OrderBy {
		orderByCols = append(orderByCols, item.Col)
	}
	windowFuncs := make([]aggfuncs.AggFunc, 0, len(v.WindowFuncDescs))
	partialResults := make([]aggfuncs.PartialResult, 0, len(v.WindowFuncDescs))
	// Window-function outputs occupy the trailing schema columns.
	resultColIdx := v.Schema().Len() - len(v.WindowFuncDescs)
	for _, desc := range v.WindowFuncDescs {
		aggDesc, err := aggregation.NewAggFuncDescForWindowFunc(b.ctx, desc, false)
		if err != nil {
			b.err = err
			return nil
		}
		agg := aggfuncs.BuildWindowFunctions(b.ctx, aggDesc, resultColIdx, orderByCols)
		windowFuncs = append(windowFuncs, agg)
		partialResult, _ := agg.AllocPartialResult()
		partialResults = append(partialResults, partialResult)
		resultColIdx++
	}
	if b.ctx.GetSessionVars().EnablePipelinedWindowExec {
		// Note: this local `exec` shadows the imported exec package inside
		// this branch.
		exec := &PipelinedWindowExec{
			BaseExecutor:   base,
			groupChecker:   vecgroupchecker.NewVecGroupChecker(b.ctx, groupByItems),
			numWindowFuncs: len(v.WindowFuncDescs),
		}
		exec.windowFuncs = windowFuncs
		exec.partialResults = partialResults
		if v.Frame == nil {
			// No frame clause: the frame covers the whole partition.
			exec.start = &plannercore.FrameBound{
				Type:      ast.Preceding,
				UnBounded: true,
			}
			exec.end = &plannercore.FrameBound{
				Type:      ast.Following,
				UnBounded: true,
			}
		} else {
			exec.start = v.Frame.Start
			exec.end = v.Frame.End
			if v.Frame.Type == ast.Ranges {
				cmpResult := int64(-1)
				if len(v.OrderBy) > 0 && v.OrderBy[0].Desc {
					cmpResult = 1
				}
				exec.orderByCols = orderByCols
				exec.expectedCmpResult = cmpResult
				exec.isRangeFrame = true
			}
		}
		return exec
	}
	var processor windowProcessor
	if v.Frame == nil {
		processor = &aggWindowProcessor{
			windowFuncs:    windowFuncs,
			partialResults: partialResults,
		}
	} else if v.Frame.Type == ast.Rows {
		processor = &rowFrameWindowProcessor{
			windowFuncs:    windowFuncs,
			partialResults: partialResults,
			start:          v.Frame.Start,
			end:            v.Frame.End,
		}
	} else {
		cmpResult := int64(-1)
		if len(v.OrderBy) > 0 && v.OrderBy[0].Desc {
			cmpResult = 1
		}
		processor = &rangeFrameWindowProcessor{
			windowFuncs:       windowFuncs,
			partialResults:    partialResults,
			start:             v.Frame.Start,
			end:               v.Frame.End,
			orderByCols:       orderByCols,
			expectedCmpResult: cmpResult,
		}
	}
	return &WindowExec{BaseExecutor: base,
		processor:      processor,
		groupChecker:   vecgroupchecker.NewVecGroupChecker(b.ctx, groupByItems),
		numWindowFuncs: len(v.WindowFuncDescs),
	}
}

// buildShuffle builds a ShuffleExec: splitters to partition rows, data-source
// executors, and one worker pipeline per unit of concurrency. Each worker gets
// its own receiver set wired in through PhysicalShuffleReceiverStub.
func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleExec {
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	shuffle := &ShuffleExec{
		BaseExecutor: base,
		concurrency:  v.Concurrency,
	}
	// 1. initialize the splitters
	splitters := make([]partitionSplitter, len(v.ByItemArrays))
	switch v.SplitterType {
	case plannercore.PartitionHashSplitterType:
		for i, byItems := range v.ByItemArrays {
			splitters[i] = buildPartitionHashSplitter(shuffle.concurrency, byItems)
		}
	case plannercore.PartitionRangeSplitterType:
		for i, byItems := range v.ByItemArrays {
			splitters[i] = buildPartitionRangeSplitter(b.ctx, shuffle.concurrency, byItems)
		}
	default:
		panic("Not implemented. Should not reach here.")
	}
	shuffle.splitters = splitters
	// 2. initialize the data sources (build the data sources from physical plan to executors)
	shuffle.dataSources = make([]exec.Executor, len(v.DataSources))
	for i, dataSource := range v.DataSources {
		shuffle.dataSources[i] = b.build(dataSource)
		if b.err != nil {
			return nil
		}
	}
	// 3. initialize the workers
	head := v.Children()[0]
	// A `PhysicalShuffleReceiverStub` for every worker have the same `DataSource` but different `Receiver`.
	// We preallocate `PhysicalShuffleReceiverStub`s here and reuse them below.
	stubs := make([]*plannercore.PhysicalShuffleReceiverStub, 0, len(v.DataSources))
	for _, dataSource := range v.DataSources {
		stub := plannercore.PhysicalShuffleReceiverStub{
			DataSource: dataSource,
		}.Init(b.ctx, dataSource.StatsInfo(), dataSource.SelectBlockOffset(), nil)
		stub.SetSchema(dataSource.Schema())
		stubs = append(stubs, stub)
	}
	shuffle.workers = make([]*shuffleWorker, shuffle.concurrency)
	for i := range shuffle.workers {
		receivers := make([]*shuffleReceiver, len(v.DataSources))
		for j, dataSource := range v.DataSources {
			receivers[j] = &shuffleReceiver{
				BaseExecutor: exec.NewBaseExecutor(b.ctx, dataSource.Schema(), stubs[j].ID()),
			}
		}
		w := &shuffleWorker{
			receivers: receivers,
		}
		for j := range v.DataSources {
			stub := stubs[j]
			// The receiver pointer is smuggled through the plan as an
			// unsafe.Pointer; buildShuffleReceiverStub converts it back.
			stub.Receiver = (unsafe.Pointer)(receivers[j])
			v.Tails[j].SetChildren(stub)
		}
		w.childExec = b.build(head)
		if b.err != nil {
			return nil
		}
		shuffle.workers[i] = w
	}
	return shuffle
}

// buildShuffleReceiverStub recovers the shuffleReceiver planted by buildShuffle.
func (*executorBuilder) buildShuffleReceiverStub(v *plannercore.PhysicalShuffleReceiverStub) *shuffleReceiver {
	return (*shuffleReceiver)(v.Receiver)
}

// buildSQLBindExec builds the executor for CREATE/DROP/... BINDING statements.
func (b *executorBuilder) buildSQLBindExec(v *plannercore.SQLBindPlan) exec.Executor {
	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
	base.SetInitCap(chunk.ZeroCapacity)
	e := &SQLBindExec{
		BaseExecutor: base,
		sqlBindOp:    v.SQLBindOp,
		normdOrigSQL: v.NormdOrigSQL,
		bindSQL:      v.BindSQL,
		charset:      v.Charset,
		collation:    v.Collation,
		db:           v.Db,
		isGlobal:
v.IsGlobal,
		bindAst:    v.BindStmt,
		newStatus:  v.NewStatus,
		source:     v.Source,
		sqlDigest:  v.SQLDigest,
		planDigest: v.PlanDigest,
	}
	return e
}

// NewRowDecoder creates a chunk decoder for new row format row value decode.
// Columns absent from the stored row fall back to their origin default value;
// synthetic columns (negative IDs such as ExtraHandleID) decode to NULL.
func NewRowDecoder(ctx sessionctx.Context, schema *expression.Schema, tbl *model.TableInfo) *rowcodec.ChunkDecoder {
	getColInfoByID := func(tbl *model.TableInfo, colID int64) *model.ColumnInfo {
		for _, col := range tbl.Columns {
			if col.ID == colID {
				return col
			}
		}
		return nil
	}
	var pkCols []int64
	reqCols := make([]rowcodec.ColInfo, len(schema.Columns))
	for i := range schema.Columns {
		idx, col := i, schema.Columns[i]
		isPK := (tbl.PKIsHandle && mysql.HasPriKeyFlag(col.RetType.GetFlag())) || col.ID == model.ExtraHandleID
		if isPK {
			pkCols = append(pkCols, col.ID)
		}
		isGeneratedCol := false
		if col.VirtualExpr != nil {
			isGeneratedCol = true
		}
		reqCols[idx] = rowcodec.ColInfo{
			ID:            col.ID,
			VirtualGenCol: isGeneratedCol,
			Ft:            col.RetType,
		}
	}
	if len(pkCols) == 0 {
		pkCols = tables.TryGetCommonPkColumnIds(tbl)
		if len(pkCols) == 0 {
			// -1 is the sentinel for "no primary-key column ids".
			pkCols = []int64{-1}
		}
	}
	defVal := func(i int, chk *chunk.Chunk) error {
		if reqCols[i].ID < 0 {
			// model.ExtraHandleID, ExtraPidColID, ExtraPhysTblID... etc
			// Don't set the default value for that column.
			chk.AppendNull(i)
			return nil
		}
		ci := getColInfoByID(tbl, reqCols[i].ID)
		d, err := table.GetColOriginDefaultValue(ctx, ci)
		if err != nil {
			return err
		}
		chk.AppendDatum(i, &d)
		return nil
	}
	return rowcodec.NewChunkDecoder(reqCols, pkCols, defVal, ctx.GetSessionVars().Location())
}

// buildBatchPointGet builds a BatchPointGetExec for a multi-point lookup plan.
// It configures the snapshot (replica-read adjuster, runtime stats, cached
// tables), disables locking for temporary tables, and deduplicates the handles
// or unique-index values to look up. Errors are reported through b.err.
func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan) exec.Executor {
	var err error
	if err = b.validCanReadTemporaryOrCacheTable(plan.TblInfo); err != nil {
		b.err = err
		return nil
	}
	if plan.Lock && !b.inSelectLockStmt {
		b.inSelectLockStmt = true
		defer func() {
			b.inSelectLockStmt = false
		}()
	}
	decoder := NewRowDecoder(b.ctx, plan.Schema(), plan.TblInfo)
	e := &BatchPointGetExec{
		BaseExecutor: exec.NewBaseExecutor(b.ctx, plan.Schema(), plan.ID()),
		tblInfo:      plan.TblInfo,
		idxInfo:      plan.IndexInfo,
		rowDecoder:   decoder,
		keepOrder:    plan.KeepOrder,
		desc:         plan.Desc,
		lock:         plan.Lock,
		waitTime:     plan.LockWaitTime,
		partExpr:     plan.PartitionExpr,
		partPos:      plan.PartitionColPos,
		planPhysIDs:  plan.PartitionIDs,
		singlePart:   plan.SinglePart,
		partTblID:    plan.PartTblID,
		columns:      plan.Columns,
	}
	e.snapshot, err = b.getSnapshot()
	if err != nil {
		b.err = err
		return nil
	}
	if e.Ctx().GetSessionVars().IsReplicaReadClosestAdaptive() {
		e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.Ctx(), plan.GetAvgRowSize()))
	}
	if e.RuntimeStats() != nil {
		snapshotStats := &txnsnapshot.SnapshotRuntimeStats{}
		e.stats = &runtimeStatsWithSnapshot{
			SnapshotRuntimeStats: snapshotStats,
		}
		e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats)
	}
	if plan.IndexInfo != nil {
		sctx := b.ctx.GetSessionVars().StmtCtx
		sctx.IndexNames = append(sctx.IndexNames, plan.TblInfo.Name.O+":"+plan.IndexInfo.Name.O)
	}
	failpoint.Inject("assertBatchPointReplicaOption", func(val failpoint.Value) {
		assertScope := val.(string)
		if e.Ctx().GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != b.readReplicaScope {
			panic("batch point get replica option fail")
		}
	})
	snapshotTS, err := b.getSnapshotTS()
	if err != nil {
		b.err = err
		return nil
	}
	if plan.TblInfo.TableCacheStatusType == model.TableCacheStatusEnable {
		if cacheTable := b.getCacheTable(plan.TblInfo, snapshotTS); cacheTable != nil {
			e.snapshot = cacheTableSnapshot{e.snapshot, cacheTable}
		}
	}
	if plan.TblInfo.TempTableType != model.TempTableNone {
		// Temporary table should not do any lock operations
		e.lock = false
		e.waitTime = 0
	}
	if e.lock {
		b.hasLock = true
	}
	var capacity int
	if plan.IndexInfo != nil && !isCommonHandleRead(plan.TblInfo, plan.IndexInfo) {
		e.idxVals = plan.IndexValues
		capacity = len(e.idxVals)
	} else {
		// `SELECT a FROM t WHERE a IN (1, 1, 2, 1, 2)` should not return duplicated rows
		handles := make([]kv.Handle, 0, len(plan.Handles))
		dedup := kv.NewHandleMap()
		if plan.IndexInfo == nil {
			for _, handle := range plan.Handles {
				if _, found := dedup.Get(handle); found {
					continue
				}
				dedup.Set(handle, true)
				handles = append(handles, handle)
			}
		} else {
			// Unique-index values are encoded to common handles, skipping
			// NULL-containing tuples (they can never match a unique key).
			for _, value := range plan.IndexValues {
				if datumsContainNull(value) {
					continue
				}
				handleBytes, err := EncodeUniqueIndexValuesForKey(e.Ctx(), e.tblInfo, plan.IndexInfo, value)
				if err != nil {
					if kv.ErrNotExist.Equal(err) {
						continue
					}
					b.err = err
					return nil
				}
				handle, err := kv.NewCommonHandle(handleBytes)
				if err != nil {
					b.err = err
					return nil
				}
				if _, found := dedup.Get(handle); found {
					continue
				}
				dedup.Set(handle, true)
				handles = append(handles, handle)
			}
		}
		e.handles = handles
		capacity = len(e.handles)
	}
	e.Base().SetInitCap(capacity)
	e.Base().SetMaxChunkSize(capacity)
	e.buildVirtualColumnInfo()
	return e
}

// newReplicaReadAdjuster switches a batch point get to closest-replica (mixed)
// reads once the estimated response size crosses the session threshold.
// NOTE(review): this function continues past the end of this chunk.
func newReplicaReadAdjuster(ctx sessionctx.Context, avgRowSize float64) txnkv.ReplicaReadAdjuster {
	return func(count int) (tikv.StoreSelectorOption, clientkv.ReplicaReadType) {
		if int64(avgRowSize*float64(count)) >= ctx.GetSessionVars().ReplicaClosestReadThreshold {
			return tikv.WithMatchLabels([]*metapb.StoreLabel{
				{
					Key:   placement.DCLabelKey,
					Value: config.GetTxnScopeFromConfig(),
				},
			}), clientkv.ReplicaReadMixed
		}
		// fallback to read from leader if the
request is small return nil, clientkv.ReplicaReadLeader } } func isCommonHandleRead(tbl *model.TableInfo, idx *model.IndexInfo) bool { return tbl.IsCommonHandle && idx.Primary } func getPhysicalTableID(t table.Table) int64 { if p, ok := t.(table.PhysicalTable); ok { return p.GetPhysicalID() } return t.Meta().ID } func (b *executorBuilder) buildAdminShowTelemetry(v *plannercore.AdminShowTelemetry) exec.Executor { return &AdminShowTelemetryExec{BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())} } func (b *executorBuilder) buildAdminResetTelemetryID(v *plannercore.AdminResetTelemetryID) exec.Executor { return &AdminResetTelemetryIDExec{BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())} } func (builder *dataReaderBuilder) partitionPruning(tbl table.PartitionedTable, conds []expression.Expression, partitionNames []model.CIStr, columns []*expression.Column, columnNames types.NameSlice) ([]table.PhysicalTable, error) { builder.once.Do(func() { condPruneResult, err := partitionPruning(builder.executorBuilder.ctx, tbl, conds, partitionNames, columns, columnNames) builder.once.condPruneResult = condPruneResult builder.once.err = err }) return builder.once.condPruneResult, builder.once.err } func partitionPruning(ctx sessionctx.Context, tbl table.PartitionedTable, conds []expression.Expression, partitionNames []model.CIStr, columns []*expression.Column, columnNames types.NameSlice) ([]table.PhysicalTable, error) { idxArr, err := plannercore.PartitionPruning(ctx, tbl, conds, partitionNames, columns, columnNames) if err != nil { return nil, err } pi := tbl.Meta().GetPartitionInfo() var ret []table.PhysicalTable if fullRangePartition(idxArr) { ret = make([]table.PhysicalTable, 0, len(pi.Definitions)) for _, def := range pi.Definitions { p := tbl.GetPartition(def.ID) ret = append(ret, p) } } else { ret = make([]table.PhysicalTable, 0, len(idxArr)) for _, idx := range idxArr { pid := pi.Definitions[idx].ID p := tbl.GetPartition(pid) ret = append(ret, p) } } 
return ret, nil } func getPartitionIdsAfterPruning(ctx sessionctx.Context, tbl table.PartitionedTable, partInfo *plannercore.PartitionInfo) (map[int64]struct{}, error) { if partInfo == nil { return nil, errors.New("partInfo in getPartitionIdsAfterPruning must not be nil") } idxArr, err := plannercore.PartitionPruning(ctx, tbl, partInfo.PruningConds, partInfo.PartitionNames, partInfo.Columns, partInfo.ColumnNames) if err != nil { return nil, err } var ret map[int64]struct{} pi := tbl.Meta().GetPartitionInfo() if fullRangePartition(idxArr) { ret = make(map[int64]struct{}, len(pi.Definitions)) for _, def := range pi.Definitions { ret[def.ID] = struct{}{} } } else { ret = make(map[int64]struct{}, len(idxArr)) for _, idx := range idxArr { pid := pi.Definitions[idx].ID ret[pid] = struct{}{} } } return ret, nil } func fullRangePartition(idxArr []int) bool { return len(idxArr) == 1 && idxArr[0] == plannercore.FullRange } type emptySampler struct{} func (*emptySampler) writeChunk(_ *chunk.Chunk) error { return nil } func (*emptySampler) finished() bool { return true } func (b *executorBuilder) buildTableSample(v *plannercore.PhysicalTableSample) *TableSampleExecutor { startTS, err := b.getSnapshotTS() if err != nil { b.err = err return nil } e := &TableSampleExecutor{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.TableInfo, startTS: startTS, } tblInfo := v.TableInfo.Meta() if tblInfo.TempTableType != model.TempTableNone { if tblInfo.TempTableType != model.TempTableGlobal { b.err = errors.New("TABLESAMPLE clause can not be applied to local temporary tables") return nil } e.sampler = &emptySampler{} } else if v.TableSampleInfo.AstNode.SampleMethod == ast.SampleMethodTypeTiDBRegion { e.sampler = newTableRegionSampler( b.ctx, v.TableInfo, startTS, v.TableSampleInfo.Partitions, v.Schema(), v.TableSampleInfo.FullSchema, e.RetFieldTypes(), v.Desc) } return e } func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) exec.Executor { if b.Ti != nil { 
b.Ti.UseNonRecursive = true } if v.RecurPlan != nil && b.Ti != nil { b.Ti.UseRecursive = true } storageMap, ok := b.ctx.GetSessionVars().StmtCtx.CTEStorageMap.(map[int]*CTEStorages) if !ok { b.err = errors.New("type assertion for CTEStorageMap failed") return nil } chkSize := b.ctx.GetSessionVars().MaxChunkSize // iterOutTbl will be constructed in CTEExec.Open(). var resTbl cteutil.Storage var iterInTbl cteutil.Storage var producer *cteProducer storages, ok := storageMap[v.CTE.IDForStorage] if ok { // Storage already setup. resTbl = storages.ResTbl iterInTbl = storages.IterInTbl producer = storages.Producer } else { if v.SeedPlan == nil { b.err = errors.New("cte.seedPlan cannot be nil") return nil } // Build seed part. corCols := plannercore.ExtractOuterApplyCorrelatedCols(v.SeedPlan) seedExec := b.build(v.SeedPlan) if b.err != nil { return nil } // Setup storages. tps := seedExec.Base().RetFieldTypes() resTbl = cteutil.NewStorageRowContainer(tps, chkSize) if err := resTbl.OpenAndRef(); err != nil { b.err = err return nil } iterInTbl = cteutil.NewStorageRowContainer(tps, chkSize) if err := iterInTbl.OpenAndRef(); err != nil { b.err = err return nil } storageMap[v.CTE.IDForStorage] = &CTEStorages{ResTbl: resTbl, IterInTbl: iterInTbl} // Build recursive part. var recursiveExec exec.Executor if v.RecurPlan != nil { recursiveExec = b.build(v.RecurPlan) if b.err != nil { return nil } corCols = append(corCols, plannercore.ExtractOuterApplyCorrelatedCols(v.RecurPlan)...) 
} var sel []int if v.CTE.IsDistinct { sel = make([]int, chkSize) for i := 0; i < chkSize; i++ { sel[i] = i } } var corColHashCodes [][]byte for _, corCol := range corCols { corColHashCodes = append(corColHashCodes, getCorColHashCode(corCol)) } producer = &cteProducer{ ctx: b.ctx, seedExec: seedExec, recursiveExec: recursiveExec, resTbl: resTbl, iterInTbl: iterInTbl, isDistinct: v.CTE.IsDistinct, sel: sel, hasLimit: v.CTE.HasLimit, limitBeg: v.CTE.LimitBeg, limitEnd: v.CTE.LimitEnd, corCols: corCols, corColHashCodes: corColHashCodes, } storageMap[v.CTE.IDForStorage].Producer = producer } return &CTEExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), producer: producer, } } func (b *executorBuilder) buildCTETableReader(v *plannercore.PhysicalCTETable) exec.Executor { storageMap, ok := b.ctx.GetSessionVars().StmtCtx.CTEStorageMap.(map[int]*CTEStorages) if !ok { b.err = errors.New("type assertion for CTEStorageMap failed") return nil } storages, ok := storageMap[v.IDForStorage] if !ok { b.err = errors.Errorf("iterInTbl should already be set up by CTEExec(id: %d)", v.IDForStorage) return nil } return &CTETableReaderExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), iterInTbl: storages.IterInTbl, chkIdx: 0, } } func (b *executorBuilder) validCanReadTemporaryOrCacheTable(tbl *model.TableInfo) error { err := b.validCanReadTemporaryTable(tbl) if err != nil { return err } return b.validCanReadCacheTable(tbl) } func (b *executorBuilder) validCanReadCacheTable(tbl *model.TableInfo) error { if tbl.TableCacheStatusType == model.TableCacheStatusDisable { return nil } sessionVars := b.ctx.GetSessionVars() // Temporary table can't switch into cache table. 
so the following code will not cause confusion if sessionVars.TxnCtx.IsStaleness || b.isStaleness { return errors.Trace(errors.New("can not stale read cache table")) } return nil } func (b *executorBuilder) validCanReadTemporaryTable(tbl *model.TableInfo) error { if tbl.TempTableType == model.TempTableNone { return nil } // Some tools like dumpling use history read to dump all table's records and will be fail if we return an error. // So we do not check SnapshotTS here sessionVars := b.ctx.GetSessionVars() if tbl.TempTableType == model.TempTableLocal && sessionVars.SnapshotTS != 0 { return errors.New("can not read local temporary table when 'tidb_snapshot' is set") } if sessionVars.TxnCtx.IsStaleness || b.isStaleness { return errors.New("can not stale read temporary table") } return nil } func (b *executorBuilder) getCacheTable(tblInfo *model.TableInfo, startTS uint64) kv.MemBuffer { tbl, ok := b.is.TableByID(tblInfo.ID) if !ok { b.err = errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs(b.ctx.GetSessionVars().CurrentDB, tblInfo.Name)) return nil } sessVars := b.ctx.GetSessionVars() leaseDuration := time.Duration(variable.TableCacheLease.Load()) * time.Second cacheData, loading := tbl.(table.CachedTable).TryReadFromCache(startTS, leaseDuration) if cacheData != nil { sessVars.StmtCtx.ReadFromTableCache = true return cacheData } else if loading { return nil } else { if !b.ctx.GetSessionVars().StmtCtx.InExplainStmt && !b.inDeleteStmt && !b.inUpdateStmt { tbl.(table.CachedTable).UpdateLockForRead(context.Background(), b.ctx.GetStore(), startTS, leaseDuration) } } return nil } func (b *executorBuilder) buildCompactTable(v *plannercore.CompactTable) exec.Executor { if v.ReplicaKind != ast.CompactReplicaKindTiFlash && v.ReplicaKind != ast.CompactReplicaKindAll { b.err = errors.Errorf("compact %v replica is not supported", strings.ToLower(string(v.ReplicaKind))) return nil } store := b.ctx.GetStore() tikvStore, ok := store.(tikv.Storage) if !ok { b.err = 
errors.New("compact tiflash replica can only run with tikv compatible storage") return nil } var partitionIDs []int64 if v.PartitionNames != nil { if v.TableInfo.Partition == nil { b.err = errors.Errorf("table:%s is not a partition table, but user specify partition name list:%+v", v.TableInfo.Name.O, v.PartitionNames) return nil } // use map to avoid FindPartitionDefinitionByName partitionMap := map[string]int64{} for _, partition := range v.TableInfo.Partition.Definitions { partitionMap[partition.Name.L] = partition.ID } for _, partitionName := range v.PartitionNames { partitionID, ok := partitionMap[partitionName.L] if !ok { b.err = table.ErrUnknownPartition.GenWithStackByArgs(partitionName.O, v.TableInfo.Name.O) return nil } partitionIDs = append(partitionIDs, partitionID) } if b.Ti.PartitionTelemetry == nil { b.Ti.PartitionTelemetry = &PartitionTelemetryInfo{} } b.Ti.PartitionTelemetry.UseCompactTablePartition = true } return &CompactTableTiFlashExec{ BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), tableInfo: v.TableInfo, partitionIDs: partitionIDs, tikvStore: tikvStore, } }
package main import ( "github.com/faiface/pixel" "github.com/mateusz/rtsian/piksele" ) const ( MOBS_MISSILE_START_ID = 31 ) type missile struct { mobile piksele.Sprite } func NewMissile(position pixel.Vec, target pixel.Vec) missile { mv := target.Sub(position) m := missile{ mobile: mobile{ position: position, target: target, d: mv.Len(), v: mv.Unit().Scaled(10.0), }, Sprite: piksele.Sprite{ Spriteset: &mobSprites, SpriteID: MOBS_MISSILE_START_ID, }, } return m } func (m *missile) Update(dt float64) { if m.d <= 0.0 { for _, ent := range gameEntities.List { p, okp := ent.(positionable) e, oke := ent.(explodable) if okp && oke { if m.position.X > p.GetX()-8 && m.position.X < p.GetX()+8 && m.position.Y > p.GetY()-8 && m.position.Y < p.GetY()+8 { e.startExploding() } } } gameEntities.Remove(m) gamePositionables.Remove(m) gameDrawables.Remove(m) } m.mobile.Update(dt) } func (m *missile) Draw(t pixel.Target) { m.Spriteset.Sprites[m.SpriteID].Draw(t, rescueBottomPixels.Moved(m.position)) }
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// For each of t test cases, reads two strings and prints "YES" when they
// share at least one common character, "NO" otherwise.
func main() {
	in := bufio.NewReaderSize(os.Stdin, 100001)

	line, _ := in.ReadString('\n')
	cases, _ := strconv.Atoi(strings.TrimSpace(line))

	for i := 0; i < cases; i++ {
		rawA, _ := in.ReadString('\n')
		rawB, _ := in.ReadString('\n')

		// Record every character of the first string.
		seen := make(map[rune]bool)
		for _, ch := range strings.TrimSpace(rawA) {
			seen[ch] = true
		}

		// Scan the second string for any previously-seen character.
		answer := "NO"
		for _, ch := range strings.TrimSpace(rawB) {
			if seen[ch] {
				answer = "YES"
				break
			}
		}
		fmt.Println(answer)
	}
}
package main

import (
	"log"
	"menteslibres.net/gosexy/redis"
	"strings"
)

// Connection parameters for the local redis-server.
var host = "127.0.0.1"
var port = uint(6379)

// Two separate clients: a subscribing connection cannot issue other
// commands, so publishing and consuming each get their own client.
var publisher *redis.Client
var consumer *redis.Client

// main demonstrates redis pub/sub: the consumer subscribes to "channel"
// over a non-blocking connection, the publisher sends five messages from
// another goroutine, and main drains six items from the subscription
// channel (presumably the subscribe acknowledgement plus the five
// messages — verify against the gosexy/redis docs) before quitting.
func main() {
	var err error

	publisher = redis.New()
	err = publisher.Connect(host, port)
	if err != nil {
		log.Fatalf("Publisher failed to connect: %s\n", err.Error())
		return
	}
	log.Println("Publisher connected to redis-server.")

	consumer = redis.New()
	// Non-blocking connect so Subscribe can stream into rec from a goroutine.
	err = consumer.ConnectNonBlock(host, port)
	if err != nil {
		log.Fatalf("Consumer failed to connect: %s\n", err.Error())
		return
	}
	log.Println("Consumer connected to redis-server.")

	rec := make(chan []string)

	log.Printf("Consumer is now inside a go-routine.\n")
	go consumer.Subscribe(rec, "channel")

	log.Printf("Publisher will send messages in another go-routine\n")
	go func() {
		publisher.Publish("channel", "Hello world!")
		publisher.Publish("channel", "Do you know how to count?")
		for i := 0; i < 3; i++ {
			publisher.Publish("channel", i)
		}
	}()

	log.Printf("Reading subscription...\n")
	var ls []string
	for j := 0; j < 6; j++ {
		ls = <-rec
		log.Printf("Consumer received: %v\n", strings.Join(ls, ", "))
	}

	log.Printf("Done, quitting.\n")
	consumer.Quit()
	publisher.Quit()
}
package main import ( "time" "log" "strings" ) type Storm struct { reminderCycle time.Duration config *PluginConfig } type StormMode struct { on bool link string } var lastStorm = time.Now().UTC() var stormTakerMsg = "IS THE STORM TAKER! \n" + "Go forth and summon the engineering powers of the team and transform " + "these requirements into tasks. If the requirements are incomplete or " + "confusing, it is your duty Storm Taker, yours alone, to remedy this. Good luck" var stormMsg = "A storm is upon us! Who will step up and become the storm taker?!" func NewStorm(bot *Hipbot) *Storm { storm := new(Storm) storm.reminderCycle = time.Second //time (s) between storms storm.config = &PluginConfig{ EchoMessages: true, OnlyMentions: false, } return storm } // Configuration func (storm *Storm) Config() *PluginConfig { return storm.config } // Handler func (storm *Storm) Handle(bot *Hipbot, msg *BotMessage) { //check for stormmode fromMyself := strings.HasPrefix(msg.FromNick(), bot.config.Nickname) room := "123823_devops" if msg.Contains("preparing storm") && fromMyself { // send first storms! bot.stormMode.on = true log.Println(bot.stormMode) sendStorm(room, RandomGIF("storm")) sendStorm(room, bot.stormMode.link) sendStorm(room, stormMsg) } else if bot.stormMode.on && !fromMyself { // storm taker! log.Println("Storm Taker!!!!!") stormTaker := msg.FromNick() stormTakerMsg = stormTaker + " " + stormTakerMsg bot.stormMode.on = false sendStorm(room, RandomGIF("herpderp")) sendStorm(room, stormTakerMsg) } // else if time.Since(lastStorm) > storm.reminderCycle { // //update laststorm // lastStorm = time.Now().UTC(); // } return } func sendStorm(room string, message string) { if !strings.Contains(room, "@") { room = room + "@" + ConfDomain } reply := &BotReply{ To: room, Message: message, } bot.replySink <- reply return }
package Server

import (
	"xwork/App/Middleware/recover"
	"xwork/BootStrap/DbBase"
	"xwork/BootStrap/LogInit"
	"context"
	"log"
	"net/http"
	"time"

	"github.com/iris-contrib/middleware/cors"
	"github.com/kataras/iris/v12"
	"github.com/kataras/iris/v12/middleware/logger"
	"github.com/spf13/viper"
)

// InitIris builds and runs the Iris application: log setup, panic
// recovery, request logging, CORS (settings read from viper config),
// route registration, a graceful-shutdown hook, and finally the HTTP
// server on the configured port. It blocks until the server exits.
func InitIris() {
	app := iris.New()
	app = LogInit.LogInit(app)
	// Panic-recovery middleware.
	app.Use(recover.New())
	// Request-logging middleware.
	app.Use(logger.New())
	// CORS: allowed origins/headers/credentials come from configuration;
	// only GET and POST are permitted.
	app.Use(cors.New(cors.Options{
		AllowedOrigins:   viper.GetStringSlice("AllowedOrigins"),
		MaxAge:           viper.GetInt("MaxAge"),
		AllowCredentials: viper.GetBool("AllowCredentials"),
		AllowedMethods:   []string{iris.MethodGet, iris.MethodPost},
		AllowedHeaders:   viper.GetStringSlice("AllowedHeaders"),
	}))
	// Register application routes.
	app = initRouter(app)
	// On interrupt: allow 5s for in-flight requests, close the database,
	// then shut the server down.
	iris.RegisterOnInterrupt(func() {
		timeout := 5 * time.Second
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		DbBase.CloseDB()
		app.Shutdown(ctx)
	})
	// Load server configuration from YAML, then start listening. The
	// default interrupt handler is disabled in favour of the hook above.
	app.Configure(iris.WithConfiguration(iris.YAML("./Config/iris.yaml")))
	log.Fatal(app.Run(iris.Server(&http.Server{Addr: ":" + viper.GetString("Port")}), iris.WithoutInterruptHandler))
}
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//289. Game of Life
//According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
//Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
//Any live cell with fewer than two live neighbors dies, as if caused by under-population.
//Any live cell with two or three live neighbors lives on to the next generation.
//Any live cell with more than three live neighbors dies, as if by over-population.
//Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
//Write a function to compute the next state (after one update) of the board given its current state.
//Follow up:
//Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.
//In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?
//Credits:
//Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
//func gameOfLife(board [][]int) {
//}
// Time Is Money
package models

// User is a minimal user record.
type User struct {
	ID   string // unique user identifier
	Name string // display name
	Age  uint   // user's age
}
package main import ( "adventOfCode/days/day8" "io/ioutil" "log" "os" "os/signal" "strings" "syscall" ) func main() { c := make(chan os.Signal) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { <-c os.Exit(1) }() data := readMap("./days/day8/data.txt") day8.Main(data) } func readMap(filePath string) []string { file, err := ioutil.ReadFile(filePath) if err != nil { log.Fatal(err) } lines := strings.Split(string(file), "\n") return lines }
package main

import "fmt"

func main() {
	list1 := []int{5, 4, 3, 2, 1}
	list2 := []int{3, 5, 432, 122, 12, 314, 1, 9, 21, 13, 8}
	fmt.Println(sortList(list1))
	fmt.Println(sortList(list2))
}

// sortList bubble-sorts xi in place into ascending order and returns it.
// It keeps sweeping until a full pass makes no swaps.
func sortList(xi []int) []int {
	for swapped := true; swapped; {
		swapped = false
		for i := 1; i < len(xi); i++ {
			if xi[i-1] > xi[i] {
				xi[i-1], xi[i] = xi[i], xi[i-1]
				swapped = true
			}
		}
	}
	return xi
}
/**
 * day 01 2020
 * https://adventofcode.com/2020/day/1
 *
 * compile: go build main.go
 * run: ./main < input
 * compile & run: go run main.go < input
 **/
package main

import (
	"bufio"
	"os"
	"fmt"
	"strings"
)

// part1 returns the product of the two DISTINCT entries summing to 2020,
// or -1 when no such pair exists.
// BUG FIX: the original iterated all (i, j) pairs including i == j, so a
// single entry of 1010 could be paired with itself. Starting j at i+1
// guarantees distinct entries (and halves the work).
func part1(n []int) int {
	for i := 0; i < len(n); i++ {
		for j := i + 1; j < len(n); j++ {
			if n[i]+n[j] == 2020 {
				return n[i] * n[j]
			}
		}
	}
	return -1
}

// part2 returns the product of three DISTINCT entries summing to 2020,
// or -1 when no such triple exists. Same distinct-index fix as part1.
func part2(n []int) int {
	for i := 0; i < len(n); i++ {
		for j := i + 1; j < len(n); j++ {
			for k := j + 1; k < len(n); k++ {
				if n[i]+n[j]+n[k] == 2020 {
					return n[i] * n[j] * n[k]
				}
			}
		}
	}
	return -1
}

// main reads one integer per stdin line and prints both answers.
func main() {
	var nums []int
	var n int
	scan := bufio.NewScanner(os.Stdin)
	for scan.Scan() {
		f := strings.NewReader(scan.Text())
		fmt.Fscanf(f, "%d", &n)
		nums = append(nums, n)
	}
	a := part1(nums)
	b := part2(nums)
	fmt.Println("part 1:", a)
	fmt.Println("part 2:", b)
}
package domain import "github.com/tokopedia/tdk/go/app/resource" type UserDomain struct { resource UserResourceItf } func InitUserDomain(rsc UserResourceItf) UserDomain { return UserDomain{ resource: rsc, } } func (user UserDomain) IsValidUser(userID int) bool { if err := user.resource.FindUser(userID); err != nil { return false } return true } type UserResourceItf interface { FindUser(int) error } type UserResource struct { DB resource.SQLDB } // this is just simple function to give a picture of how user resource works func (user UserResource) FindUser(userId int) error { // you may check to db is this userId present in database return nil }
package model // 资源管理表 // 连接不同类型的资源(如菜单,接口),角色的权限仅映射到该表的id type Resource struct { Id int AppId string //应用id ResType int //资源类型 ResId int //资源id } // 菜单,资源详情 type ResCollection struct { DetailId int `json:"-"`//菜单id Name string `json:"name"`//菜单名称 ResType int `json:"-"`//资源类型 ParentId int `json:"-"`//父级id RESId int `json:"id"`//关联资源id Group string `json:"group"`//所在模块 } // Mix menu and interface resource type MixResource struct { Menu []ResCollection `json:"menus"` Inter []ResCollection `json:"interfaces"` }
package leetcode func titleToNumber(s string) int { var ret = 0 if len(s) == 0 { return ret } ret = int(s[0]) - 65 for i := 1; i < len(s); i += 1 { ret = (ret + 1) * 26 + int(s[i]) - 65 } return ret + 1 }
package nimkv

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/julienschmidt/httprouter"
)

// TestIndexHandler mounts indexHandler at "/" on a fresh router and
// verifies that GET / returns 200 with the expected banner body.
func TestIndexHandler(t *testing.T) {
	router := httprouter.New()
	router.GET("/", indexHandler)

	req, err := http.NewRequest("GET", "/", nil)
	if err != nil {
		t.Error(err)
	}

	// Record the response in memory instead of over a real socket.
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, req)

	if status := recorder.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusOK)
	}

	if recorder.Body.String() != "nimKV - (nimble Key-Value Store)" {
		t.Error("Unexpected response.")
	}
}
package main

import "fmt"

// sum prints the received numbers followed by their total.
func sum(nums ...int) {
	fmt.Println(nums, " ")
	total := 0
	for i := range nums {
		total += nums[i]
	}
	fmt.Print(total, "\n")
}

// main demonstrates calling a variadic function with literal arguments
// and with a spread slice.
func main() {
	sum(1, 23, 3)
	sum(2, 34, 3)
	arr1 := []int{3, 4, 5, 6, 7}
	sum(arr1...)
}
package main // // import ( // "fmt" // "net/http" // // "github.com/go-chi/chi" // "github.com/zmb3/spotify" // ) // // func (a *api) getArtist(w http.ResponseWriter, r *http.Request) { // a.Log.Infof("🎸 Starting getArtist...") // a.Log.Infof("GetArtist request: %+v", r) // // artistID := chi.URLParam(r, "artist_id") // // a.Log.Infof("Artist ID from params: %s", artistID) // // //artist := models.Artist{} // // //err := artist.GetArtistByID(a.Log, artistID) // //if err != nil { // // a.Log.Errorw("Error getting artist by ID", "error", err.Error()) // // w.WriteHeader(http.StatusInternalServerError) // // _, _ = w.Write([]byte(err.Error())) // //} // //id := spotify.ID("").String() // // a.Log.Infof("🎵 a.Spotify: %+v", a.Spotify) // a.Log.Infof("a.Spotify.authenticator: %+v", a.Spotify.authenticator) // // artist, err := a.Spotify.client.GetArtist(spotify.ID(artistID)) // if err != nil { // a.Log.Errorw("Error getting artist by ID", "error", err.Error()) // w.WriteHeader(http.StatusInternalServerError) // _, _ = w.Write([]byte(err.Error())) // } // // a.Log.Infof("Artist: %+v", artist) // // //_, _ = w.Write(jsonBody) // w.WriteHeader(http.StatusOK) // // fmt.Println() // fmt.Println() // }
package helper

import (
	"context"
	"strings"

	qm "github.com/volatiletech/sqlboiler/queries/qm"

	"github.com/99designs/gqlgen/graphql"
)

// ColumnSetting describes how a GraphQL selection maps to a database
// relation that may need eager loading.
type ColumnSetting struct {
	Name        string // sqlboiler relationship name to qm.Load
	IDAvailable bool   // ID is available without preloading
}

// PreloadsContainMoreThanId reports whether a contains a selection under
// prefix v that asks for more than just the relation's id field.
func PreloadsContainMoreThanId(a []string, v string) bool {
	for _, av := range a {
		if strings.HasPrefix(av, v) &&
			av != v && // e.g. parentTable
			!strings.HasPrefix(av, v+".id") { // e.g parentTable.id
			return true
		}
	}
	return false
}

// PreloadsContain reports whether v is present in a.
func PreloadsContain(a []string, v string) bool {
	for _, av := range a {
		if av == v {
			return true
		}
	}
	return false
}

// GetPreloadMods builds sqlboiler eager-load query mods for the relations
// requested by the current GraphQL query, starting at the top level.
func GetPreloadMods(ctx context.Context, preloadColumnMap map[string]ColumnSetting) (queryMods []qm.QueryMod) {
	return GetPreloadModsWithLevel(ctx, preloadColumnMap, "")
}

// GetPreloadModsWithLevel is GetPreloadMods rooted at the nested selection
// path given by level. Relations whose id is already embedded on the
// parent row are only preloaded when the query selects more than the id.
func GetPreloadModsWithLevel(ctx context.Context, preloadColumnMap map[string]ColumnSetting, level string) (queryMods []qm.QueryMod) {
	preloads := GetPreloadsFromContext(ctx, level)
	for _, preload := range preloads {
		dbPreloads := []string{}
		columnSetting, ok := preloadColumnMap[preload]
		if ok {
			if columnSetting.IDAvailable {
				// id comes for free on the parent; skip the preload
				// unless additional relation fields were selected.
				if PreloadsContainMoreThanId(preloads, preload) {
					dbPreloads = append(dbPreloads, columnSetting.Name)
				}
			} else {
				dbPreloads = append(dbPreloads, columnSetting.Name)
			}
		}
		if len(dbPreloads) > 0 {
			queryMods = append(queryMods, qm.Load(strings.Join(dbPreloads, ".")))
		}
	}
	return
}

// GetPreloadsFromContext collects the dotted field paths selected by the
// current GraphQL operation, stripped of the leading level prefix.
func GetPreloadsFromContext(ctx context.Context, level string) []string {
	return StripPreloads(GetNestedPreloads(
		graphql.GetRequestContext(ctx),
		graphql.CollectFieldsCtx(ctx, nil),
		"",
	), level)
}

// StripPreloads removes prefix+"." from each preload, in place.
// e.g. sometimes input is deeper and we want
// createdFlowBlock.block.blockChoice => when we fetch block in database we want to strip flowBlock
func StripPreloads(preloads []string, prefix string) []string {
	if prefix == "" {
		return preloads
	}
	for i, preload := range preloads {
		preloads[i] = strings.TrimPrefix(preload, prefix+".")
	}
	return preloads
}

// GetNestedPreloads walks the selection set recursively and returns every
// selected field as a dotted path rooted at prefix.
func GetNestedPreloads(ctx *graphql.RequestContext, fields []graphql.CollectedField, prefix string) (preloads []string) {
	for _, column := range fields {
		prefixColumn := GetPreloadString(prefix, column.Name)
		preloads = append(preloads, prefixColumn)
		preloads = append(preloads, GetNestedPreloads(ctx, graphql.CollectFields(ctx, column.SelectionSet, nil), prefixColumn)...)
		// NOTE(review): children are traversed via both SelectionSet and
		// Selections — confirm this does not emit duplicate paths.
		preloads = append(preloads, GetNestedPreloads(ctx, graphql.CollectFields(ctx, column.Selections, nil), prefixColumn)...)
	}
	return
}

// GetPreloadString joins prefix and name with a dot, handling an empty prefix.
func GetPreloadString(prefix, name string) string {
	if len(prefix) > 0 {
		return prefix + "." + name
	}
	return name
}
// This file is subject to a 1-clause BSD license.
// Its contents can be found in the enclosed LICENSE file.

package evdev

import (
	"fmt"
	"syscall"
	"unsafe"
)

// ioctl performs a raw ioctl(2) request on fd. data may be an
// unsafe.Pointer, an int or a uintptr; any other type is rejected.
// A zero errno is mapped to nil.
func ioctl(fd, name uintptr, data interface{}) error {
	var v uintptr
	switch dd := data.(type) {
	case unsafe.Pointer:
		v = uintptr(dd)
	case int:
		v = uintptr(dd)
	case uintptr:
		v = dd
	default:
		return fmt.Errorf("ioctl: Invalid argument: %T", data)
	}
	_, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, fd, name, v)
	if errno == 0 {
		return nil
	}
	return errno
}

// Fixed-size evdev ioctl request numbers (the EVIOC* constants from
// <linux/input.h>). The request encodes a struct size, so they are
// computed in init below rather than declared const.
var (
	_EVIOCGVERSION    uintptr
	_EVIOCGID         uintptr
	_EVIOCGREP        uintptr
	_EVIOCSREP        uintptr
	_EVIOCGKEYCODE    uintptr
	_EVIOCGKEYCODE_V2 uintptr
	_EVIOCSKEYCODE    uintptr
	_EVIOCSKEYCODE_V2 uintptr
	_EVIOCSFF         uintptr
	_EVIOCRMFF        uintptr
	_EVIOCGEFFECTS    uintptr
	_EVIOCGRAB        uintptr
	_EVIOCSCLOCKID    uintptr
)

// init computes the request numbers from the sizes of this package's Go
// mirror structs (Id, KeymapEntry, Effect), matching the C EVIOC* macros.
func init() {
	var i int32
	var id Id
	var ke KeymapEntry
	var ffe Effect
	sizeof_int := int(unsafe.Sizeof(i))
	sizeof_int2 := sizeof_int << 1 // two consecutive ints
	sizeof_id := int(unsafe.Sizeof(id))
	sizeof_keymap_entry := int(unsafe.Sizeof(ke))
	sizeof_effect := int(unsafe.Sizeof(ffe))

	_EVIOCGVERSION = _IOR('E', 0x01, sizeof_int)
	_EVIOCGID = _IOR('E', 0x02, sizeof_id)
	_EVIOCGREP = _IOR('E', 0x03, sizeof_int2)
	_EVIOCSREP = _IOW('E', 0x03, sizeof_int2)
	_EVIOCGKEYCODE = _IOR('E', 0x04, sizeof_int2)
	_EVIOCGKEYCODE_V2 = _IOR('E', 0x04, sizeof_keymap_entry)
	_EVIOCSKEYCODE = _IOW('E', 0x04, sizeof_int2)
	_EVIOCSKEYCODE_V2 = _IOW('E', 0x04, sizeof_keymap_entry)
	_EVIOCSFF = _IOC(_IOC_WRITE, 'E', 0x80, sizeof_effect)
	_EVIOCRMFF = _IOW('E', 0x81, sizeof_int)
	_EVIOCGEFFECTS = _IOR('E', 0x84, sizeof_int)
	_EVIOCGRAB = _IOW('E', 0x90, sizeof_int)
	_EVIOCSCLOCKID = _IOW('E', 0xa0, sizeof_int)
}

// Variable-length EVIOC* requests: the caller's buffer length is encoded
// in the request number, so these are functions of len.
func _EVIOCGNAME(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x06, len) }

func _EVIOCGPHYS(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x07, len) }

func _EVIOCGUNIQ(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x08, len) }

func _EVIOCGPROP(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x09, len) }

func _EVIOCGMTSLOTS(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x0a, len) }

func _EVIOCGKEY(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x18, len) }

func _EVIOCGLED(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x19, len) }

func _EVIOCGSND(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x1a, len) }

func _EVIOCGSW(len int) uintptr { return _IOC(_IOC_READ, 'E', 0x1b, len) }

// _EVIOCGBIT builds the request for reading the bitmask of event type ev.
func _EVIOCGBIT(ev, len int) uintptr { return _IOC(_IOC_READ, 'E', 0x20+ev, len) }

// _EVIOCGABS / _EVIOCSABS read/write the AbsInfo block of absolute axis abs.
func _EVIOCGABS(abs int) uintptr {
	var v AbsInfo
	return _IOR('E', 0x40+abs, int(unsafe.Sizeof(v)))
}

func _EVIOCSABS(abs int) uintptr {
	var v AbsInfo
	return _IOW('E', 0xc0+abs, int(unsafe.Sizeof(v)))
}

// Linux ioctl request layout: [dir:2][size:14][type:8][nr:8].
const (
	_IOC_NONE  = 0x0
	_IOC_WRITE = 0x1
	_IOC_READ  = 0x2

	_IOC_NRBITS   = 8
	_IOC_TYPEBITS = 8
	_IOC_SIZEBITS = 14
	_IOC_DIRBITS  = 2

	_IOC_NRSHIFT = 0
	_IOC_NRMASK  = (1 << _IOC_NRBITS) - 1

	_IOC_TYPEMASK = (1 << _IOC_TYPEBITS) - 1
	_IOC_SIZEMASK = (1 << _IOC_SIZEBITS) - 1
	_IOC_DIRMASK  = (1 << _IOC_DIRBITS) - 1

	_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
	_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
	_IOC_DIRSHIFT  = _IOC_SIZESHIFT + _IOC_SIZEBITS

	_IOC_IN       = _IOC_WRITE << _IOC_DIRSHIFT
	_IOC_OUT      = _IOC_READ << _IOC_DIRSHIFT
	_IOC_INOUT    = (_IOC_WRITE | _IOC_READ) << _IOC_DIRSHIFT
	_IOCSIZE_MASK = _IOC_SIZEMASK << _IOC_SIZESHIFT
)

// _IOC packs direction, type, number and size into one request number,
// mirroring the C _IOC macro.
func _IOC(dir, t, nr, size int) uintptr {
	return uintptr((dir << _IOC_DIRSHIFT) | (t << _IOC_TYPESHIFT) |
		(nr << _IOC_NRSHIFT) | (size << _IOC_SIZESHIFT))
}

// _IO/_IOR/_IOW/_IOWR mirror the C helper macros for the common cases.
func _IO(t, nr int) uintptr { return _IOC(_IOC_NONE, t, nr, 0) }

func _IOR(t, nr, size int) uintptr { return _IOC(_IOC_READ, t, nr, size) }

func _IOW(t, nr, size int) uintptr { return _IOC(_IOC_WRITE, t, nr, size) }

func _IOWR(t, nr, size int) uintptr { return _IOC(_IOC_READ|_IOC_WRITE, t, nr, size) }

// Field extractors: decode dir/type/nr/size back out of a request number.
func _IOC_DIR(nr int) uintptr { return uintptr(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) }

func _IOC_TYPE(nr int) uintptr { return uintptr(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) }

func _IOC_NR(nr int) uintptr { return uintptr(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) }

func _IOC_SIZE(nr int) uintptr { return uintptr(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) }
package DataTable import ( "github.com/team-zf/framework/dal" "time" ) type Server struct { dal.BaseTable Id int64 `db:"id,pk"json:"id"` Title string `db:"title"json:"title"` Status int `db:"status"json:"status"` Host string `db:"host"` Port int `db:"port"` ServiceTime time.Time `db:"service_time"json:"service_time"` } func NewServer() *Server { result := new(Server) result.BaseTable.Init(result) return result }
// Copyright 2016 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"
	"log"
	"sync"
	"time"
)

// processorLock serializes scheduling passes so the periodic reconciler
// and the watch-driven scheduler never process pods concurrently.
var processorLock = &sync.Mutex{}

// reconcileUnscheduledPods re-runs schedulePods every interval seconds
// until done is closed, then marks wg done and returns.
func reconcileUnscheduledPods(interval int, done chan struct{}, wg *sync.WaitGroup) {
	for {
		select {
		case <-time.After(time.Duration(interval) * time.Second):
			err := schedulePods()
			if err != nil {
				log.Println(err)
			}
		case <-done:
			wg.Done()
			log.Println("Stopped reconciliation loop.")
			return
		}
	}
}

// monitorUnscheduledPods schedules pods as the watch stream announces
// them, until done is closed. Watch errors are logged and ignored.
func monitorUnscheduledPods(done chan struct{}, wg *sync.WaitGroup) {
	pods, errc := watchUnscheduledPods()
	for {
		select {
		case err := <-errc:
			log.Println(err)
		case pod := <-pods:
			processorLock.Lock()
			// NOTE(review): fixed 2s pause before scheduling each watched
			// pod — presumably to let cluster state settle after the
			// watch event; confirm against the original walkthrough.
			time.Sleep(2 * time.Second)
			err := schedulePod(&pod)
			if err != nil {
				log.Println(err)
			}
			processorLock.Unlock()
		case <-done:
			wg.Done()
			log.Println("Stopped scheduler.")
			return
		}
	}
}

// schedulePod finds the nodes the pod fits on, picks the best-priced one
// and binds the pod to it. It fails when no node can host the pod.
func schedulePod(pod *Pod) error {
	nodes, err := fit(pod)
	if err != nil {
		return err
	}
	if len(nodes) == 0 {
		return fmt.Errorf("Unable to schedule pod (%s) failed to fit in any node", pod.Metadata.Name)
	}
	node, err := bestPrice(nodes)
	if err != nil {
		return err
	}
	err = bind(pod, node)
	if err != nil {
		return err
	}
	return nil
}

// schedulePods schedules every currently-unscheduled pod under the
// processor lock, logging (rather than aborting on) per-pod failures.
func schedulePods() error {
	processorLock.Lock()
	defer processorLock.Unlock()
	pods, err := getUnscheduledPods()
	if err != nil {
		return err
	}
	for _, pod := range pods {
		err := schedulePod(pod)
		if err != nil {
			log.Println(err)
		}
	}
	return nil
}
package models

// ClassLog is a single class-activity log record as stored/transferred by
// the reporting layer.
type ClassLog struct {
	Id       int64  `json:"id"`
	Action   string `json:"action"`
	Uid      int    `json:"uid"`
	Classid  int    `json:"classid"`
	Teaid    int    `json:"teaid"`
	Stuid    int    `json:"stuid"`
	Ts       string `json:"ts"`
	Note     string `json:"note"`
	Roomid   string `json:"roomid"`
	Traceid  string `json:"traceid"`
	Bookid   string `json:"bookid"`
	Schoolid int    `json:"schoolid"`
	Usetime  int    `json:"usetime"`
	Subtype  string `json:"subtype"`
	Uname    string `json:"uname"`
}

// Student pairs a student id with a score.
type Student struct {
	Stuid int `json:"stuid"`
	Score int `json:"score"`
}

// TeachedResult aggregates a teacher's session count and average teaching
// time. NOTE(review): Avg_TeachingTime breaks Go MixedCaps convention but
// is exported, so renaming it would break callers.
type TeachedResult struct {
	Name             string `json:"name"`
	Num              int    `json:"num"`
	Avg_TeachingTime int    `json:"avg_teachtime"`
}

// TeacherTime holds a teacher's total teaching time.
type TeacherTime struct {
	Name        string `json:"name"`
	TeacherTime int    `json:"teachtime"`
}

// TeachingRate holds a teacher's teaching count, keyed by user id/name.
type TeachingRate struct {
	Uname      string `json:"uname"`
	Uid        int    `json:"uid"`
	Teachcount int    `json:"teachcount"`
}

// TeacherData aggregates per-teacher class and pre-class counters.
type TeacherData struct {
	Uname         string `json:"uname"`
	Uid           int    `json:"uid"`
	ClassTimes    int    `json:"class_times"`
	ClassCount    int    `json:"class_count"`
	PreClassTimes int    `json:"preclass_times"`
	PreClassCount int    `json:"preclass_count"`
}
package Works

import (
	"ShiqianCrawler/models"
	"ShiqianCrawler/utils"
	"log"
	"strconv"
	"strings"

	"github.com/gocolly/colly"
	"gopkg.in/mgo.v2/bson"
)

// Taitea_DoWork crawls the "Taitea culture" section of www.sqxw.gov.cn: it
// walks the paginated listing, records every article URL, fetches each
// article's detail page, and finally signals completion on crawlerChan.
func Taitea_DoWork(rootUrl string, crawlerChan chan string, collectionName string) {
	collector := colly.NewCollector()

	// Follow pagination links.
	collector.OnHTML("div#pages", func(e *colly.HTMLElement) {
		GetNextPage_Taitea(collector, e)
	})

	// Collect article URLs from each listing page.
	collector.OnHTML("ul.list.lh24.f14>li", func(e *colly.HTMLElement) {
		GetDetailUrl_Taitea(e, collector, collectionName)
	})

	// Extract the article content itself.
	collector.OnHTML("div#Article", func(e *colly.HTMLElement) {
		GetDetailInfo_Taitea(e, collectionName)
	})

	collector.Visit(rootUrl)
	crawlerChan <- "苔茶文化爬取结束"
}

// GetNextPage_Taitea reads the current page number from the pager widget and
// visits the next page; a non-numeric pager text ends the pagination walk.
func GetNextPage_Taitea(collector *colly.Collector, element *colly.HTMLElement) {
	index := element.ChildText("span")
	indexInt, err := strconv.Atoi(index)
	if err != nil {
		return
	}
	nextIndex := indexInt + 1
	url := "http://www.sqxw.gov.cn/html/wenhua/taichawenhua/" + strconv.Itoa(nextIndex) + ".html"
	collector.Visit(url)
}

// GetDetailUrl_Taitea records an article link found on a listing page and
// queues the article page for crawling. Insert failures are logged but do
// not stop the crawl.
func GetDetailUrl_Taitea(element *colly.HTMLElement, collector *colly.Collector, collectionName string) {
	url := element.ChildAttr("a", "href")
	// FIX: was `url == "" || len(url) <= 0` — the two conditions are the
	// same test.
	if url == "" {
		return
	}
	if err := utils.InsertDetailUrl(collectionName, url, models.Taitea{}); err != nil {
		log.Printf("%s", err)
	}
	collector.Visit(url)
}

// GetDetailInfo_Taitea parses an article page (title, timestamp, lead image
// and paragraph/image content) and updates the matching MongoDB document
// that GetDetailUrl_Taitea created. Pages with no usable title/date/content
// (or a known garbage title) are treated as dirty data and removed.
func GetDetailInfo_Taitea(e *colly.HTMLElement, collectionName string) {
	website := utils.GetWebsite(e.Request.URL)
	// FIX: local was misspelled `reslut` and compared with `== false`.
	result := utils.CheckWebsite(collectionName, website)
	if !result {
		log.Printf("数据不存在 : %s\n", website)
		return
	}

	title := e.ChildText("h1")
	timer := e.ChildText("h1>span")

	// Normalize the title, then keep only the substrings accepted by the
	// shared title regexp. (FIX: replaced a manual `title += s + ""` loop —
	// the `+ ""` was a no-op — with strings.Join.)
	title = utils.ReplaceString(title)
	saveTitle := utils.TitleRg1.FindAllString(title, -1)
	title = strings.Join(saveTitle, "")

	// Extract the timestamp with the shared timer regexp.
	timer = utils.TimerReg.FindString(timer)

	// Lead image: prefer one directly under div.content, otherwise fall
	// back to the first image found among the paragraphs below.
	imgShow := e.ChildAttr("div.content>img", "src")

	var contentList []string
	e.ForEach("div.content>p", func(i int, element *colly.HTMLElement) {
		p := element.Text
		span := element.ChildText("span")
		pUrl := element.ChildAttr("img", "src")

		var save string
		if p != "" {
			save = p
		} else if span != "" {
			save = span
		}
		if save != "" {
			save = utils.ReplaceString(save)
			// Strip the mojibake character the site's encoding leaves in
			// place of non-breaking spaces.
			save = strings.Replace(save, "聽", "", -1)
			contentList = append(contentList, save)
		} else if pUrl != "" {
			if imgShow == "" {
				imgShow = pUrl
			}
			contentList = append(contentList, pUrl)
		}
	})

	// No title, no date, no content, or a known garbled title: treat the
	// page as dirty data and remove its placeholder document.
	if title == "" || timer == "" || len(contentList) <= 0 || strings.Contains(title, `銆愬皬闃″湪鐜板満銆戠煶闃″幙鑱氬嚖涔`) {
		utils.RemoveDataByWebsite(collectionName, website)
		return
	}

	// Update the stored document with the parsed fields.
	session := utils.Session.Copy()
	defer session.Close()
	collection := session.DB("ShiqianNews").C(collectionName)
	err := collection.Update(bson.M{"website": website}, bson.M{"$set": bson.M{
		"title":    title,
		"timerid":  timer,
		"imgshow":  imgShow,
		"contents": contentList,
	}})
	if err != nil {
		// Best-effort update: failures were deliberately ignored upstream.
		return
	}
}
package main

import (
	"fmt"

	"github.com/karalabe/cookiejar/graph"
	"github.com/karalabe/cookiejar/graph/dfs"
)

// Demonstrates depth-first search on a small graph: seven vertices, a chain
// 0-1-2-3 forking into 4 and 5, with vertex 6 left disconnected. Queries the
// path, the visit order, and reachability from the source vertex #0.
func main() {
	// Assemble the sample graph edge by edge.
	edges := [][2]int{{0, 1}, {1, 2}, {2, 3}, {3, 4}, {3, 5}}
	g := graph.New(7)
	for _, e := range edges {
		g.Connect(e[0], e[1])
	}

	// Run DFS rooted at vertex #0.
	search := dfs.New(g, 0)

	// Report the path to #5, the full visit order, and which of #4/#6 are
	// reachable (#6 is not — it has no edges).
	fmt.Println("Path 0->5:", search.Path(5))
	fmt.Println("Order:", search.Order())
	fmt.Println("Reachable #4 #6:", search.Reachable(4), search.Reachable(6))
}
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
	"tictactoe/components"
	"tictactoe/service"
)

// main drives an interactive tic-tac-toe session on stdin/stdout: it reads
// a board size and two player names, assigns X/O marks, then alternates
// turns until a player wins or the board fills up.
func main() {
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Welcome")

	// Read the board size, re-prompting until it is valid.
	// FIX: the original only rejected sizes above 5, silently accepting
	// zero and negative board sizes.
	fmt.Print("Enter size of the board : ")
AGAIN:
	sizeInput, _ := reader.ReadString('\n')
	sizeInput = strings.TrimSpace(sizeInput)
	size, err := strconv.Atoi(sizeInput)
	if err != nil || size <= 0 || size > 5 {
		fmt.Println("value is incorrect. try again")
		goto AGAIN
	}

	// Read both player names and let player 1 pick a mark; player 2 gets
	// the other one.
	fmt.Print("Enter Name of first player : ")
	name1, _ := reader.ReadString('\n')
	name1 = strings.TrimSpace(name1)

	fmt.Print("Enter Name of second player : ")
	name2, _ := reader.ReadString('\n')
	name2 = strings.TrimSpace(name2)

	fmt.Print("For player 1 Enter 1 to take X and anything else to take O : ")
	mark, _ := reader.ReadString('\n')
	mark = strings.TrimSpace(mark)

	var pl [2]*components.Player
	if mark == "1" {
		pl[0] = components.CreatePlayer(name1, "X")
		pl[1] = components.CreatePlayer(name2, "O")
	} else {
		pl[0] = components.CreatePlayer(name1, "O")
		pl[1] = components.CreatePlayer(name2, "X")
	}

	// Start a new game.
	newGame := service.NewGameService(pl, size)

	// current indexes pl for the player whose turn it is.
	current := 0
	fmt.Println(pl[0].Name, " your chance ")
	for {
		fmt.Print(newGame.PrintBoard())
		fmt.Print("Enter position : ")
		pos, _ := reader.ReadString('\n')
		pos = strings.TrimSpace(pos)
		position, err := strconv.Atoi(pos)
		if err != nil {
			fmt.Println("Position should be an integer")
			continue
		}

		// FIX: the original wrapped this call in an if/else whose two
		// branches were byte-identical.
		err, res := newGame.Play(uint8(position))
		if err != nil {
			fmt.Println(err)
			continue
		}

		if res == "win" {
			fmt.Print(newGame.PrintBoard())
			fmt.Print(pl[current].Name, " has won ")
			break
		}
		if res == "draw" {
			fmt.Print("Match is draw")
			break
		}

		// Hand the turn to the other player.
		current = 1 - current
		fmt.Print(pl[current].Name, " your chance \n")
	}
}
package msg

// Topic naming constants.
const (
	// GroupTopic is the prefix used to build per-group topic names;
	// callers append a group identifier to it.
	GroupTopic = "Group_"
)
package kebab import ( ) func Grill() string { return "ju-ju!" }
package http

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"mime/multipart"
	"net/http"
	"os"
	"testing"
	"time"
)

//DON'T forget to add PORT to firewall exception
var (
	fileField    = "files"
	dataDir      = "testdata/"
	uploadDir    = "testdata/upload/"
	uploadTarget = "/upload"
	serverPort   = ":8080"
	serverHost   = "http://localhost"
)

// uploadHandler is the file upload handler used by the tests: it parses the
// multipart form, logs any custom fields, stores every uploaded file under
// uploadDir, and reports the field/file counts back to the client.
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	const maxMemory = 1024 * 1024
	err := r.ParseMultipartForm(maxMemory)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	//Our multipart form
	multi := r.MultipartForm
	log.Printf("File upload request received")

	//handles form data
	log.Printf("Process custom field(s)...")
	for field, value := range multi.Value {
		fmt.Printf(" Field[`%s`]: %v\n", field, value)
	}

	//Get file header and store each file.
	log.Printf("Process file(s)...")
	files := multi.File[fileField]
	for _, fh := range files {
		fmt.Printf(" %s\n", fh.Filename)
		if err := saveUploadedFile(fh); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}

	str := fmt.Sprintf("fields=%02d, files=%02d\n", len(multi.Value), len(multi.File))
	w.Write([]byte(str))
}

// saveUploadedFile copies one uploaded part into uploadDir.
//
// FIX: the original registered `defer file.Close()` / `defer dstFile.Close()`
// BEFORE checking the Open/Create error, so a failed fh.Open left a nil
// multipart.File interface whose deferred Close would panic; and because the
// defers sat inside the handler's loop, every handle stayed open until the
// whole request finished. Extracting the per-file work fixes both.
func saveUploadedFile(fh *multipart.FileHeader) error {
	file, err := fh.Open()
	if err != nil {
		return err
	}
	defer file.Close()

	//Create destination file
	dstFile, err := os.Create(uploadDir + fh.Filename)
	if err != nil {
		return err
	}
	defer dstFile.Close()

	//Copy and overwrite destination file
	_, err = io.Copy(dstFile, file)
	return err
}

// TestMain starts the test HTTP server, runs the suite, then shuts the
// server down. NOTE(review): the server is started asynchronously with no
// readiness check, so the first test could race its startup — confirm.
func TestMain(m *testing.M) {
	server := setup()
	go server.ListenAndServe()
	code := m.Run()
	shutdown(server)
	os.Exit(code)
}

// setup builds the HTTP server that serves the upload handler.
func setup() *http.Server {
	mux := http.NewServeMux()
	mux.HandleFunc(uploadTarget, uploadHandler)

	//DON'T forget to allow connection to PORT
	//when the PC has firewall.
	server := &http.Server{
		Addr:    serverPort,
		Handler: mux,
	}
	return server
}

// shutdown gracefully stops the server, allowing up to three seconds for
// in-flight requests to drain.
func shutdown(server *http.Server) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Printf("Error shutdown HTTP server: %v", err)
	}
	log.Printf("--- DONE ---")
}

// doSubmit runs the given submit function (POST or PUT) against the upload
// endpoint and logs the response body. When failOnError is set, any error
// fails the test; otherwise errors are only logged (used by the
// expected-failure case).
func doSubmit(submit func(string) (*http.Response, error), failOnError bool, t *testing.T) {
	url := serverHost + serverPort + uploadTarget
	resp, err := submit(url)
	if err != nil {
		if failOnError {
			t.Fatal(err)
		} else {
			t.Logf("Error: %s", err)
		}
		return
	}
	defer resp.Body.Close()
	response, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		if failOnError {
			t.Fatal(err)
		} else {
			t.Logf("Error: %s", err)
		}
	}
	log.Printf("Response=%v", string(response))
}

// TestFieldsOnly uploads a form containing only custom fields, no files.
func TestFieldsOnly(t *testing.T) {
	fu := NewFormUploader()
	fu.AddField("id", "Field-only-upload")
	fu.AddField("id", "Second ID")
	fu.AddField("time", time.Now().Format(time.RFC3339))
	fu.AddField("description", "Custom information")
	//perform upload
	doSubmit(fu.Post, true, t)
}

// TestSingleFileOnly uploads a single file with no custom fields.
func TestSingleFileOnly(t *testing.T) {
	fu := NewFormUploader()
	fu.AddFiles(fileField, dataDir+"image01.jpg")
	//perform upload
	doSubmit(fu.Post, true, t)
}

// TestMultipleFilesOnly uploads several files with no custom fields.
func TestMultipleFilesOnly(t *testing.T) {
	fu := NewFormUploader()
	files := []string{
		dataDir + "image01.jpg",
		dataDir + "file01.txt",
		dataDir + "file02.pdf",
		dataDir + "file03.pdf",
	}
	fu.AddFiles(fileField, files...)
	//perform upload
	doSubmit(fu.Post, true, t)
}

// TestFilesWithFields uploads files together with custom fields via POST.
func TestFilesWithFields(t *testing.T) {
	fu := NewFormUploader()
	//Add fields
	fu.AddField("id", "File and custom files")
	fu.AddField("time", time.Now().Format(time.RFC3339))
	fu.AddField("description", "Custom information")
	files := []string{
		dataDir + "image01.jpg",
		dataDir + "file01.txt",
		dataDir + "file02.pdf",
	}
	fu.AddFiles(fileField, files...)
	//perform upload
	doSubmit(fu.Post, true, t)
}

// TestPutFilesWithFields uploads files and fields via PUT, including a
// duplicate-named file from a conflicting directory.
func TestPutFilesWithFields(t *testing.T) {
	fu := NewFormUploader()
	//Add fields
	fu.AddField("id", "File and custom files")
	fu.AddField("time", time.Now().Format(time.RFC3339))
	fu.AddField("description", "Custom information")
	files := []string{
		dataDir + "image01.jpg",
		dataDir + "file01.txt",
		dataDir + "conflict/file01.txt",
		dataDir + "file02.pdf",
	}
	fu.AddFiles(fileField, files...)
	//perform upload: PUT
	doSubmit(fu.Put, true, t)
}

// TestFileDoesNotExist submits a missing file; the error is expected, so
// doSubmit only logs it instead of failing the test.
func TestFileDoesNotExist(t *testing.T) {
	fu := NewFormUploader()
	files := []string{
		dataDir + "file01.txt",
		"desnotexist.txt",
	}
	fu.AddFiles(fileField, files...)
	//perform upload
	doSubmit(fu.Post, false, t)
}
package arc

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/PuerkitoBio/goquery"
	log "github.com/sirupsen/logrus"
)

// snapshotLayout is the reference layout of snapshot timestamps shown on
// archive.is result pages, e.g. "2 Jan 2006 15:04".
const snapshotLayout = "2 Jan 2006 15:04"

// Snapshot represents an instance of a URL page snapshot on archive.is.
type Snapshot struct {
	URL          string
	ThumbnailURL string
	Timestamp    time.Time
}

// Search for URL snapshots. It scrapes the archive.is search page for the
// given url and returns the discovered snapshots sorted newest-first.
// timeout bounds the HTTP request; cookie is forwarded with the request.
// Entries with unparseable timestamps are logged and skipped rather than
// failing the whole search.
func Search(url string, timeout time.Duration, cookie string) ([]Snapshot, error) {
	searchURL := fmt.Sprintf("%v/%v", BaseURL, url)
	resp, body, err := doRequest("", searchURL, nil, timeout, cookie)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode/100 != 2 {
		return nil, fmt.Errorf("received non-2xx status code=%v while checking for archived snapshots of %v", resp.StatusCode, url)
	}
	doc, err := goquery.NewDocumentFromReader(bytes.NewBuffer(body))
	if err != nil {
		return nil, fmt.Errorf("constructing goquery doc from archive.is search response: %s", err)
	}
	snaps := []Snapshot{}
	doc.Find(".THUMBS-BLOCK > div").Each(func(_ int, s *goquery.Selection) {
		var (
			u     = s.Find("a").AttrOr("href", "")
			th    = s.Find("img").AttrOr("src", "")
			tsStr = s.Find("div").Text()
		)
		// NOTE(review): only entries that BOTH lack an href AND end in
		// " more" are skipped — confirm `&&` (not `||`) is intended here.
		if u == "" && strings.HasSuffix(tsStr, " more") {
			// Skip it, as it's a "# more" button.
			return
		}
		ts, err := time.Parse(snapshotLayout, tsStr)
		if err != nil {
			log.WithField("url", url).Errorf("Parsing snapshot timestamp %q: %s (skipping snapshot entry u=%s)", tsStr, err, u)
			return
		}
		snap := Snapshot{
			URL:          u,
			ThumbnailURL: th,
			Timestamp:    ts,
		}
		snaps = append(snaps, snap)
	})
	// Newest snapshots first.
	sort.Slice(snaps, func(i, j int) bool {
		return snaps[i].Timestamp.After(snaps[j].Timestamp)
	})
	return snaps, nil
}
package testdata

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestTestData runs every fixture in the cases table: functions named
// Pass_* are expected to return nil, Fail_* to return an error.
func TestTestData(t *testing.T) {
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			if c.success {
				require.NoError(t, c.function())
			} else {
				require.Error(t, c.function())
			}
		})
	}
}

// grep 'func [PF]' ./test/testdata/interfacemustbeptr.go | sed -e "s/.*func \(.*\)().*/{\1, \"\1\"},/"
// cases pairs each fixture function (defined elsewhere in this package)
// with its display name and whether it is expected to succeed. The
// success column mirrors the Pass_/Fail_ prefix of each function name.
var cases = []struct {
	function func() error
	name     string
	success  bool
}{
	{Fail_UnmarshalMap, "Fail_UnmarshalMap", false},
	{Pass_MarshalMap_AddressOperator, "Pass_MarshalMap_AddressOperator", true},
	{Fail_UnmarshalMap_AliasedPackage, "Fail_UnmarshalMap_AliasedPackage", false},
	{Fail_UnmarshalMap_Closure, "Fail_UnmarshalMap_Closure", false},
	{Fail_UnmarshalMap_NamedClosure, "Fail_UnmarshalMap_NamedClosure", false},
	{Fail_UnmarshalMap_Copy, "Fail_UnmarshalMap_Copy", false},
	{Pass_UnmarshalMap_CopyAddressOperator, "Pass_UnmarshalMap_CopyAddressOperator", true},
	{Pass_UnmarshalMap_CreatePointer, "Pass_UnmarshalMap_CreatePointer", true},
	{Fail_UnmarshalMap_TypeAlias, "Fail_UnmarshalMap_TypeAlias", false},
	{Pass_UnmarshalMap_AddressOfTypeAlias, "Pass_UnmarshalMap_AddressOfTypeAlias", true},
	{Pass_UnmarshalMap_PtrTypeAlias, "Pass_UnmarshalMap_PtrTypeAlias", true},
	{Pass_UnmarshalMap_FunctionCall, "Pass_UnmarshalMap_FunctionCall", true},
	{Fail_UnmarshalMap_FunctionCall, "Fail_UnmarshalMap_FunctionCall", false},
	{Fail_UnmarshalMap_ConstExpr, "Fail_UnmarshalMap_ConstExpr", false},
	{Fail_UnmarshalMap_Parens, "Fail_UnmarshalMap_Parens", false},
	{Pass_UnmarshalMap_Parens, "Pass_UnmarshalMap_Parens", true},
	{Pass_UnmarshalMap_FancyDst, "Pass_UnmarshalMap_FancyDst", true},
	{Fail_UnmarshalMap_FancyDst, "Fail_UnmarshalMap_FancyDst", false},
	{Fail_UnmarshalMap_Indirect, "Fail_UnmarshalMap_Indirect", false},
}
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// maxLength caps how many characters of each name are kept.
const maxLength = 20

// Name holds one first/last name pair read from the input file.
type Name struct {
	fname string
	lname string
}

// truncate caps s at maxLength bytes.
func truncate(s string) string {
	if len(s) > maxLength {
		return s[:maxLength]
	}
	return s
}

// parseNames reads space-separated "first last" pairs, one per line,
// skipping blank lines and lines with fewer than two fields. Over-long
// names are truncated to maxLength. Extracted from main so the parsing is
// testable independent of file I/O.
func parseNames(r io.Reader) []Name {
	var names []Name
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		if len(line) == 0 {
			continue
		}
		s := strings.Split(line, " ")
		if len(s) < 2 {
			continue
		}
		names = append(names, Name{
			fname: truncate(s[0]),
			lname: truncate(s[1]),
		})
	}
	return names
}

// main prompts for a file name, parses it, and prints each name record.
func main() {
	var fn string
	fmt.Print("Input file name: ")
	fmt.Scanln(&fn)

	file, err := os.Open(fn)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	// FIX: the original pre-allocated `make([]Name, 1)` and juggled an
	// index alongside append, which printed a spurious zero-value entry
	// when the file contained no names. A plain nil slice + append (inside
	// parseNames) removes both the bug and the bookkeeping.
	for i, v := range parseNames(file) {
		fmt.Printf("Item# %d, First name: %s, Last Name: %s\n", i, v.fname, v.lname)
	}
}
package types

import (
	"github.com/tendermint/tendermint/crypto"

	sdk "github.com/cosmos/cosmos-sdk/types"

	servicetypes "github.com/irisnet/irismod/modules/service/types"

	"github.com/irisnet/irismod/modules/oracle/types"
)

// Constants describing the system service definition registered by the
// random module.
const (
	// ServiceName is the registered name of the random system service.
	ServiceName = "random"
	// ServiceDesc is the human-readable description of the service.
	ServiceDesc = "system service definition of random module"
	// ServiceValueJSONPath is the JSON path of the seed in service output.
	ServiceValueJSONPath = "seed"
	// AuthorDescription describes the module account that authors the
	// service definition.
	AuthorDescription = "random module account"
	// ServiceSchemas holds the JSON Schemas of the service input (empty
	// object) and output (a 64-hex-char "seed" string).
	ServiceSchemas = `
	{
		"input": {
			"$schema": "http://json-schema.org/draft-04/schema#",
			"title": "random-seed-input-body",
			"description": "IRIS Hub Random Seed Input Body Schema",
			"type": "object",
			"additionalProperties": false
		},
		"output": {
			"$schema": "http://json-schema.org/draft-04/schema#",
			"title": "random-seed-output-body",
			"description": "IRIS Hub Random Seed Output Body Schema",
			"type": "object",
			"properties": {
				"seed": {
					"description": "random seed",
					"type": "string",
					"pattern": "^[0-9a-fA-F]{64}$"
				}
			},
			"additionalProperties": false,
			"required": [
				"seed"
			]
		}
	}`
)

var (
	// ServiceTags tags the service definition with the module name.
	ServiceTags = []string{types.ModuleName}
	// Author is the module account address derived from the module name.
	Author = sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName)))
)

// GetSvcDefinition assembles the random module's system service definition
// from the constants above.
func GetSvcDefinition() servicetypes.ServiceDefinition {
	return servicetypes.NewServiceDefinition(
		ServiceName,
		ServiceDesc,
		ServiceTags,
		Author,
		AuthorDescription,
		ServiceSchemas,
	)
}
//go:build ocr

package uixt

import (
	"testing"
)

// TestDriverExtOCR exercises OCR-based screen text lookup on a live iOS
// device: it locates the "抖音" label on screen and taps 20 points above
// the returned coordinate. Requires the `ocr` build tag and the package's
// shared iosDevice/checkErr test fixtures.
func TestDriverExtOCR(t *testing.T) {
	driverExt, err := iosDevice.NewDriver(nil)
	checkErr(t, err)

	point, err := driverExt.FindScreenText("抖音")
	checkErr(t, err)
	t.Logf("point.X: %v, point.Y: %v", point.X, point.Y)

	// Tap slightly above the text centre — presumably to hit the target
	// element rather than its caption; confirm against the app layout.
	driverExt.Driver.TapFloat(point.X, point.Y-20)
}
package main

import (
	"log"
	"os"
	"sync"

	"github.com/mustafa-zidan/simscale/cache"
	"github.com/mustafa-zidan/simscale/parser"
	"github.com/mustafa-zidan/simscale/stats"
	"github.com/urfave/cli"
)

// inFile and outFile are bound to the CLI flags below; version is expected
// to be injected at build time.
var inFile, outFile, version string

// flags declares the CLI: a required input log file and an optional output
// trace file.
var flags = []cli.Flag{
	cli.StringFlag{
		Name:        "in-file, i",
		Value:       "",
		Usage:       "file to be processed",
		Destination: &inFile,
		Required:    true,
	},
	cli.StringFlag{
		Name:        "out-file, o",
		Value:       "trace.txt",
		Usage:       "output trace file ",
		Destination: &outFile,
	},
}

// main wires up the CLI app: it parses the input log file into traces,
// writes them through the cache, waits for all background writers, then
// prints the collected counters.
func main() {
	app := cli.NewApp()
	app.Name = "Simscale"
	app.Usage = "Simscale coding challange"
	app.Flags = flags
	app.Version = version
	app.Action = func(c *cli.Context) error {
		wg := &sync.WaitGroup{}
		stats.CounterInitialize()
		cache := cache.NewCache(outFile, wg)
		p, err := parser.NewParser(inFile, cache)
		// FIX: this error was previously ignored and only returned after
		// p.Process(), so a bad input path dereferenced a nil parser.
		if err != nil {
			return err
		}
		p.Process()
		wg.Wait()
		s := stats.CounterList()
		log.Printf("Total Number of Logs: \t\t %d\n", s["total"])
		log.Printf("Number of Records parsed: \t\t %d\n", s["success"])
		log.Printf("Number of Traces: \t\t\t %d\n", s["traces"])
		log.Printf("Number of Orphen Logs: \t\t %d\n", s["orphens"])
		log.Printf("Number of Malformed Logs: \t\t %d\n", s["malformed"])
		return nil
	}
	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
package matcher

import "github.com/fdingiit/matching-algorithms/def"

// Matcher accumulates subscriptions and matches products against them.
type Matcher interface {
	// Add registers one or more subscriptions with the matcher.
	Add(subs ...def.Subscription)
	// Match returns the subscriptions that match the given product.
	Match(product def.Product) []def.Subscription
}
package refmt_test

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/polydawn/refmt"
	"github.com/polydawn/refmt/cbor"
	"github.com/polydawn/refmt/json"
	"github.com/polydawn/refmt/obj/atlas"
)

// TestRoundTrip checks encode→decode→re-encode stability for a range of
// values across the supported encodings (CBOR and JSON), plus two
// CBOR-tagged transform cases.
func TestRoundTrip(t *testing.T) {
	t.Run("nil nil", func(t *testing.T) {
		testRoundTripAllEncodings(t, nil, atlas.MustBuild())
	})
	t.Run("empty []interface{}", func(t *testing.T) {
		testRoundTripAllEncodings(t, []interface{}{}, atlas.MustBuild())
	})
	t.Run("nil []byte{}", func(t *testing.T) {
		testRoundTripAllEncodings(t, []byte(nil), atlas.MustBuild())
	})
	t.Run("nil []interface{}", func(t *testing.T) {
		testRoundTripAllEncodings(t, []interface{}(nil), atlas.MustBuild())
	})
	// FIX: this case previously passed map[string]interface{}(nil) — a nil
	// map, byte-identical to the "nil map" case below — so the empty-map
	// shape was never actually exercised.
	t.Run("empty map[string]interface{}", func(t *testing.T) {
		testRoundTripAllEncodings(t, map[string]interface{}{}, atlas.MustBuild())
	})
	t.Run("nil map[string]interface{}", func(t *testing.T) {
		testRoundTripAllEncodings(t, map[string]interface{}(nil), atlas.MustBuild())
	})
	t.Run("4-value []interface{str}", func(t *testing.T) {
		testRoundTripAllEncodings(t, []interface{}{"str", "ing", "bri", "ng"}, atlas.MustBuild())
	})
	t.Run("4-value map[string]interface{str|int}", func(t *testing.T) {
		testRoundTripAllEncodings(t, map[string]interface{}{"k": "v", "a": "b", "z": 26, "m": 9}, atlas.MustBuild())
	})
	t.Run("cbor tagging and str-str transform", func(t *testing.T) {
		type Taggery string
		roundTrip(t,
			map[string]interface{}{"k": Taggery("v"), "a": "b", "z": 26, "m": 9},
			cbor.EncodeOptions{},
			cbor.DecodeOptions{},
			atlas.MustBuild(
				atlas.BuildEntry(Taggery("")).UseTag(54).Transform().
					TransformMarshal(atlas.MakeMarshalTransformFunc(
						func(x Taggery) (string, error) { return string(x), nil })).
					TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
						func(x string) (Taggery, error) { return Taggery(x), nil })).
					Complete()),
		)
	})
	t.Run("cbor tagging and struct-[]byte transform", func(t *testing.T) {
		type Taggery struct{ x []byte }
		roundTrip(t,
			map[string]interface{}{
				"foo":   "bar",
				"hello": Taggery{[]byte("c1")},
				"baz": []interface{}{
					Taggery{[]byte("c1")},
					Taggery{[]byte("c2")},
				},
				"cats": map[string]interface{}{
					"qux": Taggery{[]byte("c3")},
				},
			},
			cbor.EncodeOptions{},
			cbor.DecodeOptions{},
			atlas.MustBuild(
				atlas.BuildEntry(Taggery{}).UseTag(54).Transform().
					TransformMarshal(atlas.MakeMarshalTransformFunc(
						func(x Taggery) ([]byte, error) { return x.x, nil })).
					TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
						func(x []byte) (Taggery, error) { return Taggery{x}, nil })).
					Complete()),
		)
	})
}

// testRoundTripAllEncodings runs roundTrip for the same value under both
// CBOR and JSON with default options.
func testRoundTripAllEncodings(
	t *testing.T,
	value interface{},
	atl atlas.Atlas,
) {
	t.Run("cbor", func(t *testing.T) {
		roundTrip(t, value, cbor.EncodeOptions{}, cbor.DecodeOptions{}, atl)
	})
	t.Run("json", func(t *testing.T) {
		roundTrip(t, value, json.EncodeOptions{}, json.DecodeOptions{}, atl)
	})
}

// roundTrip encodes value, decodes it back into an interface{}, re-encodes
// the result, and asserts the two encoded byte streams are identical.
func roundTrip(
	t *testing.T,
	value interface{},
	encodeOptions refmt.EncodeOptions,
	decodeOptions refmt.DecodeOptions,
	atl atlas.Atlas,
) {
	// Encode.
	var buf bytes.Buffer
	encoder := refmt.NewMarshallerAtlased(encodeOptions, &buf, atl)
	if err := encoder.Marshal(value); err != nil {
		t.Fatalf("failed encoding: %s", err)
	}

	// Decode back to obj.
	decoder := refmt.NewUnmarshallerAtlased(decodeOptions, bytes.NewBuffer(buf.Bytes()), atl)
	var slot interface{}
	if err := decoder.Unmarshal(&slot); err != nil {
		t.Fatalf("failed decoding: %s", err)
	}
	t.Logf("%T -- %#v", slot, slot)

	// Re-encode. Expect to get same encoded form.
	var buf2 bytes.Buffer
	encoder2 := refmt.NewMarshallerAtlased(encodeOptions, &buf2, atl)
	if err := encoder2.Marshal(slot); err != nil {
		t.Fatalf("failed re-encoding: %s", err)
	}

	// Stringify. (Plain "%q" escapes unprintables quite nicely.)
	str1 := fmt.Sprintf("%q", buf.String())
	str2 := fmt.Sprintf("%q", buf2.String())
	if str1 != str2 {
		t.Errorf("%q != %q", str1, str2)
	}
	t.Logf("%#v == %q", value, str1)
}
package main

// ErlHeaderStr is the boilerplate emitted at the top of the generated
// Erlang module: it declares the ergo module, exports everything, and loads
// the ./ergo NIF shared library when the module is loaded.
const ErlHeaderStr = `-module(ergo).
-compile(export_all).
-on_load(init/0).

init() ->
    ok = erlang:load_nif("./ergo", 0).

`
package bosh_test

import (
	"github.com/cloudfoundry/bosh-bootloader/bosh"
	"github.com/cloudfoundry/bosh-bootloader/storage"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Verifies that SSHKeyDeleter strips the jumpbox_ssh entry from both the
// BOSH and Jumpbox variable YAML blobs, and that malformed YAML in either
// blob surfaces as a wrapped unmarshal error.
var _ = Describe("SSHKeyDeleter", func() {
	Describe("Delete", func() {
		var (
			sshKeyDeleter bosh.SSHKeyDeleter
			state         storage.State
			expectedState storage.State
		)

		BeforeEach(func() {
			sshKeyDeleter = bosh.NewSSHKeyDeleter()
			// Both variable blobs carry an unrelated key ("foo") plus the
			// jumpbox_ssh key that Delete is expected to remove.
			state = storage.State{
				BOSH: storage.BOSH{
					Variables: "foo: bar\njumpbox_ssh:\n  private_key: some-private-key",
				},
				Jumpbox: storage.Jumpbox{
					Variables: "foo: bar\njumpbox_ssh:\n  private_key: some-private-key",
				},
			}
			expectedState = storage.State{
				BOSH: storage.BOSH{
					Variables: "foo: bar\n",
				},
				Jumpbox: storage.Jumpbox{
					Variables: "foo: bar\n",
				},
			}
		})

		It("deletes the jumpbox ssh key from the state and returns the new state", func() {
			newState, err := sshKeyDeleter.Delete(state)
			Expect(err).NotTo(HaveOccurred())
			Expect(newState).To(Equal(expectedState))
		})

		Context("when the BOSH variables is invalid YAML", func() {
			It("returns an error", func() {
				state.BOSH.Variables = "invalid yaml"
				_, err := sshKeyDeleter.Delete(state)
				Expect(err).To(MatchError(ContainSubstring("BOSH variables: yaml: unmarshal errors:")))
			})
		})

		Context("when the Jumpbox variables is invalid YAML", func() {
			It("returns an error", func() {
				state.Jumpbox.Variables = "invalid yaml"
				_, err := sshKeyDeleter.Delete(state)
				Expect(err).To(MatchError(ContainSubstring("Jumpbox variables: yaml: unmarshal errors:")))
			})
		})
	})
})
package main

import (
	"fmt"
	"time"
)

// wait blocks for ten seconds, then reports which goroutine finished.
// NOTE(review): main never waits for these goroutines, so this print is
// never reached before the process exits — presumably intentional for a
// goroutine/defer demonstration; confirm.
func wait(index int) {
	time.Sleep(time.Second * 10)
	fmt.Println("finished", index)
}

func main() {
	for i := 0; i < 10; i++ {
		// Copy the loop variable so both the goroutine and the deferred
		// closure capture this iteration's value (required before Go 1.22
		// per-iteration loop variables).
		w := i
		go wait(w)
		// Deferred closures run LIFO at main's return: they print 9..0
		// after "finish", while the sleeping goroutines are abandoned.
		defer func() {
			fmt.Println(w)
		}()
	}
	fmt.Println("finish")
}
package rest

import (
	"regexp"
	"strconv"

	r "github.com/jinmukeji/jiujiantang-services/pkg/rest"
	"github.com/kataras/iris/v12"
)

// codeToMsg maps each error code to its standard English message.
var codeToMsg = map[int]string{
	ErrOK:                       "OK",
	ErrUnknown:                  "Unknown error",
	ErrClientUnauthorized:       "Unauthorized Client",
	ErrUserUnauthorized:         "Unauthorized User",
	ErrParsingRequestFailed:     "Parsing request body failed",
	ErrValueRequired:            "Value is required",
	ErrInvalidValue:             "Invalid value",
	ErrRPCInternal:              "RPC internal error",
	ErrRPCTimeout:               "RPC request is timeout",
	ErrClientInternal:           "Client internal error",
	ErrUsernamePasswordNotMatch: "Username and Password doesn't match",
	ErrNullClientID:             "ClientID is null",
	ErrIncorrectClientID:        "ClientID is incorrect",
	ErrInvalidSecretKey:         "SecretKey is invalid",
	ErrInvalidUser:              "User is invalid",
	ErrBuildJwtToken:            "Failed to build JWT token",
}

const (
	// Error code definitions.

	// ErrOK OK. Not used.
	ErrOK = 0
	// ErrUnknown Unknown error
	ErrUnknown = 1

	// Authorization / authentication / permission errors.

	// ErrClientUnauthorized client is not authorized
	ErrClientUnauthorized = 1000
	// ErrUserUnauthorized user is not authorized
	ErrUserUnauthorized = 1100
	// ErrUsernamePasswordNotMatch username/password mismatch
	ErrUsernamePasswordNotMatch = 1200
	// ErrNullClientID empty client ID
	ErrNullClientID = 1300
	// ErrIncorrectClientID incorrect client ID
	ErrIncorrectClientID = 1400
	// ErrInvalidSecretKey invalid secret key
	ErrInvalidSecretKey = 1500
	// ErrInvalidUser invalid user
	ErrInvalidUser = 1600

	// Request data errors.

	// ErrParsingRequestFailed failed to parse the request
	ErrParsingRequestFailed = 2000
	// ErrValueRequired a required value is missing
	ErrValueRequired = 2001
	// ErrInvalidValue invalid value
	ErrInvalidValue = 2002

	// RPC request related.

	// ErrRPCInternal RPC internal error
	ErrRPCInternal = 3000
	// ErrRPCTimeout RPC timeout
	ErrRPCTimeout = 3001

	// ErrBuildJwtToken JWT token generation error
	ErrBuildJwtToken = 4001

	// ErrClientInternal client internal error
	ErrClientInternal = 5000
)

// ErrorMsg returns the standard message for the given error code, or the
// empty string for unknown codes.
func ErrorMsg(code int) string {
	if msg, ok := codeToMsg[code]; ok {
		return msg
	}
	return ""
}

// wrapError wraps code/cause/err into the shared rest Error type, filling
// in the standard message for the code.
func wrapError(code int, cause string, err error) r.Error {
	return r.NewErrorWithError(code, ErrorMsg(code), cause, err)
}

// writeError logs the error (with its internal cause when present) and
// writes it to the response as JSON.
func writeError(ctx iris.Context, err r.Error, shouldBeArrayData bool) {
	l := r.ContextLogger(ctx)
	if err.InternalError != nil {
		l.WithError(err.InternalError).Warn(err.Error())
	} else {
		l.Warn(err.Error())
	}
	r.WriteErrorJSON(ctx, err, shouldBeArrayData)
}

var (
	// regErrorCode extracts the numeric code from RPC error strings of the
	// form "... [errcode:<digits>] ...".
	regErrorCode = regexp.MustCompile(`\[errcode:(\d+)\]`)
)

// rpcErrorCode parses the embedded [errcode:N] marker out of an RPC error
// message, reporting whether one was found.
func rpcErrorCode(rpcErr error) (int, bool) {
	regSubmatches := regErrorCode.FindStringSubmatch(rpcErr.Error())
	if len(regSubmatches) >= 2 {
		if rpcCode, err := strconv.Atoi(regSubmatches[1]); err == nil {
			return rpcCode, true
		}
	}
	return 0, false
}

// writeRpcInternalError maps an RPC error to a known error code when its
// message carries a recognized [errcode:N] marker, falling back to
// ErrRPCInternal otherwise.
func writeRpcInternalError(ctx iris.Context, err error, shouldBeArrayData bool) {
	if code, ok := rpcErrorCode(err); ok {
		if _, ok := codeToMsg[code]; ok {
			writeError(ctx, wrapError(code, "", err), shouldBeArrayData)
			return
		}
	}
	writeError(ctx, wrapError(ErrRPCInternal, "", err), shouldBeArrayData)
}
package gitlab

import (
	"archive/zip"
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/rs/zerolog"
	"github.com/sirkon/gitlab"
	"github.com/sirkon/gitlab/gitlabdata"

	"github.com/sirkon/goproxy/internal/errors"

	"github.com/sirkon/goproxy"
	"github.com/sirkon/goproxy/fsrepack"
	"github.com/sirkon/goproxy/gomod"
	"github.com/sirkon/goproxy/semver"
)

// gitlabModule serves go-proxy module requests backed by a GitLab project.
// pathUnversioned is the project path with any /vN major-version suffix
// stripped; most lookups try it first and fall back to the full path.
type gitlabModule struct {
	client          gitlab.Client
	fullPath        string
	path            string
	pathUnversioned string
	major           int
}

// ModulePath returns the module path this source serves.
func (s *gitlabModule) ModulePath() string {
	return s.path
}

// Versions lists available versions matching prefix, trying the
// unversioned project path first and falling back to the full path.
func (s *gitlabModule) Versions(ctx context.Context, prefix string) ([]string, error) {
	tags, err := s.getVersions(ctx, prefix, s.pathUnversioned)
	if err == nil {
		return tags, err
	}
	if s.pathUnversioned == s.path {
		return nil, err
	}
	zerolog.Ctx(ctx).Warn().Err(err).Msgf("failed to get with unversioned path `%s` (original %s), someone is loving cars a bit too much :)", s.pathUnversioned, s.path)
	return s.getVersions(ctx, prefix, s.path)
}

// getVersions collects the project's semver tags; when the project has no
// valid semver tags it synthesizes a pseudo-version from the master branch.
func (s *gitlabModule) getVersions(ctx context.Context, prefix string, path string) ([]string, error) {
	tags, err := s.client.Tags(ctx, path, "")
	if err != nil {
		return nil, errors.Wrapf(err, "gitlab getting tags from gitlab repository")
	}
	var resp []string
	for _, tag := range tags {
		if semver.IsValid(tag.Name) {
			resp = append(resp, tag.Name)
		}
	}
	if len(resp) > 0 {
		return resp, nil
	}
	info, err := s.getStat(ctx, "master")
	if err != nil {
		zerolog.Ctx(ctx).Warn().Err(err).Msg("getting revision info for master")
		return nil, errors.Newf("gitlab no tags found in the current repo")
	}
	return []string{info.Version}, nil
}

// Stat resolves revision info for rev, rejecting revisions whose semver
// major exceeds the major version implied by the module path.
func (s *gitlabModule) Stat(ctx context.Context, rev string) (*goproxy.RevInfo, error) {
	res, err := s.getStat(ctx, rev)
	if err != nil {
		return nil, err
	}
	if major := semver.Major(res.Version); major >= 2 && s.major < major {
		return nil, errors.Newf("gitlab branch relates to higher major version v%d than what was expected from module path (v%d)", major, s.major)
	}
	return res, nil
}

// getStat dispatches on the revision shape: semver revisions resolve via
// tags, anything else (branch, SHA) gets a synthesized pseudo-version.
func (s *gitlabModule) getStat(ctx context.Context, rev string) (res *goproxy.RevInfo, err error) {
	if semver.IsValid(rev) {
		return s.statVersion(ctx, rev)
	}

	// revision looks like a branch or non-semver tag, need to build pseudo-version
	return s.statWithPseudoVersion(ctx, rev)
}

// statVersion processing for semver revision. If the revision itself looks
// like a pseudo-version, the embedded short SHA is resolved first; otherwise
// the revision must match a tag exactly.
func (s *gitlabModule) statVersion(ctx context.Context, rev string) (*goproxy.RevInfo, error) {
	// check if this rev does look like pseudo-version – will try statWithPseudoVersion in this case with short SHA
	pseudo := semver.Pseudo(rev)
	if len(pseudo) > 0 {
		res, err := s.statWithPseudoVersion(ctx, pseudo)
		if err == nil {
			// should use base version from the commit itself
			if semver.Compare(rev, res.Version) > 0 {
				res.Version = rev
			}
			return res, nil
		}
	}
	tags, err := s.client.Tags(ctx, s.pathUnversioned, rev)
	if err != nil {
		tags, err = s.client.Tags(ctx, s.path, rev)
		if err != nil {
			return nil, errors.Wrapf(err, "gitlab getting tags from gitlab repository")
		}
	}

	// Looking for exact revision match
	for _, tag := range tags {
		if tag.Name == rev {
			return &goproxy.RevInfo{
				Version: tag.Name,
				Time:    tag.Commit.CreatedAt,
				Name:    tag.Commit.ID,
				Short:   tag.Commit.ShortID,
			}, nil
		}
	}
	return nil, errors.Newf("gitlab state: unknown revision %s for %s", rev, s.path)
}

// statWithPseudoVersion builds a Go pseudo-version for a non-semver
// revision: it finds the newest semver tag reachable from rev, derives the
// base version from it (or from the module's expected major), and appends
// the head commit's timestamp and short SHA.
func (s *gitlabModule) statWithPseudoVersion(ctx context.Context, rev string) (*goproxy.RevInfo, error) {
	commits, err := s.client.Commits(ctx, s.pathUnversioned, rev)
	if err != nil {
		commits, err = s.client.Commits(ctx, s.path, rev)
		if err != nil {
			return nil, errors.Wrapf(err, "getting commits for `%s`", rev)
		}
	}
	if len(commits) == 0 {
		return nil, errors.Newf("no commits found for revision %s", rev)
	}
	commitMap := make(map[string]*gitlabdata.Commit, len(commits))
	for _, commit := range commits {
		commitMap[commit.ID] = commit
	}

	// looking for the most recent semver tag
	tags, err := s.client.Tags(ctx, s.pathUnversioned, "") // all tags
	if err != nil {
		tags, err = s.client.Tags(ctx, s.path, "")
		if err != nil {
			return nil, errors.Wrapf(err, "getting tags")
		}
	}
	maxVer := "v0.0.0"
	for _, tag := range tags {
		// Only consider tags pointing at commits reachable from rev.
		if _, ok := commitMap[tag.Commit.ID]; !ok {
			continue
		}
		if !semver.IsValid(tag.Name) {
			continue
		}
		maxVer = semver.Max(maxVer, tag.Name)
	}
	var base string
	if semver.Major(maxVer) < s.major {
		base = fmt.Sprintf("v%d.0.0-", s.major)
	} else {
		major, minor, patch := semver.MajorMinorPatch(maxVer)
		base = fmt.Sprintf("v%d.%d.%d-0.", major, minor, patch+1)
	}

	// Should set appropriate version
	commit := commits[0]
	moment := commit.CreatedAt
	// NOTE(review): the timestamp is sliced positionally, assuming an
	// RFC3339-like "YYYY-MM-DDTHH:MM:SS..." string; a shorter CreatedAt
	// would panic — confirm the gitlab client's format guarantee.
	var (
		year   = moment[:4]
		month  = moment[5:7]
		day    = moment[8:10]
		hour   = moment[11:13]
		minute = moment[14:16]
		second = moment[17:19]
	)
	pseudoVersion := fmt.Sprintf("%s%s%s%s%s%s%s-%s",
		base,
		year, month, day,
		hour, minute, second,
		commit.ShortID,
	)
	return &goproxy.RevInfo{
		Version: pseudoVersion,
		Time:    moment,
	}, nil
}

// GoMod returns the module's go.mod at the given version, synthesizing a
// minimal one when the repository has none, and verifying that the go.mod
// module path matches the requested module path.
func (s *gitlabModule) GoMod(ctx context.Context, version string) (data []byte, err error) {
	goMod, err := s.getGoMod(ctx, version)
	if err != nil {
		// NOTE(review): os.IsNotExist on an error from the gitlab HTTP
		// client only works if that client wraps missing files in
		// os-style not-exist errors — confirm.
		if os.IsNotExist(err) {
			return []byte("module " + s.fullPath), nil
		}
		return nil, errors.Wrap(err, "gitlab getting go.mod")
	}
	res, err := gomod.Parse("go.mod", goMod)
	if err != nil {
		return nil, errors.Wrapf(err, "gitlab parsing repository go.mod")
	}
	if res.Name != s.fullPath {
		return nil, errors.Newf("gitlab module path is not equal to go.mod module path: %s ≠ %s", res.Name, s.fullPath)
	}
	return goMod, nil
}

// getGoMod fetches go.mod for version, first resolving a pseudo-version's
// embedded SHA, and in each case trying the unversioned path before the
// full one.
func (s *gitlabModule) getGoMod(ctx context.Context, version string) ([]byte, error) {
	// try with pseudo version first
	if sha := semver.Pseudo(version); len(sha) > 0 {
		res, err := s.client.File(ctx, s.pathUnversioned, "go.mod", sha)
		if err == nil {
			return res, nil
		}
		res, err = s.client.File(ctx, s.path, "go.mod", sha)
		if err == nil {
			return res, nil
		}
	}
	res, err := s.client.File(ctx, s.pathUnversioned, "go.mod", version)
	if err == nil {
		return res, nil
	}
	return s.client.File(ctx, s.path, "go.mod", version)
}

// bufferCloser adds a no-op Close to bytes.Buffer so an in-memory buffer
// can be returned where an io.ReadCloser is required.
type bufferCloser struct {
	bytes.Buffer
}

// Close makes bufferCloser io.ReadCloser
func (*bufferCloser) Close() error { return nil }

// Zip returns the module source archive for version, resolving a
// pseudo-version's embedded SHA first.
func (s *gitlabModule) Zip(ctx context.Context, version string) (io.ReadCloser, error) {
	if sha := semver.Pseudo(version); len(sha) > 0 {
		res, err := s.getZip(ctx, sha, version)
		if err == nil {
			return res, nil
		}
	}
	return s.getZip(ctx, version, version)
}

// getZip downloads the project archive at revision and repacks it into the
// directory layout the Go module proxy protocol expects (see the comment
// block inside).
func (s *gitlabModule) getZip(ctx context.Context, revision, version string) (io.ReadCloser, error) {
	modInfo, err := s.client.ProjectInfo(ctx, s.pathUnversioned)
	if err != nil {
		modInfo, err = s.client.ProjectInfo(ctx, s.path)
		if err != nil {
			return nil, errors.Wrapf(err, "gitlab getting project %s info", s.path)
		}
	}
	archive, err := s.client.Archive(ctx, modInfo.ID, revision)
	if err != nil {
		return nil, errors.Wrap(err, "getting zipped archive data")
	}
	repacker, err := fsrepack.Gitlab(s.fullPath, version)
	if err != nil {
		return nil, errors.Wrap(err, "gitlab initiating repacker for gitlab archive source")
	}

	// now need to repack archive content from <pkg-name>-<hash> → <full pkg name, such as gitlab.com/user/module>, e.g.
	//
	// > module-f5d5d62240829ba7f38614add00c4aba587cffb1:
	// >     go.mod
	// >     pkg.go
	//
	// from gitlab.com/user/module, where f5d5d62240829ba7f38614add00c4aba587cffb1 is a hash of the revision tagged
	// v0.0.1 will be repacked into
	//
	// > gitlab.com:
	// >     user.name:
	// >         module@v0.1.2:
	// >             go.mod
	// >             pkg.go
	zipped, err := ioutil.ReadAll(archive)
	if err != nil {
		return nil, errors.Wrap(err, "gitlab reading gitlab source archive")
	}
	zipReader, err := zip.NewReader(bytes.NewReader(zipped), int64(len(zipped)))
	if err != nil {
		return nil, errors.Wrap(err, "gitlab extracting zipped source data")
	}

	rawDest := &bufferCloser{}
	result := rawDest

	dest := zip.NewWriter(rawDest)
	defer func() {
		if err := dest.Close(); err != nil {
			logger := zerolog.Ctx(ctx)
			logger.Error().Err(err).Msgf("closing an output archive")
		}
	}()
	if err := dest.SetComment(zipReader.Comment); err != nil {
		return nil, errors.Wrap(err, "setting comment to output archive")
	}

	for _, file := range zipReader.File {
		// Map each entry's path from the <name>-<hash>/ prefix to the
		// module@version layout.
		tmp, err := repacker.Relativer(file.Name)
		if err != nil {
			return nil, errors.Wrap(err, "gitlab relative file name computation")
		}
		fileName := repacker.Destinator(tmp)

		isDir := file.FileInfo().IsDir()

		fh := file.FileHeader
		fh.Name = fileName
		fileWriter, err := dest.CreateHeader(&fh)
		if err != nil {
			return nil, errors.Wrapf(err, "copying attributes for %s", fileName)
		}
		if isDir {
			continue
		}

		fileData, err := file.Open()
		if err != nil {
			return nil, errors.Wrapf(err, "opening file for %s", fileName)
		}
		if _, err := io.Copy(fileWriter, fileData); err != nil {
			if cErr := fileData.Close(); cErr != nil {
				logger := zerolog.Ctx(ctx)
				logger.Error().Err(cErr).Msgf("closing a file for %s", fileName)
			}
			return nil, errors.Wrapf(err, "copying content for %s", fileName)
		}
		if err := fileData.Close(); err != nil {
			return nil, errors.Wrapf(err, "closing zip file for %s", fileName)
		}
	}

	return result, nil
}
package loaders_test import ( "context" "testing" "time" "github.com/syncromatics/kafmesh/internal/graph/loaders" "github.com/syncromatics/kafmesh/internal/graph/model" gomock "github.com/golang/mock/gomock" "github.com/pkg/errors" "gotest.tools/assert" ) func Test_Services_Components(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() repository := NewMockServiceRepository(ctrl) repository.EXPECT(). ComponentsByServices(gomock.Any(), []int{12}). Return([][]*model.Component{ []*model.Component{&model.Component{}}, }, nil). Times(1) repository.EXPECT(). ComponentsByServices(gomock.Any(), []int{13}). Return(nil, errors.Errorf("boom")). Times(1) loader := loaders.NewServiceLoader(context.Background(), repository, 10*time.Millisecond) r, err := loader.ComponentsByService(12) assert.NilError(t, err) assert.Assert(t, r != nil) _, err = loader.ComponentsByService(13) assert.ErrorContains(t, err, "failed to get components from repository: boom") } func Test_Services_DependsOn(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() repository := NewMockServiceRepository(ctrl) repository.EXPECT(). DependsOn(gomock.Any(), []int{12}). Return([][]*model.Service{ []*model.Service{&model.Service{}}, }, nil). Times(1) repository.EXPECT(). DependsOn(gomock.Any(), []int{13}). Return(nil, errors.Errorf("boom")). Times(1) loader := loaders.NewServiceLoader(context.Background(), repository, 10*time.Millisecond) r, err := loader.DependsOn(12) assert.NilError(t, err) assert.Assert(t, r != nil) _, err = loader.DependsOn(13) assert.ErrorContains(t, err, "failed to get depends on services from repository: boom") }
package config

import (
	"encoding/json" // decodes config.json into configStruct
	"fmt"           // progress and error printing
	"io/ioutil"     // reads the raw bytes of config.json
)

var (
	Token     string        // value of "Token" from config.json
	BotPrefix string        // value of "BotPrefix" from config.json
	config    *configStruct // decoded contents of config.json
)

// configStruct mirrors the JSON layout of config.json.
//
// BUG FIX: the original tags were written as `json : "Token"`. A struct tag
// must not contain spaces around the colon, so those tags were silently
// ignored and decoding only worked because the field names happen to match
// the JSON keys case-insensitively. The tags below are well-formed.
type configStruct struct {
	Token     string `json:"Token"`
	BotPrefix string `json:"BotPrefix"`
}

// ReadConfig loads ./config.json, decodes it, and copies the values into the
// exported Token and BotPrefix variables. Any read or decode error is printed
// and returned.
func ReadConfig() error {
	fmt.Println("Reading config file...")

	file, err := ioutil.ReadFile("./config.json")
	if err != nil {
		fmt.Println(err.Error())
		return err
	}

	// Echo the raw file contents for debugging.
	fmt.Println(string(file))

	// Unmarshal needs a pointer to fill in the destination.
	err = json.Unmarshal(file, &config)
	if err != nil {
		fmt.Println(err.Error())
		return err
	}

	// Expose the decoded values through the package-level variables.
	Token = config.Token
	BotPrefix = config.BotPrefix

	return nil
}
package azurecontrollers import ( "context" "github.com/sirupsen/logrus" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" "github.com/openshift/openshift-azure/pkg/controllers/customeradmin" "github.com/openshift/openshift-azure/pkg/util/log" ) func start(cfg *cmdConfig) error { ctx := context.Background() logrus.SetLevel(log.SanitizeLogLevel(cfg.LogLevel)) logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true}) log := logrus.NewEntry(logrus.StandardLogger()) log.Print("azure-controller pod starting") // TODO: Expose metrics port after SDK uses controller-runtime's dynamic client // sdk.ExposeMetricsPort() managerConfig, err := config.GetConfig() if err != nil { return err } m, err := manager.New(managerConfig, manager.Options{}) if err != nil { return err } stopCh := signals.SetupSignalHandler() if err := customeradmin.AddToManager(ctx, log, m, stopCh); err != nil { return err } log.Print("starting manager") return m.Start(stopCh) }
package vptree type heapItem struct { Item *Item Dist float32 } // A heap must be initialized before any of the heap operations // can be used. Init is idempotent with respect to the heap invariants // and may be called whenever the heap invariants may have been invalidated. // Its complexity is O(n) where n = h.Len(). // //func Init(h Interface) { // // heapify // n := h.Len() // for i := n/2 - 1; i >= 0; i-- { // down(h, i, n) // } //} // Push pushes the element x onto the heap. The complexity is // O(log(n)) where n = h.Len(). // func (pq *priorityQueue) Push(x *heapItem) { *pq = append(*pq, x) pq.up(len(*pq) - 1) } // Pop removes the minimum element (according to Less) from the heap // and returns it. The complexity is O(log(n)) where n = h.Len(). // It is equivalent to Remove(h, 0). // func (pq *priorityQueue) Pop() *heapItem { n := len(*pq) - 1 pq.Swap(0, n) pq.down(0, n) old := *pq n = len(old) item := old[n-1] *pq = old[0 : n-1] return item } // Remove removes the element at index i from the heap. // The complexity is O(log(n)) where n = h.Len(). // func (pq *priorityQueue) Remove(i int) *heapItem { n := len(*pq) - 1 if n != i { pq.Swap(i, n) pq.down(i, n) pq.up(i) } return pq.Pop() } // Fix re-establishes the heap ordering after the element at index i has changed its value. // Changing the value of the element at index i and then calling Fix is equivalent to, // but less expensive than, calling Remove(h, i) followed by a Push of the new value. // The complexity is O(log(n)) where n = h.Len(). 
func (pq *priorityQueue) Fix(i int) { pq.down(i, len(*pq)) pq.up(i) } func (pq *priorityQueue) up(j int) { for { i := (j - 1) / 2 // parent if i == j || !pq.Less(j, i) { break } pq.Swap(i, j) j = i } } func (pq *priorityQueue) down(i, n int) { for { j1 := 2*i + 1 if j1 >= n || j1 < 0 { // j1 < 0 after int overflow break } j := j1 // left child if j2 := j1 + 1; j2 < n && !pq.Less(j1, j2) { j = j2 // = 2*i + 2 // right child } if !pq.Less(j, i) { break } pq.Swap(i, j) i = j } } type priorityQueue []*heapItem func (pq priorityQueue) Less(i, j int) bool { // We want a max-heap, so we use greater-than here return pq[i].Dist > pq[j].Dist } func (pq priorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } func (pq priorityQueue) Top() *heapItem { return pq[0] }
package main import ( "fmt" "log" "net/http" "os" "github.com/BolajiOlajide/go-api/controllers" "github.com/BolajiOlajide/go-api/database" "github.com/gorilla/mux" "github.com/joho/godotenv" ) func main() { err := godotenv.Load() if err != nil { log.Fatal("Error loading .env file") return } fmt.Println("Starting the application...") database.InitializeDB() router := mux.NewRouter() // routes router.HandleFunc("/person", controllers.CreatePerson).Methods("POST") router.HandleFunc("/people", controllers.GetPeople).Methods("GET") router.HandleFunc("/person/{id}", controllers.GetPerson).Methods("GET") port := ":" + os.Getenv("PORT") http.ListenAndServe(port, router) }
package handlers

import (
	"errors"

	"github.com/valyala/fasthttp"

	"github.com/authelia/authelia/v4/internal/middlewares"
	"github.com/authelia/authelia/v4/internal/model"
	"github.com/authelia/authelia/v4/internal/session"
	"github.com/authelia/authelia/v4/internal/storage"
)

// UserTOTPInfoGET returns the users TOTP configuration.
//
// Responses: 403 when no user session can be retrieved, 404 when the user has
// no stored TOTP configuration, 500 on any other storage error, otherwise the
// configuration as a JSON body with status 200.
func UserTOTPInfoGET(ctx *middlewares.AutheliaCtx) {
	var (
		userSession session.UserSession
		err         error
	)

	// Without a session we do not know whose configuration to load.
	if userSession, err = ctx.GetSession(); err != nil {
		ctx.Logger.WithError(err).Error("Error occurred retrieving user session")

		ctx.ReplyForbidden()

		return
	}

	var config *model.TOTPConfiguration

	if config, err = ctx.Providers.StorageProvider.LoadTOTPConfiguration(ctx, userSession.Username); err != nil {
		if errors.Is(err, storage.ErrNoTOTPConfiguration) {
			// A missing configuration is an expected condition: 404.
			ctx.SetStatusCode(fasthttp.StatusNotFound)
			ctx.SetJSONError("Could not find TOTP Configuration for user.")
			ctx.Logger.Errorf("Failed to lookup TOTP configuration for user '%s'", userSession.Username)
		} else {
			// Any other storage failure: 500. The client-facing message is
			// the same generic text as the 404 branch — presumably to avoid
			// leaking storage details to the client; confirm this is
			// intentional before changing it. The log carries the real error.
			ctx.SetStatusCode(fasthttp.StatusInternalServerError)
			ctx.SetJSONError("Could not find TOTP Configuration for user.")
			ctx.Logger.Errorf("Failed to lookup TOTP configuration for user '%s' with unknown error: %v", userSession.Username, err)
		}

		return
	}

	if err = ctx.SetJSONBody(config); err != nil {
		ctx.Logger.Errorf("Unable to perform TOTP configuration response: %s", err)
	}

	// NOTE(review): 200 is set even when SetJSONBody failed above — confirm
	// whether an error status would be more appropriate in that case.
	ctx.SetStatusCode(fasthttp.StatusOK)
}
package api

import (
	"budget-calendar/database"

	"github.com/gin-gonic/gin"
)

// Routes registers all routes provided by this package, nested under the
// api/v1/* group.
//
// NOTE(review): no routes are registered yet; r and db are accepted so the
// wiring is in place for future handlers.
func Routes(r *gin.RouterGroup, db *database.DB) {

}
package main import ( "github.com/uploadService/mPath" "math/rand" "strings" "time" ) type FileMsg struct { FullFileName string //全路径 C:\jalen\bin\aa.png FileSuffix string //文件类型 .png FileDir string //文件夹 c:\jalen\bin FileNameWithSuffix string //带文件类型的文件名 aa.png FileName string //单纯的文件名 aa SecFileName string //通过算法统一加密后的文件名 比如 2020102732232232.png } func (f *FileMsg) InitFileMsg() *FileMsg { f.FileNameWithSuffix = mPath.Base(f.FullFileName) //获取文件名带后缀 f.FileSuffix = mPath.Ext(f.FileNameWithSuffix) //获取文件后缀 f.FileName = strings.TrimSuffix(f.FileNameWithSuffix, f.FileSuffix) //获取文件名 return f } func (f *FileMsg) GenSecFileName() { if f.FileSuffix == "" { return } //当前系统时间 + 一个随机字符串 //f.SecFileName = time.Now().Format("20060102150405") + strconv.FormatInt(rand.Int63n(1000), 10) + f.FileSuffix f.SecFileName = time.Now().Format("20060102150405") + RandStr(10) + f.FileSuffix } func RandStr(length int) string { str := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" bytes := []byte(str) result := make([]byte, 0) rand.Seed(time.Now().UnixNano() + int64(rand.Intn(100))) for i := 0; i < length; i++ { result = append(result, bytes[rand.Intn(len(bytes))]) } return string(result) }
package csvwriter

import (
	"encoding/csv"
	"os"
	"regexp"
)

// Regexes are compiled once at package init; compiling inside Export would
// redo that work on every call.
//
// BUG FIX: the original patterns each carried a trailing "|/g" alternative —
// a leftover from JavaScript regex-literal syntax — which made them also
// strip the literal substring "/g" from names, and the special-character
// pattern wrote `\|\*`, which matches the two-character sequence "|*" rather
// than either character on its own. The character class below matches each
// forbidden character individually.
var (
	schemeRe      = regexp.MustCompile(`^https?://`)
	specialCharRe = regexp.MustCompile(`[/\\:?."<>|*]`)
)

// Export writes each element of array as a single-column CSV row to a file
// named after name: the leading URL scheme is stripped and characters that
// are not safe in file names are replaced by '-' before ".csv" is appended.
// It panics if the file cannot be created.
func Export(array []string, name string) {
	nameWithoutScheme := schemeRe.ReplaceAllString(name, "")
	n := specialCharRe.ReplaceAllString(nameWithoutScheme, "-")

	file, err := os.Create(n + ".csv")
	if err != nil {
		panic(err)
	}
	defer file.Close()

	cw := csv.NewWriter(file)
	// Flush pushes any buffered rows to the file before it is closed.
	defer cw.Flush()

	for _, value := range array {
		cw.Write([]string{value})
	}
}
package spacesaving

// Counter is one node of the doubly linked counter list used by the
// space-saving algorithm: a key, its count (Value), its error bound
// (ErrorCount), links to its neighbours, and a back-reference to the bucket
// that owns it.
type Counter struct {
	Next         *Counter
	Prev         *Counter
	Value        uint64
	ErrorCount   uint64
	Key          string
	ParentBucket *Bucket
}

// DoubleLinkedCounter is a doubly linked list of Counters supporting O(1)
// insertion and removal.
type DoubleLinkedCounter struct {
	Head *Counter
	Tail *Counter
}

// NewDoubleLinkedCounter returns an empty list.
func NewDoubleLinkedCounter() *DoubleLinkedCounter {
	return &DoubleLinkedCounter{
		Head: nil,
		Tail: nil,
	}
}

// insertEnd appends node at the tail, or makes it the sole element when the
// list is empty.
func (dlc *DoubleLinkedCounter) insertEnd(node *Counter) {
	if dlc.Tail == nil {
		dlc.insertBeginning(node)
	} else {
		dlc.insertAfter(dlc.Tail, node)
	}
}

// insertBeginning prepends node at the head, or makes it the sole element
// when the list is empty.
func (dlc *DoubleLinkedCounter) insertBeginning(node *Counter) {
	if dlc.Head == nil {
		dlc.Head = node
		dlc.Tail = node
		node.Prev = nil
		node.Next = nil
	} else {
		dlc.insertBefore(dlc.Head, node)
	}
}

// insertAfter links newNode directly after node, updating Tail when node was
// the last element. node must already be in the list.
func (dlc *DoubleLinkedCounter) insertAfter(node *Counter, newNode *Counter) {
	newNode.Prev = node
	newNode.Next = node.Next
	if node.Next == nil {
		dlc.Tail = newNode
	} else {
		node.Next.Prev = newNode
	}
	node.Next = newNode
}

// insertBefore links newNode directly before node, updating Head when node
// was the first element. node must already be in the list.
func (dlc *DoubleLinkedCounter) insertBefore(node *Counter, newNode *Counter) {
	newNode.Prev = node.Prev
	newNode.Next = node
	if node.Prev == nil {
		dlc.Head = newNode
	} else {
		node.Prev.Next = newNode
	}
	node.Prev = newNode
}

// Remove unlinks node from the list, fixing up Head/Tail as needed. node
// must currently be linked into this list.
func (dlc *DoubleLinkedCounter) Remove(node *Counter) {
	if node.Prev == nil {
		dlc.Head = node.Next
	} else {
		node.Prev.Next = node.Next
	}
	if node.Next == nil {
		dlc.Tail = node.Prev
	} else {
		node.Next.Prev = node.Prev
	}
}

// Empty reports whether the list has no elements.
func (dlc *DoubleLinkedCounter) Empty() bool {
	return dlc.Head == nil
}
// Sample use : // // ./pinger --ip http://dgraph.io/query --numuser 3 // // package main import ( "bytes" "encoding/json" "flag" "fmt" "io/ioutil" "log" "net/http" "sync" "time" "github.com/dgraph-io/dgraph/x" ) var ( numUser = flag.Int("numuser", 1, "number of users hitting simultaneously") numReq = flag.Int("numreq", 10, "number of request per user") serverAddr = flag.String("ip", ":8081", "IP addr of server") avg chan float64 jsonP chan float64 serverP chan float64 parsingP chan float64 totalP chan float64 glog = x.Log("Pinger") ) func runUser(wg *sync.WaitGroup) { var ti, proT, parT, jsonT, totT time.Duration var query = `{ me(_xid_: m.0f4vbz) { type.object.name.en film.actor.film { film.performance.film { type.object.name.en } } } }` client := &http.Client{Transport: &http.Transport{ MaxIdleConnsPerHost: 100, }} var dat map[string]interface{} var latency map[string]interface{} for i := 0; i < *numReq; i++ { r, _ := http.NewRequest("POST", *serverAddr, bytes.NewBufferString(query)) t0 := time.Now() //fmt.Println(i) resp, err := client.Do(r) t1 := time.Now() if err != nil { glog.WithField("Err", resp.Status).Fatalf("Error in query") } else { body, err := ioutil.ReadAll(resp.Body) if err != nil { log.Fatalf("Couldn't parse response body. 
%+v", err) } resp.Body.Close() err = json.Unmarshal(body, &dat) if err != nil { glog.Fatalf("Error in reply") } //fmt.Println(dat["server_latency"]) ti += t1.Sub(t0) // json:1.144568ms parsing:4.031346ms processing:3.726298ms total:8.904975ms temp := dat["server_latency"] latency = temp.(map[string]interface{}) pro, _ := time.ParseDuration(latency["processing"].(string)) proT += pro js, _ := time.ParseDuration(latency["json"].(string)) jsonT += js par, _ := time.ParseDuration(latency["parsing"].(string)) parT += par tot, _ := time.ParseDuration(latency["total"].(string)) totT += tot } // fmt.Println("user", i) } avg <- ti.Seconds() serverP <- proT.Seconds() jsonP <- jsonT.Seconds() parsingP <- parT.Seconds() totalP <- totT.Seconds() fmt.Println("Done") wg.Done() } func main() { flag.Parse() var totTime, serTi, jsonTi, parTi, totTi float64 var wg sync.WaitGroup avg = make(chan float64, *numUser) serverP = make(chan float64, *numUser) totalP = make(chan float64, *numUser) parsingP = make(chan float64, *numUser) jsonP = make(chan float64, *numUser) wg.Add(*numUser) for i := 0; i < *numUser; i++ { fmt.Println("user", i) go runUser(&wg) } wg.Wait() close(avg) close(serverP) close(parsingP) close(jsonP) close(totalP) for it := range avg { totTime += it } for it := range serverP { serTi += it } for it := range parsingP { parTi += it } for it := range jsonP { jsonTi += it } for it := range totalP { totTi += it } fmt.Println("Average time : ", totTime, totTime/float64(*numUser*(*numReq))) fmt.Println("Total time : ", totTi, totTi/float64(*numUser*(*numReq))) fmt.Println("Json time : ", jsonTi, jsonTi/float64(*numUser*(*numReq))) fmt.Println("Processing time : ", serTi, serTi/float64(*numUser*(*numReq))) fmt.Println("Parsing time : ", parTi, parTi/float64(*numUser*(*numReq))) }
package main

import (
	"apiapp/api"
	"common/config"
	"common/logger"
	"common/model"
	"fmt"
)

// main boots the alcedo api server: it loads config.ini, configures the
// logger from that config, opens the MongoDB connection, and finally starts
// the REST API handlers. The steps must run in this order because each later
// step reads values prepared by the earlier ones.
func main() {
	fmt.Println("Starting alcedo api server ......")

	// Load the application's config.ini.
	config.Init()
	fmt.Println("Init APP config.ini OK ...... ")

	// Configure the logger from the api_* settings just loaded.
	category := config.GetPropStr("api_log_category")
	outtype := config.GetPropStr("log_output")
	outfile := config.GetPropStr("api_log_file")
	logger.InitLogger(category, outtype, outfile)
	fmt.Println("Init Logger OK .....")

	// Open the MongoDB connection.
	model.InitModel()
	fmt.Println("Init MongoDB OK .....")

	// Start the REST API service — presumably blocks serving requests;
	// confirm in the api package.
	api.InitHandler()
}
package main import ( "bufio" "fmt" "io" "io/ioutil" "os" ) func main() { backup := exec.Command("gnome-terminal", "-x", "sh", "-c", "go run backup.go") CreateFile() PrimaryProcess() for{ if !IsAlive(){ backup.Run() } } dat, err := ioutil.ReadFile("backup") CheckError(err) fmt.Print(dat) c := make([]byte, 1) c[0] = 1 for{ time.se c[0] = 0 fmt.Println(c[0]) //send to backup err := ioutil.WriteFile("backup", c, os.ModeDevice) CheckError(err) err1 := os.Chtimes("backup", atime, mtime) CheckError(err1) } func CreateFile() { // detect if file exists var _, err = os.Stat(path) // create file if not exists if os.IsNotExist(err) { var file, err = os.Create(path) CheckError(err) defer file.Close() }else{ //readFile } }
package hash import ( "encoding/base64" "crypto/md5" "crypto/sha1" "hash/crc32" "encoding/hex" ) //与php base64_encode()相同 func Base64Encode(src string) string { return base64.StdEncoding.EncodeToString([]byte(src)) } //与php base64_decode()相同 func Base64Decode(src string) (string, error) { decode, err := base64.StdEncoding.DecodeString(src) return string(decode), err } //与php md5()相同 func Md5(src string) string { res := md5.Sum([]byte(src)) return hex.EncodeToString(res[:]) } //与php sha1()相同 func Sha1(src string) string { res := sha1.Sum([]byte(src)) return hex.EncodeToString(res[:]) } //与php crc32()相同 func Crc32(src string) uint32 { return crc32.ChecksumIEEE([]byte(src)) }
package 遍历 func verifyPostorder(postorder []int) bool { if len(postorder) <= 1 { return true } rightTreeLeftIndex, rightTreeRightIndex := getIndexOfFirstNumGreaterThanLastNum(postorder), len(postorder)-2 leftTreeLeftIndex, leftTreeRightIndex := 0, rightTreeLeftIndex-1 rootVal := postorder[len(postorder)-1] if isExistGreaterNum(postorder[leftTreeLeftIndex:leftTreeRightIndex+1], rootVal) { return false } return verifyPostorder(postorder[leftTreeLeftIndex:leftTreeRightIndex+1]) && verifyPostorder(postorder[rightTreeLeftIndex:rightTreeRightIndex+1]) } func isExistGreaterNum(arr []int, ref int) bool { for i := 0; i < len(arr); i++ { if arr[i] > ref { return true } } return false } func getIndexOfFirstNumGreaterThanLastNum(arr []int) int { lastNum := arr[len(arr)-1] for i := len(arr) - 2; i >= 0; i-- { if arr[i] < lastNum { return i + 1 } } return 0 } /* 题目链接: https://leetcode-cn.com/problems/er-cha-sou-suo-shu-de-hou-xu-bian-li-xu-lie-lcof/ 总结: 1. 官方还有人用单调栈做这题的! */
package sv import ( "bytes" "io/fs" "os" "sort" "text/template" "time" "github.com/Masterminds/semver/v3" ) type releaseNoteTemplateVariables struct { Release string Tag string Version *semver.Version Date time.Time Sections []ReleaseNoteSection AuthorNames []string } // OutputFormatter output formatter interface. type OutputFormatter interface { FormatReleaseNote(releasenote ReleaseNote) (string, error) FormatChangelog(releasenotes []ReleaseNote) (string, error) } // OutputFormatterImpl formater for release note and changelog. type OutputFormatterImpl struct { templates *template.Template } // NewOutputFormatter TemplateProcessor constructor. func NewOutputFormatter(templatesFS fs.FS) *OutputFormatterImpl { templateFNs := map[string]interface{}{ "timefmt": timeFormat, "getsection": getSection, "getenv": os.Getenv, } tpls := template.Must(template.New("templates").Funcs(templateFNs).ParseFS(templatesFS, "*")) return &OutputFormatterImpl{templates: tpls} } // FormatReleaseNote format a release note. func (p OutputFormatterImpl) FormatReleaseNote(releasenote ReleaseNote) (string, error) { var b bytes.Buffer if err := p.templates.ExecuteTemplate(&b, "releasenotes-md.tpl", releaseNoteVariables(releasenote)); err != nil { return "", err } return b.String(), nil } // FormatChangelog format a changelog. 
func (p OutputFormatterImpl) FormatChangelog(releasenotes []ReleaseNote) (string, error) { templateVars := make([]releaseNoteTemplateVariables, len(releasenotes)) for i, v := range releasenotes { templateVars[i] = releaseNoteVariables(v) } var b bytes.Buffer if err := p.templates.ExecuteTemplate(&b, "changelog-md.tpl", templateVars); err != nil { return "", err } return b.String(), nil } func releaseNoteVariables(releasenote ReleaseNote) releaseNoteTemplateVariables { release := releasenote.Tag if releasenote.Version != nil { release = "v" + releasenote.Version.String() } return releaseNoteTemplateVariables{ Release: release, Tag: releasenote.Tag, Version: releasenote.Version, Date: releasenote.Date, Sections: releasenote.Sections, AuthorNames: toSortedArray(releasenote.AuthorsNames), } } func toSortedArray(input map[string]struct{}) []string { result := make([]string, len(input)) i := 0 for k := range input { result[i] = k i++ } sort.Strings(result) return result }
package routetag

import (
	"context"
	"database/sql"

	"github.com/hardstylez72/bblog/ad/pkg/tag"
	"github.com/jmoiron/sqlx"
)

// Merge reconciles the tag set attached to routeId with tagNames: tags named
// in tagNames are created (if unknown) and linked to the route, and links to
// tags that are no longer named are removed. Reads go through conn while all
// writes go through tx, so the caller controls transactional visibility —
// note that reads will not see this transaction's own uncommitted writes.
// On success it returns the tagNames slice unchanged.
func Merge(ctx context.Context, conn *sqlx.DB, tx *sqlx.Tx, routeId int, tagNames []string) ([]string, error) {
	// Snapshot of the tag ids currently linked to the route.
	currentTagIds, err := GetRouteTags(ctx, conn, routeId)
	if err != nil {
		return nil, err
	}

	newTagIds := make([]int, 0)
	for _, tagName := range tagNames {
		t, err := tag.GetByNameDb(ctx, conn, tagName)
		if err != nil {
			if err == tag.ErrNotFound {
				// Unknown tag: create it inside the transaction.
				var creationTagError error
				t, creationTagError = tag.InsertTx(ctx, tx, &tag.Tag{Name: tagName})
				if creationTagError != nil {
					return nil, creationTagError
				}
			} else {
				return nil, err
			}
		}
		// Link the tag unless the route already has it.
		if !contains(currentTagIds, t.Id) {
			err = insertPair(ctx, tx, routeId, t.Id)
			if err != nil {
				return nil, err
			}
		}
		newTagIds = append(newTagIds, t.Id)
	}

	// Unlink tags that are attached but no longer requested.
	for i := range currentTagIds {
		if !contains(newTagIds, currentTagIds[i]) {
			err = deletePair(ctx, tx, routeId, currentTagIds[i])
			if err != nil {
				return nil, err
			}
		}
	}

	return tagNames, nil
}

// contains reports whether el is present in set.
func contains(set []int, el int) bool {
	for i := range set {
		if set[i] == el {
			return true
		}
	}
	return false
}

// GetRouteTags returns the ids of all tags linked to routeId.
func GetRouteTags(ctx context.Context, conn *sqlx.DB, routeId int) ([]int, error) {
	query := `
		select tag_id
		from ad.routes_tags
		where route_id = $1
`
	ids := make([]int, 0)
	err := conn.SelectContext(ctx, &ids, query, routeId)
	if err != nil {
		return nil, err
	}
	return ids, nil
}

// isPairExist reports whether a (tagId, routeId) link row exists.
//
// NOTE(review): not called within this file — other files in the package may
// use it; confirm before removing. The sql.ErrNoRows branch is defensive,
// since count(*) always yields exactly one row.
func isPairExist(ctx context.Context, conn *sqlx.DB, routeId, tagId int) (bool, error) {
	query := `
		select count(*)
		from ad.routes_tags
		where tag_id = $1
		and route_id = $2
`
	var count int
	err := conn.GetContext(ctx, &count, query, tagId, routeId)
	if err != nil {
		if err == sql.ErrNoRows {
			return false, nil
		}
		return false, err
	}
	return count != 0, nil
}

// insertPair links tagId to routeId inside tx.
func insertPair(ctx context.Context, tx *sqlx.Tx, routeId, tagId int) error {
	query := `
		insert into ad.routes_tags (
			tag_id,
			route_id
		) values (
			$1,
			$2
		)
`
	_, err := tx.ExecContext(ctx, query, tagId, routeId)
	if err != nil {
		return err
	}
	return nil
}

// deletePair removes the link between routeId and tagId inside tx.
func deletePair(ctx context.Context, tx *sqlx.Tx, routeId, tagId int) error {
	query := `delete from ad.routes_tags where route_id = $1 and tag_id = $2`
	_, err := tx.ExecContext(ctx, query, routeId, tagId)
	if err != nil {
		return err
	}
	return nil
}
package main

// main demonstrates dead stores: every value assigned to x is overwritten or
// discarded without ever being read. The compiler accepts this (an
// assignment counts as a use), but static analyzers flag each write.
func main() {
	var x int // x is declared too far from its use, x is defined but its current value isn't used
	if true {
		x = 3 // x is defined but its current value isn't used
		x = 4 // x is defined but its current value isn't used
	}
}
package main import ( "strconv" "io" "bufio" "os" "flag" "fmt" "time" "algorithms/quicksort" "algorithms/bubblesort" ) // 名字,默认值,使用方法 var infile *string = flag.String("i", "unsorted.dat", "File contains values for sorting") var outfile *string = flag.String("o", "sorted.dat", "File to receive sorted values") var algorithm *string = flag.String("a", "qsort", "Sort algorithm") func read_values(infile string)(values []int, err error){ file, err := os.Open(infile) if err != nil{ fmt.Println("Failed to open the input file ", infile) return } // 在函数return之前关闭 defer file.Close() // 带缓存的IO操作 br := bufio.NewReader(file) // 直接创建初始元素为0个的数组切片 values = make([]int, 0) // 无限循环 for{ // 如果要返回的line太长,超过缓存区,那么isPrefix就是True,直到为False表示这一行已经结束 line, isPrefix, err1 := br.ReadLine() if err1 != nil{ if err1 != io.EOF{ err = err1 } // 如果有错误而且不是EOF,则会跳出循环,然后这里已经定义err,则返回的变量已经定义了 break } if isPrefix{ fmt.Println("A too long line, seems unexpected") return } fmt.Println(line) // 将字符数组转换为字符串,字符数组里面包含的是ASCII码 str := string(line) fmt.Println(str) // ascii to integer // strconv.Itoa integer to ascii value, err1 := strconv.Atoi(str) fmt.Println(value) if err1 != nil{ err = err1 return } values = append(values, value) fmt.Println(values) } return } func write_values(values []int, outfile string) (err error){ file, err := os.Create(outfile) if err != nil{ fmt.Println("Failed to create the output file ", outfile) return } defer file.Close() for _, value := range values{ str := strconv.Itoa(value) file.WriteString(str + "\n") } return } func main(){ flag.Parse() if infile != nil{ // 输入文件不为空 fmt.Println("infile = ", *infile, "outfile = ", *outfile, "algorithm = ", *algorithm) } // 读取数据 values, err := read_values(*infile) if err == nil{ fmt.Println("read values: ", values) t1 := time.Now() switch *algorithm{ case "qsort": quicksort.QuickSort2(values) case "bubblesort": bubblesort.BubbleSort(values) default: fmt.Println("sorting algorithm", *algorithm, "is either unknown or unsupported") } t2 := 
time.Now() fmt.Println("sorted values:", values) fmt.Println("the sorting process costs", t2.Sub(t1), "to complete") // 写入数据 err = write_values(values, *outfile) if err != nil{ fmt.Println("write file failed ", err) } else { fmt.Println("write success") } } else { fmt.Println(err) } }
// Package tx defines data types for transactions, transaction statuses, and
// other related types. This package should not contain business-logic, and
// should not depend on business-logic packages.
//
// Transactions are defined in this package, away from business-logic, so that
// they can be imported into other projects without needing to import unwanted
// components. For example, block explorers need to know about transactions, but
// do not need to know about the transaction pool.
package tx

import (
	"math/rand"
	"reflect"
	"testing/quick"

	"github.com/renproject/id"
	"github.com/renproject/pack"
	"github.com/renproject/surge"
)

// Tx represents a RenVM cross-chain transaction.
type Tx struct {
	// Hash of the transaction that uniquely identifies it. It is the SHA256
	// hash of the "to" contract address and the "in" values.
	Hash id.Hash `json:"hash"`
	// Version of the transaction. This allows for backwards/forwards
	// compatibility when processing transactions.
	Version Version `json:"version"`
	// Selector of the gateway function that is being called by this
	// transaction.
	Selector Selector `json:"selector"`
	// Input values are provided by an external user when the transaction is
	// submitted.
	Input pack.Typed `json:"in"`
	// Output values are generated as part of execution.
	Output pack.Typed `json:"out"`
}

// NewTxHash returns the transaction hash for a transaction with the given
// recipient and inputs. An error is returned when the recipient and inputs is
// too large and cannot be marshaled into bytes without exceeding memory
// allocation restrictions.
func NewTxHash(version Version, selector Selector, input pack.Typed) (id.Hash, error) {
	// Size the scratch buffer to exactly hold the marshaled version,
	// selector, and input.
	buf := make([]byte, surge.SizeHintString(string(version))+surge.SizeHintString(string(selector))+surge.SizeHint(input))
	return NewTxHashIntoBuffer(version, selector, input, buf)
}

// NewTxHashIntoBuffer writes the transaction hash for a transaction with the
// given recipient and inputs into a bytes buffer. An error is returned when the
// recipient and inputs is too large and cannot be marshaled into bytes without
// exceeding memory allocation restrictions. This function is useful when doing
// a lot of hashing, because it allows for buffer re-use.
//
// NOTE(review): id.NewHash hashes the entire data slice, so the buffer must
// be sized exactly to the marshaled content (as NewTxHash does) — extra
// trailing bytes would change the resulting hash.
func NewTxHashIntoBuffer(version Version, selector Selector, input pack.Typed, data []byte) (id.Hash, error) {
	var err error
	// Marshal version, selector, and input consecutively into data; buf
	// advances through the buffer while rem tracks the remaining allowance.
	buf := data
	rem := surge.MaxBytes
	if buf, rem, err = version.Marshal(buf, rem); err != nil {
		return id.Hash{}, err
	}
	if buf, rem, err = selector.Marshal(buf, rem); err != nil {
		return id.Hash{}, err
	}
	if buf, rem, err = input.Marshal(buf, rem); err != nil {
		return id.Hash{}, err
	}
	return id.NewHash(data), nil
}

// NewTx returns a transaction with the given recipient and inputs. The hash of
// the transaction is automatically computed and stored in the transaction. An
// error is returned when the recipient and inputs is too large and cannot be
// marshaled into bytes without exceeding memory allocation restrictions.
func NewTx(selector Selector, input pack.Typed) (Tx, error) {
	hash, err := NewTxHash(Version1, selector, input)
	if err != nil {
		return Tx{}, err
	}
	return Tx{Version: Version1, Hash: hash, Selector: selector, Input: input}, nil
}

// Generate allows us to quickly generate random transactions. This is mostly
// used for writing tests. Note that the generated Hash is random and is not
// recomputed from the generated selector and input.
func (tx Tx) Generate(r *rand.Rand, size int) reflect.Value {
	hash, _ := quick.Value(reflect.TypeOf(id.Hash{}), r)
	version, _ := quick.Value(reflect.TypeOf(Version("")), r)
	selector, _ := quick.Value(reflect.TypeOf(Selector("")), r)
	input, _ := quick.Value(reflect.TypeOf(pack.Typed{}), r)
	output, _ := quick.Value(reflect.TypeOf(pack.Typed{}), r)
	return reflect.ValueOf(Tx{
		Hash:     hash.Interface().(id.Hash),
		Version:  version.Interface().(Version),
		Selector: selector.Interface().(Selector),
		Input:    input.Interface().(pack.Typed),
		Output:   output.Interface().(pack.Typed),
	})
}

// MapToPtrs maps a slice of transactions to a slice of transaction pointers.
func MapToPtrs(txs []Tx) []*Tx { txPtrs := make([]*Tx, len(txs)) for i := range txs { txPtrs[i] = &txs[i] } return txPtrs } // MapFromPtrs maps a slice of transaction pointers to a slice of transactions. func MapFromPtrs(txPtrs []*Tx) []Tx { txs := make([]Tx, len(txPtrs)) for i := range txPtrs { txs[i] = *txPtrs[i] } return txs } // MapToHashes maps a slice of transactions to a slice of transaction hashes. func MapToHashes(txs []Tx) []pack.Bytes32 { txHashes := make([]pack.Bytes32, len(txs)) for i := range txs { txHashes[i] = pack.Bytes32(txs[i].Hash) } return txHashes } // MapToIDs maps a slice of transactions to a slice of transaction hashes. func MapToIDs(txs []Tx) []id.Hash { txHashes := make([]id.Hash, len(txs)) for i := range txs { txHashes[i] = id.Hash(txs[i].Hash) } return txHashes } // MapToNoStatus maps a slice of transactions with statuses to a slice of just // transactions. func MapToNoStatus(txsWithStatus []WithStatus) []Tx { txs := make([]Tx, len(txsWithStatus)) for i := range txs { txs[i] = txsWithStatus[i].Tx } return txs }
package mongodbwrapper

import (
	"context"
	"fmt"

	"go.mongodb.org/mongo-driver/mongo"
)

/******************************************************************************************************
 *
 * Definition
 *
 *******************************************************************************************************/

// Database interface
// This interface will be initialized by the init method of the wrapper
// See wrapper for more information
type Database interface {
	// This method allows you to add one or several collections to the database structure
	// If a collection is already present, it will skip it
	AddCollections(collections ...string)
	// This method checks if a collection is in the database structure or not
	// This method is here to avoid some "dangerous" memory access
	// Warning: a collection that is not in the structure could be in the database
	// Just use the AddCollections method instead
	checkCollections(collection string) error
	// This method allows you to insert a document into the database
	// It's wrapped by the checkCollections method (SAFE)
	InsertOne(collection string, document interface{}) (*mongo.InsertOneResult, error)
	// This method allows you to insert several documents into the database
	// It's wrapped by the checkCollections method (SAFE)
	InsertMany(collection string, documents []interface{}) (*mongo.InsertManyResult, error)
	// This method allows you to delete a document from the database
	// It's wrapped by the checkCollections method (SAFE)
	DeleteOne(collection string, filter interface{}) (*mongo.DeleteResult, error)
	// This method allows you to delete several documents from the database
	// It's wrapped by the checkCollections method (SAFE)
	DeleteMany(collection string, filter interface{}) (*mongo.DeleteResult, error)
	// This method allows you to find one document from the database based on a filter
	// It's wrapped by the checkCollections method (SAFE)
	FindOne(collection string, filter interface{}) (*mongo.SingleResult, error)
	// This method allows you to find several documents from the database based on a filter
	// It's wrapped by the checkCollections method (SAFE)
	FindMany(collection string, filter interface{}) (*mongo.Cursor, error)
	// This method allows you to update one document from the database
	// It's wrapped by the checkCollections method (SAFE)
	UpdateOne(collection string, filter interface{}, update interface{}) (*mongo.UpdateResult, error)
	// This method allows you to update several documents from the database
	// It's wrapped by the checkCollections method (SAFE)
	UpdateMany(collection string, filter interface{}, update interface{}) (*mongo.UpdateResult, error)
	// Return the number of collections added
	CollectionNumber() int
	// Return the context
	GetContext() context.Context
}

/******************************************************************************************************
 *
 * Implementation
 *
 *******************************************************************************************************/

// DatabaseInfo is the concrete Database implementation. It caches
// *mongo.Collection handles by name so that every wrapped operation can be
// guarded by checkCollections before touching the driver.
type DatabaseInfo struct {
	name        string
	collections map[string]*mongo.Collection
	ctx         context.Context
	database    *mongo.Database
}

// checkCollections returns an error when the named collection has not been
// registered via AddCollections. (Renamed parameter: it receives a single
// collection name, not a list.)
func (v *DatabaseInfo) checkCollections(collection string) error {
	if _, ok := v.collections[collection]; !ok {
		// Error string is lowercase per Go convention (staticcheck ST1005).
		return fmt.Errorf("collection %s does not exist", collection)
	}
	return nil
}

// InsertOne inserts a single document into the named collection.
func (v *DatabaseInfo) InsertOne(collection string, document interface{}) (*mongo.InsertOneResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].InsertOne(v.ctx, document)
}

// InsertMany inserts several documents into the named collection.
func (v *DatabaseInfo) InsertMany(collection string, documents []interface{}) (*mongo.InsertManyResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].InsertMany(v.ctx, documents)
}

// DeleteOne deletes the first document matching filter from the named
// collection. (Renamed parameter from "document" to "filter" for consistency
// with the Database interface — the value is a query filter, not a document.)
func (v *DatabaseInfo) DeleteOne(collection string, filter interface{}) (*mongo.DeleteResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].DeleteOne(v.ctx, filter)
}

// DeleteMany deletes all documents matching filter from the named collection.
func (v *DatabaseInfo) DeleteMany(collection string, filter interface{}) (*mongo.DeleteResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].DeleteMany(v.ctx, filter)
}

// FindOne returns the single result matching filter in the named collection.
func (v *DatabaseInfo) FindOne(collection string, filter interface{}) (*mongo.SingleResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].FindOne(v.ctx, filter), nil
}

// FindMany returns a cursor over all documents matching filter in the named
// collection.
func (v *DatabaseInfo) FindMany(collection string, filter interface{}) (*mongo.Cursor, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].Find(v.ctx, filter)
}

// UpdateOne applies update to the first document matching filter.
func (v *DatabaseInfo) UpdateOne(collection string, filter interface{}, update interface{}) (*mongo.UpdateResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].UpdateOne(v.ctx, filter, update)
}

// UpdateMany applies update to all documents matching filter.
func (v *DatabaseInfo) UpdateMany(collection string, filter interface{}, update interface{}) (*mongo.UpdateResult, error) {
	if err := v.checkCollections(collection); err != nil {
		return nil, err
	}
	return v.collections[collection].UpdateMany(v.ctx, filter, update)
}

// AddCollections registers one or several collections in the wrapper,
// skipping any that are already present.
func (v *DatabaseInfo) AddCollections(collections ...string) {
	for _, collection := range collections {
		_, ok := v.collections[collection]
		if !ok {
			v.collections[collection] = v.database.Collection(collection)
		}
	}
}

// GetContext returns the context the wrapper was initialized with.
func (v *DatabaseInfo) GetContext() context.Context {
	return v.ctx
}

// CollectionNumber returns the number of collections registered so far.
func (v *DatabaseInfo) CollectionNumber() int {
	return len(v.collections)
}
package main

import (
	"flag"
	"fmt"
)

// Command-line flags, bound in init and parsed in main.
var (
	name string // -Name: the name to print
	quit bool   // -q: exit immediately without printing
)

func init() {
	flag.StringVar(&name, "Name", "Tom", "input Name")
	flag.BoolVar(&quit, "q", false, "is Quit")
}

func main() {
	flag.Parse()

	// Quit flag short-circuits all output.
	if quit {
		return
	}

	fmt.Println(name)

	// Inspect the registered flag definition itself as well as its value.
	nameFlag := flag.Lookup("Name")
	fmt.Printf("%#v\n", nameFlag)
	fmt.Println(nameFlag.Value)
}
package config

import (
	"testing"

	log "github.com/sirupsen/logrus"
)

// TestProcessingInvalidBody loads the configuration and logs it at debug
// level; it currently only checks that Load does not panic.
func TestProcessingInvalidBody(t *testing.T) {
	loaded := Load()
	log.Debugf("Config: %+v\n", loaded)
}
package leetcode

import "math"

// minPrices returns, for each index i, the minimum price observed in
// prices[0..i]. The result has the same length as the input (empty in, empty
// out). Kept for callers that need the per-index running minimum.
func minPrices(prices []int) []int {
	lowest := math.MaxInt64
	values := make([]int, len(prices))
	for i, p := range prices {
		if p < lowest {
			lowest = p
		}
		values[i] = lowest
	}
	return values
}

// maxProfit returns the maximum profit achievable by buying once and selling
// once at a later index (LeetCode 121 "Best Time to Buy and Sell Stock").
// Returns 0 for an empty slice or when no profitable trade exists.
//
// Improved to a single pass with O(1) extra space: the original built the
// full minPrices slice and scanned the input a second time.
func maxProfit(prices []int) int {
	best := 0
	lowest := math.MaxInt64
	for _, p := range prices {
		// Track the running minimum first so lowest <= p below, which also
		// rules out overflow in the subtraction.
		if p < lowest {
			lowest = p
		}
		if profit := p - lowest; profit > best {
			best = profit
		}
	}
	return best
}
package main

import (
	"context"
	"encoding/json"
	"log"
	"os"
	"path"

	"golang.org/x/oauth2"
)

// tokenFilename is the on-disk location of the cached OAuth2 token;
// environment variables are expanded at use time.
const tokenFilename = "$HOME/.gomuche/token.json"

// NewTokenFromFile reads a previously saved token from tokenFilename and
// returns it. The process exits if the file is missing or malformed.
func NewTokenFromFile() *oauth2.Token {
	filename := os.ExpandEnv(tokenFilename)
	// os.ReadFile replaces the deprecated ioutil.ReadFile (Go 1.16+).
	bytes, err := os.ReadFile(filename)
	if err != nil {
		log.Fatalln(err)
	}
	token := new(oauth2.Token)
	if err := json.Unmarshal(bytes, token); err != nil {
		log.Fatalln(err)
	}
	return token
}

// NewTokenFromCode exchanges an authorization code for a new token against the
// provider's token endpoint, persists it via SaveToken, and returns it. The
// process exits on exchange failure.
func NewTokenFromCode(conf *oauth2.Config, code string) *oauth2.Token {
	ctx := context.Background()
	token, err := conf.Exchange(ctx, code)
	if err != nil {
		log.Fatalln(err)
	}
	SaveToken(token)
	return token
}

// SaveToken serializes token as indented JSON and writes it to tokenFilename,
// creating the parent directory if needed. The process exits on any failure.
func SaveToken(token *oauth2.Token) {
	bytes, err := json.MarshalIndent(token, "", " ")
	if err != nil {
		log.Fatalln(err)
	}
	filename := os.ExpandEnv(tokenFilename)
	if err := os.MkdirAll(path.Dir(filename), 0755); err != nil {
		log.Fatalln(err)
	}
	// Security fix: the token is a credential — write it 0600 (owner
	// read/write only) instead of the previous world-readable/executable 0755.
	if err := os.WriteFile(filename, bytes, 0600); err != nil {
		log.Fatalln(err)
	}
}
package keeper import ( "github.com/InjectiveLabs/injective-oracle-scaffold/injective-chain/modules/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" ) // GetParams returns the total set of oracle parameters. func (k BaseKeeper) GetParams(ctx sdk.Context) (params types.Params) { k.paramSpace.GetParamSet(ctx, &params) return params } // SetParams set the params func (k BaseKeeper) SetParams(ctx sdk.Context, params types.Params) { k.paramSpace.SetParamSet(ctx, &params) }
package game

import (
	"fmt"
)

// Command defines something that is a command: a named action (via
// fmt.Stringer) that can be executed against a *Game.
type Command interface {
	fmt.Stringer
	// Run executes the command against the given game state.
	Run(*Game)
}
package transport

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/feng/future/go-kit/agfun/app-server/protocol/api"
	//"github.com/gorilla/mux"
)

// decodeAccountRequest builds an api.AccountReq from the "Account" URL query
// parameter. It returns an error (instead of panicking) when the parameter is
// missing.
func decodeAccountRequest(_ context.Context, r *http.Request) (interface{}, error) {
	var request api.AccountReq
	vars := r.URL.Query()
	fmt.Println("decodeAccountRequest", vars)
	// Bug fix: indexing vars["Account"][0] directly panics with
	// "index out of range" when the query parameter is absent.
	accounts := vars["Account"]
	if len(accounts) == 0 {
		return nil, fmt.Errorf("missing required query parameter %q", "Account")
	}
	request.Account = accounts[0]
	return request, nil
}

// decodeCreateAccountRequest decodes the JSON request body into an
// api.CreateAccountReq.
func decodeCreateAccountRequest(_ context.Context, r *http.Request) (interface{}, error) {
	var request api.CreateAccountReq
	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
		return nil, err
	}
	return request, nil
}

// decodeUpdateAccountRequest decodes the JSON request body into an
// api.UpdateAccountReq.
func decodeUpdateAccountRequest(_ context.Context, r *http.Request) (interface{}, error) {
	var request api.UpdateAccountReq
	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
		return nil, err
	}
	return request, nil
}
package main

import (
	"bankBigData/AutomaticTask/module"
	"bankBigData/_public/config"
	"bankBigData/_public/log"
	"bankBigData/_public/table"
	"gitee.com/johng/gf/g"
	// NOTE(review): the doubled "os/os" segment in this import path looks
	// suspicious — gf's cron package normally lives under g/os/gcron; confirm
	// this path actually resolves in this project's vendoring.
	"gitee.com/johng/gf/g/os/os/gcron"
)

// main wires up configuration, logging and per-database debug flags, then
// schedules the automatic data-loading task on a cron timer and blocks
// forever so the scheduler keeps running.
func main() {
	// Load settings from config.json.
	g.Config().SetFileName("config.json")
	debug := g.Config().GetBool("debug")
	table.DbDefaultName = g.Config().GetString("dbDefaultName")
	log.Init(config.TaskNameSpace)

	// Toggle SQL debug output on the default and each named database
	// connection.
	g.DB().SetDebug(debug)
	g.DB(table.CDbName).SetDebug(debug)
	g.DB(table.IDBName).SetDebug(debug)
	g.DB(table.CRDBName).SetDebug(debug)

	module.InitConf()
	e := module.InitTable()
	if e != nil {
		// NOTE(review): the error from InitTable is silently dropped here —
		// consider logging it before returning.
		return
	}

	// Automatically run the task on the configured cron schedule.
	// (Original comment: 自动定时执行任务)
	_ = gcron.Add(g.Config().GetString("taskRunTime"), func() {
		module.AutoLoadData()
	})

	// Block forever so the cron goroutine keeps running.
	select {}
}
/*Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package application

import (
	"context"
	"encoding/json"
	"os"
	"strings"
	"time"

	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/klog/v2"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"

	monitorContext "github.com/kubevela/pkg/monitor/context"
	pkgmulticluster "github.com/kubevela/pkg/multicluster"
	"github.com/kubevela/pkg/util/slices"
	workflowv1alpha1 "github.com/kubevela/workflow/api/v1alpha1"
	"github.com/kubevela/workflow/pkg/cue/model/value"
	"github.com/kubevela/workflow/pkg/executor"
	"github.com/kubevela/workflow/pkg/generator"
	"github.com/kubevela/workflow/pkg/providers"
	"github.com/kubevela/workflow/pkg/providers/kube"
	wfTypes "github.com/kubevela/workflow/pkg/types"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/apis/types"
	"github.com/oam-dev/kubevela/pkg/appfile"
	"github.com/oam-dev/kubevela/pkg/auth"
	configprovider "github.com/oam-dev/kubevela/pkg/config/provider"
	"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1beta1/application/assemble"
	ctrlutil "github.com/oam-dev/kubevela/pkg/controller/utils"
	velaprocess "github.com/oam-dev/kubevela/pkg/cue/process"
	"github.com/oam-dev/kubevela/pkg/features"
	"github.com/oam-dev/kubevela/pkg/monitor/metrics"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/oam/util"
	"github.com/oam-dev/kubevela/pkg/stdlib"
	"github.com/oam-dev/kubevela/pkg/utils/apply"
	"github.com/oam-dev/kubevela/pkg/velaql/providers/query"
	multiclusterProvider "github.com/oam-dev/kubevela/pkg/workflow/providers/multicluster"
	oamProvider "github.com/oam-dev/kubevela/pkg/workflow/providers/oam"
	terraformProvider "github.com/oam-dev/kubevela/pkg/workflow/providers/terraform"
	"github.com/oam-dev/kubevela/pkg/workflow/template"
)

func init() {
	// The CUE stdlib must be importable before any workflow step is compiled;
	// failing here is unrecoverable, so exit the process.
	if err := stdlib.SetupBuiltinImports(); err != nil {
		klog.ErrorS(err, "Unable to set up builtin imports on package initialization")
		os.Exit(1)
	}
}

var (
	// DisableResourceApplyDoubleCheck optimize applyComponentFunc by disable post resource existing check after dispatch
	DisableResourceApplyDoubleCheck = false
)

// GenerateApplicationSteps generate application steps.
// It registers all workflow providers (kube, config, oam, multicluster,
// terraform, query), builds the workflow instance from the appfile, and
// converts apply-component steps to their builtin form.
// nolint:gocyclo
func (h *AppHandler) GenerateApplicationSteps(ctx monitorContext.Context,
	app *v1beta1.Application,
	appParser *appfile.Parser,
	af *appfile.Appfile) (*wfTypes.WorkflowInstance, []wfTypes.TaskRunner, error) {
	appRev := h.currentAppRev
	t := time.Now()
	defer func() {
		metrics.AppReconcileStageDurationHistogram.WithLabelValues("generate-app-steps").Observe(time.Since(t).Seconds())
	}()

	appLabels := map[string]string{
		oam.LabelAppName:      app.Name,
		oam.LabelAppNamespace: app.Namespace,
	}
	handlerProviders := providers.NewProviders()
	kube.Install(handlerProviders, h.Client, appLabels, &kube.Handlers{
		Apply:  h.Dispatch,
		Delete: h.Delete,
	})
	// Config resources get the application labels stamped on before dispatch.
	configprovider.Install(handlerProviders, h.Client, func(ctx context.Context, resources []*unstructured.Unstructured, applyOptions []apply.ApplyOption) error {
		for _, res := range resources {
			res.SetLabels(util.MergeMapOverrideWithDst(res.GetLabels(), appLabels))
		}
		return h.resourceKeeper.Dispatch(ctx, resources, applyOptions)
	})
	oamProvider.Install(handlerProviders, app, af, h.Client, h.applyComponentFunc(appParser, appRev, af), h.renderComponentFunc(appParser, appRev, af))
	pCtx := velaprocess.NewContext(generateContextDataFromApp(app, appRev.Name))
	renderer := func(ctx context.Context, comp common.ApplicationComponent) (*appfile.Component, error) {
		return appParser.ParseComponentFromRevisionAndClient(ctx, comp, appRev)
	}
	multiclusterProvider.Install(handlerProviders, h.Client, app, af,
		h.applyComponentFunc(appParser, appRev, af),
		h.checkComponentHealth(appParser, appRev, af),
		renderer)
	terraformProvider.Install(handlerProviders, app, renderer)
	query.Install(handlerProviders, h.Client, nil)

	instance := generateWorkflowInstance(af, app)
	executor.InitializeWorkflowInstance(instance)
	runners, err := generator.GenerateRunners(ctx, instance, wfTypes.StepGeneratorOptions{
		Providers:       handlerProviders,
		PackageDiscover: h.pd,
		ProcessCtx:      pCtx,
		TemplateLoader:  template.NewWorkflowStepTemplateRevisionLoader(appRev, h.Client.RESTMapper()),
		Client:          h.Client,
		// apply-component steps are rewritten into the builtin step type with
		// the full component spec inlined into the step properties.
		StepConvertor: map[string]func(step workflowv1alpha1.WorkflowStep) (workflowv1alpha1.WorkflowStep, error){
			wfTypes.WorkflowStepTypeApplyComponent: func(lstep workflowv1alpha1.WorkflowStep) (workflowv1alpha1.WorkflowStep, error) {
				copierStep := lstep.DeepCopy()
				if err := convertStepProperties(copierStep, app); err != nil {
					return lstep, errors.WithMessage(err, "convert [apply-component]")
				}
				copierStep.Type = wfTypes.WorkflowStepTypeBuiltinApplyComponent
				return *copierStep, nil
			},
		},
	})
	if err != nil {
		return nil, nil, err
	}
	return instance, runners, nil
}

// CheckWorkflowRestart check if application workflow need restart and return the desired
// rev to be set in status
// 1. If workflow status is empty, it means no previous running record, the
// workflow will restart (cold start)
// 2. If workflow status is not empty, and publishVersion is set, the desired
// rev will be the publishVersion
// 3. If workflow status is not empty, the desired rev will be the
// ApplicationRevision name. For backward compatibility, the legacy style
// <rev>:<hash> will be recognized and reduced into <rev>
func (h *AppHandler) CheckWorkflowRestart(ctx monitorContext.Context, app *v1beta1.Application) {
	desiredRev, currentRev := h.currentAppRev.Name, ""
	if app.Status.Workflow != nil {
		currentRev = app.Status.Workflow.AppRevision
	}
	if metav1.HasAnnotation(app.ObjectMeta, oam.AnnotationPublishVersion) {
		desiredRev = app.GetAnnotations()[oam.AnnotationPublishVersion]
	} else { // nolint
		// backward compatibility
		// legacy versions use <rev>:<hash> as currentRev, extract <rev>
		if idx := strings.LastIndexAny(currentRev, ":"); idx >= 0 {
			currentRev = currentRev[:idx]
		}
	}
	// Same revision already recorded: no restart needed.
	if currentRev != "" && desiredRev == currentRev {
		return
	}

	// record in revision: mark the previous run terminated/finished before
	// resetting the status for the new run.
	if h.latestAppRev != nil && h.latestAppRev.Status.Workflow == nil && app.Status.Workflow != nil {
		app.Status.Workflow.Terminated = true
		app.Status.Workflow.Finished = true
		if app.Status.Workflow.EndTime.IsZero() {
			app.Status.Workflow.EndTime = metav1.Now()
		}
		h.UpdateApplicationRevisionStatus(ctx, h.latestAppRev, app.Status.Workflow)
	}

	// clean recorded resources info.
	app.Status.Services = nil
	app.Status.AppliedResources = nil

	// clean conditions after render: keep only conditions up to (and
	// including) RenderCondition.
	var reservedConditions []condition.Condition
	for i, cond := range app.Status.Conditions {
		condTpy, err := common.ParseApplicationConditionType(string(cond.Type))
		if err == nil {
			if condTpy <= common.RenderCondition {
				reservedConditions = append(reservedConditions, app.Status.Conditions[i])
			}
		}
	}
	app.Status.Conditions = reservedConditions
	app.Status.Workflow = &common.WorkflowStatus{
		AppRevision: desiredRev,
	}
}

// generateWorkflowInstance builds a workflow instance from the appfile and the
// application's recorded workflow status, mapping the application phase onto
// the workflow run phase.
// NOTE(review): this dereferences app.Status.Workflow without a nil check —
// presumably CheckWorkflowRestart has always populated it by now; confirm.
func generateWorkflowInstance(af *appfile.Appfile, app *v1beta1.Application) *wfTypes.WorkflowInstance {
	instance := &wfTypes.WorkflowInstance{
		WorkflowMeta: wfTypes.WorkflowMeta{
			Name:        af.Name,
			Namespace:   af.Namespace,
			Annotations: app.Annotations,
			Labels:      app.Labels,
			UID:         app.UID,
			ChildOwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: v1beta1.SchemeGroupVersion.String(),
					Kind:       v1beta1.ApplicationKind,
					Name:       app.Name,
					UID:        app.GetUID(),
					Controller: pointer.Bool(true),
				},
			},
		},
		Debug: af.Debug,
		Steps: af.WorkflowSteps,
		Mode:  af.WorkflowMode,
	}
	status := app.Status.Workflow
	instance.Status = workflowv1alpha1.WorkflowRunStatus{
		Mode:           *af.WorkflowMode,
		Phase:          status.Phase,
		Message:        status.Message,
		Suspend:        status.Suspend,
		SuspendState:   status.SuspendState,
		Terminated:     status.Terminated,
		Finished:       status.Finished,
		ContextBackend: status.ContextBackend,
		Steps:          status.Steps,
		StartTime:      status.StartTime,
		EndTime:        status.EndTime,
	}
	switch app.Status.Phase {
	case common.ApplicationRunning:
		instance.Status.Phase = workflowv1alpha1.WorkflowStateSucceeded
	case common.ApplicationWorkflowSuspending:
		instance.Status.Phase = workflowv1alpha1.WorkflowStateSuspending
	case common.ApplicationWorkflowTerminated:
		instance.Status.Phase = workflowv1alpha1.WorkflowStateTerminated
	default:
		instance.Status.Phase = workflowv1alpha1.WorkflowStateExecuting
	}
	return instance
}

// convertStepProperties rewrites an apply-component step so that the full
// component spec (with its inputs/outputs/dependsOn hoisted onto the step)
// becomes the step properties under "value", plus the target "cluster".
func convertStepProperties(step *workflowv1alpha1.WorkflowStep, app *v1beta1.Application) error {
	o := struct {
		Component string `json:"component"`
		Cluster   string `json:"cluster"`
	}{}
	js, err := common.RawExtensionPointer{RawExtension: step.Properties}.MarshalJSON()
	if err != nil {
		return err
	}
	if err := json.Unmarshal(js, &o); err != nil {
		return err
	}

	var componentNames []string
	for _, c := range app.Spec.Components {
		componentNames = append(componentNames, c.Name)
	}

	for _, c := range app.Spec.Components {
		if c.Name == o.Component {
			if dcName, ok := checkDependsOnValidComponent(c.DependsOn, componentNames); !ok {
				return errors.Errorf("component %s not found, which is depended by %s", dcName, c.Name)
			}
			step.Inputs = append(step.Inputs, c.Inputs...)
			// Normalize parameter keys: bare keys are addressed under
			// "properties", then everything is rooted under "value" (the key
			// the component spec is stored at, see stepProperties below).
			for index := range step.Inputs {
				parameterKey := strings.TrimSpace(step.Inputs[index].ParameterKey)
				if parameterKey != "" && !strings.HasPrefix(parameterKey, "properties") && !strings.HasPrefix(parameterKey, "traits[") {
					parameterKey = "properties." + parameterKey
				}
				if parameterKey != "" {
					parameterKey = "value." + parameterKey
				}
				step.Inputs[index].ParameterKey = parameterKey
			}
			step.Outputs = append(step.Outputs, c.Outputs...)
			step.DependsOn = append(step.DependsOn, c.DependsOn...)
			// Clear the hoisted fields on the copy embedded in the properties.
			c.Inputs = nil
			c.Outputs = nil
			c.DependsOn = nil
			stepProperties := map[string]interface{}{
				"value":   c,
				"cluster": o.Cluster,
			}
			step.Properties = util.Object2RawExtension(stepProperties)
			return nil
		}
	}
	return errors.Errorf("component %s not found", o.Component)
}

// checkDependsOnValidComponent returns the first dependsOn name that is not a
// declared component (and false), or ("", true) when all dependencies exist.
func checkDependsOnValidComponent(dependsOnComponentNames, allComponentNames []string) (string, bool) {
	// does not depend on other components
	if dependsOnComponentNames == nil {
		return "", true
	}
	for _, dc := range dependsOnComponentNames {
		if !slices.Contains(allComponentNames, dc) {
			return dc, false
		}
	}
	return "", true
}

// renderComponentFunc returns a ComponentRender closure that renders a
// component's workload and traits for a given cluster without dispatching
// anything.
func (h *AppHandler) renderComponentFunc(appParser *appfile.Parser, appRev *v1beta1.ApplicationRevision, af *appfile.Appfile) oamProvider.ComponentRender {
	return func(baseCtx context.Context, comp common.ApplicationComponent, patcher *value.Value, clusterName string, overrideNamespace string) (*unstructured.Unstructured, []*unstructured.Unstructured, error) {
		ctx := multicluster.ContextWithClusterName(baseCtx, clusterName)
		_, manifest, err := h.prepareWorkloadAndManifests(ctx, appParser, comp, appRev, patcher, af)
		if err != nil {
			return nil, nil, err
		}
		return renderComponentsAndTraits(h.Client, manifest, appRev, clusterName, overrideNamespace)
	}
}

// checkComponentHealth returns a ComponentHealthCheck closure that re-renders
// the component, verifies its resources were already dispatched, and collects
// its health status without applying anything new.
func (h *AppHandler) checkComponentHealth(appParser *appfile.Parser, appRev *v1beta1.ApplicationRevision, af *appfile.Appfile) oamProvider.ComponentHealthCheck {
	return func(baseCtx context.Context, comp common.ApplicationComponent, patcher *value.Value, clusterName string, overrideNamespace string) (bool, *unstructured.Unstructured, []*unstructured.Unstructured, error) {
		ctx := multicluster.ContextWithClusterName(baseCtx, clusterName)
		ctx = contextWithComponentNamespace(ctx, overrideNamespace)
		ctx = contextWithReplicaKey(ctx, comp.ReplicaKey)

		wl, manifest, err := h.prepareWorkloadAndManifests(ctx, appParser, comp, appRev, patcher, af)
		if err != nil {
			return false, nil, nil, err
		}
		wl.Ctx.SetCtx(auth.ContextWithUserInfo(ctx, h.app))

		readyWorkload, readyTraits, err := renderComponentsAndTraits(h.Client, manifest, appRev, clusterName, overrideNamespace)
		if err != nil {
			return false, nil, nil, err
		}
		checkSkipApplyWorkload(wl)

		dispatchResources := readyTraits
		if !wl.SkipApplyWorkload {
			dispatchResources = append([]*unstructured.Unstructured{readyWorkload}, readyTraits...)
		}
		// Not yet dispatched => not healthy yet (err is nil here).
		if !h.resourceKeeper.ContainsResources(dispatchResources) {
			return false, nil, nil, err
		}
		_, output, outputs, isHealth, err := h.collectHealthStatus(auth.ContextWithUserInfo(ctx, h.app), wl, appRev, overrideNamespace, false)
		if err != nil {
			return false, nil, nil, err
		}
		return isHealth, output, outputs, err
	}
}

// applyComponentFunc returns a ComponentApply closure that renders a
// component, dispatches its resources (multi-stage when the feature gate is
// on), and reports the component's health along with the applied resources.
func (h *AppHandler) applyComponentFunc(appParser *appfile.Parser, appRev *v1beta1.ApplicationRevision, af *appfile.Appfile) oamProvider.ComponentApply {
	return func(baseCtx context.Context, comp common.ApplicationComponent, patcher *value.Value, clusterName string, overrideNamespace string) (*unstructured.Unstructured, []*unstructured.Unstructured, bool, error) {
		t := time.Now()
		defer func() {
			metrics.ApplyComponentTimeHistogram.WithLabelValues("-").Observe(time.Since(t).Seconds())
		}()
		ctx := multicluster.ContextWithClusterName(baseCtx, clusterName)
		ctx = contextWithComponentNamespace(ctx, overrideNamespace)
		ctx = contextWithReplicaKey(ctx, comp.ReplicaKey)

		wl, manifest, err := h.prepareWorkloadAndManifests(ctx, appParser, comp, appRev, patcher, af)
		if err != nil {
			return nil, nil, false, err
		}
		// Helm-style packaged resources are dispatched ahead of the workload.
		if len(manifest.PackagedWorkloadResources) != 0 {
			if err := h.Dispatch(ctx, clusterName, common.WorkflowResourceCreator, manifest.PackagedWorkloadResources...); err != nil {
				return nil, nil, false, errors.WithMessage(err, "cannot dispatch packaged workload resources")
			}
		}
		wl.Ctx.SetCtx(auth.ContextWithUserInfo(ctx, h.app))

		readyWorkload, readyTraits, err := renderComponentsAndTraits(h.Client, manifest, appRev, clusterName, overrideNamespace)
		if err != nil {
			return nil, nil, false, err
		}
		checkSkipApplyWorkload(wl)

		isHealth := true
		if utilfeature.DefaultMutableFeatureGate.Enabled(features.MultiStageComponentApply) {
			manifestDispatchers, err := h.generateDispatcher(appRev, readyWorkload, readyTraits, overrideNamespace)
			if err != nil {
				return nil, nil, false, errors.WithMessage(err, "generateDispatcher")
			}
			for _, dispatcher := range manifestDispatchers {
				if isHealth, err := dispatcher.run(ctx, wl, appRev, clusterName); !isHealth || err != nil {
					return nil, nil, false, err
				}
			}
		} else {
			dispatchResources := readyTraits
			if !wl.SkipApplyWorkload {
				dispatchResources = append([]*unstructured.Unstructured{readyWorkload}, readyTraits...)
			}
			if err := h.Dispatch(ctx, clusterName, common.WorkflowResourceCreator, dispatchResources...); err != nil {
				return nil, nil, false, errors.WithMessage(err, "Dispatch")
			}
			_, _, _, isHealth, err = h.collectHealthStatus(ctx, wl, appRev, overrideNamespace, false)
			if err != nil {
				return nil, nil, false, errors.WithMessage(err, "CollectHealthStatus")
			}
		}
		if DisableResourceApplyDoubleCheck {
			return readyWorkload, readyTraits, isHealth, nil
		}
		// Double-check: read the dispatched resources back from the cluster.
		workload, traits, err := getComponentResources(auth.ContextWithUserInfo(ctx, h.app), manifest, wl.SkipApplyWorkload, h.Client)
		return workload, traits, isHealth, err
	}
}

// overrideTraits will override cluster field to be local for traits which are control plane only
func overrideTraits(appRev *v1beta1.ApplicationRevision, readyTraits []*unstructured.Unstructured) []*unstructured.Unstructured {
	traits := readyTraits
	for index, readyTrait := range readyTraits {
		for _, trait := range appRev.Spec.TraitDefinitions {
			if trait.Spec.ControlPlaneOnly && trait.Name == readyTrait.GetLabels()[oam.TraitTypeLabel] {
				oam.SetCluster(traits[index], multicluster.ClusterLocalName)
				traits[index].SetNamespace(appRev.GetNamespace())
				break
			}
		}
	}
	return traits
}

// prepareWorkloadAndManifests parses the component from its revision, applies
// the optional patcher, and generates its manifest with a process context
// enriched from the (possibly cluster/namespace/replica-scoped) ctx.
func (h *AppHandler) prepareWorkloadAndManifests(ctx context.Context,
	appParser *appfile.Parser,
	comp common.ApplicationComponent,
	appRev *v1beta1.ApplicationRevision,
	patcher *value.Value,
	af *appfile.Appfile) (*appfile.Component, *types.ComponentManifest, error) {
	wl, err := appParser.ParseComponentFromRevisionAndClient(ctx, comp, appRev)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "ParseWorkload")
	}
	wl.Patch = patcher
	manifest, err := af.GenerateComponentManifest(wl, func(ctxData *velaprocess.ContextData) {
		if ns := componentNamespaceFromContext(ctx); ns != "" {
			ctxData.Namespace = ns
		}
		if rk := replicaKeyFromContext(ctx); rk != "" {
			ctxData.ReplicaKey = rk
		}
		ctxData.Cluster = pkgmulticluster.Local
		if cluster, ok := pkgmulticluster.ClusterFrom(ctx); ok && cluster != "" {
			ctxData.Cluster = cluster
		}
		// cluster info are secrets stored in the control plane cluster
		ctxData.ClusterVersion = multicluster.GetVersionInfoFromObject(pkgmulticluster.WithCluster(ctx, types.ClusterLocalName), h.Client, ctxData.Cluster)
		ctxData.CompRevision, _ = ctrlutil.ComputeSpecHash(comp)
	})
	if err != nil {
		return nil, nil, errors.WithMessage(err, "GenerateComponentManifest")
	}
	if err := af.SetOAMContract(manifest); err != nil {
		return nil, nil, errors.WithMessage(err, "SetOAMContract")
	}
	return wl, manifest, nil
}

// renderComponentsAndTraits assembles the workload and traits for apply,
// stamping the target cluster and override namespace, and forcing
// control-plane-only traits back to the local cluster.
func renderComponentsAndTraits(client client.Client, manifest *types.ComponentManifest, appRev *v1beta1.ApplicationRevision, clusterName string, overrideNamespace string) (*unstructured.Unstructured, []*unstructured.Unstructured, error) {
	readyWorkload, readyTraits, err := assemble.PrepareBeforeApply(manifest, appRev, []assemble.WorkloadOption{assemble.DiscoveryHelmBasedWorkload(context.TODO(), client)})
	if err != nil {
		return nil, nil, errors.WithMessage(err, "assemble resources before apply fail")
	}
	if clusterName != "" {
		oam.SetClusterIfEmpty(readyWorkload, clusterName)
		for _, readyTrait := range readyTraits {
			oam.SetClusterIfEmpty(readyTrait, clusterName)
		}
	}
	if overrideNamespace != "" {
		readyWorkload.SetNamespace(overrideNamespace)
		for _, readyTrait := range readyTraits {
			readyTrait.SetNamespace(overrideNamespace)
		}
	}
	readyTraits = overrideTraits(appRev, readyTraits)
	return readyWorkload, readyTraits, nil
}

// checkSkipApplyWorkload marks the component to skip applying its workload
// when any of its traits manages the workload itself.
func checkSkipApplyWorkload(comp *appfile.Component) {
	for _, trait := range comp.Traits {
		if trait.FullTemplate.TraitDefinition.Spec.ManageWorkload {
			comp.SkipApplyWorkload = true
			break
		}
	}
}

// getComponentResources reads the component's workload (unless skipped) and
// traits back from their clusters, returning the live cluster state.
func getComponentResources(ctx context.Context, manifest *types.ComponentManifest, skipStandardWorkload bool, cli client.Client) (*unstructured.Unstructured, []*unstructured.Unstructured, error) {
	var (
		workload *unstructured.Unstructured
		traits   []*unstructured.Unstructured
	)
	if !skipStandardWorkload {
		v := manifest.StandardWorkload.DeepCopy()
		if err := cli.Get(ctx, client.ObjectKeyFromObject(manifest.StandardWorkload), v); err != nil {
			return nil, nil, err
		}
		workload = v
	}

	for _, trait := range manifest.Traits {
		v := trait.DeepCopy()
		// Traits may live in a different cluster than the control plane.
		remoteCtx := multicluster.ContextWithClusterName(ctx, oam.GetCluster(v))
		if err := cli.Get(remoteCtx, client.ObjectKeyFromObject(trait), v); err != nil {
			return workload, nil, err
		}
		traits = append(traits, v)
	}
	return workload, traits, nil
}

// generateContextDataFromApp seeds the CUE process context with application
// identity plus workflow/publish-version annotations when present.
func generateContextDataFromApp(app *v1beta1.Application, appRev string) velaprocess.ContextData {
	data := velaprocess.ContextData{
		Namespace:       app.Namespace,
		AppName:         app.Name,
		CompName:        app.Name,
		AppRevisionName: appRev,
	}
	if app.Annotations != nil {
		data.WorkflowName = app.Annotations[oam.AnnotationWorkflowName]
		data.PublishVersion = app.Annotations[oam.AnnotationPublishVersion]
	}
	return data
}
package cmd

import (
	"fmt"
	"log"
	"time"

	"github.com/gen2brain/beeep"
	"github.com/spf13/cobra"
)

const (
	// NOTE(review): the trailing "\n" plus fmt.Println produces a blank line
	// after "timer started!" — confirm that is intended before removing.
	timerStartedMessage      = "timer started!\n"
	timerDoneMessage         = "timer done!"
	notEnoughArgumentMessage = "timer: not enough argument"
	// Typo fix: "to many" -> "too many".
	tooManyArgumentMessage = "timer: too many argument"
)

// timerCmd represents the timer command
var timerCmd = &cobra.Command{
	Use:   "timer",
	Short: "Start a new timer",
	Long: `Start a new gemit timer of a specified duration.
the duration used for the timer is from the argument passed.`,
	Run: func(cmd *cobra.Command, args []string) {
		gemitTimer(args)
	},
}

func init() {
	rootCmd.AddCommand(timerCmd)
}

// gemitTimer validates that exactly one argument was given, parses it as a
// duration (e.g. "25m"), blocks until the timer fires, then prints completion
// and sends a desktop notification.
func gemitTimer(args []string) {
	log.SetPrefix("gemit: ")
	log.SetFlags(0)

	// Exactly one argument (the duration) is required.
	switch {
	case len(args) < 1:
		log.Fatal(notEnoughArgumentMessage)
	case len(args) > 1:
		log.Fatal(tooManyArgumentMessage)
	}

	duration, err := time.ParseDuration(args[0])
	if err != nil {
		log.Fatal(err)
	}

	timer := time.NewTimer(duration)
	fmt.Println(timerStartedMessage)
	<-timer.C
	fmt.Println(timerDoneMessage)

	// "no icon" is just a placeholder so that beeep doesn't find an icon to use
	if notificationErr := beeep.Notify("Gemit", timerDoneMessage, "no icon"); notificationErr != nil {
		log.Fatal(notificationErr)
	}
}