text
stringlengths
11
4.05M
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package reconcilers import ( "context" "fmt" "testing" corev1 "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/fake" ) func TestEndpointsAdapterGet(t *testing.T) { endpoints1, _ := generateEndpointsAndSlice("foo", "testing", []int{80, 443}, []string{"10.1.2.3", "10.1.2.4"}) testCases := map[string]struct { endpointSlicesEnabled bool expectedError error expectedEndpoints *corev1.Endpoints endpoints []*corev1.Endpoints namespaceParam string nameParam string }{ "single-existing-endpoints": { endpointSlicesEnabled: false, expectedError: nil, expectedEndpoints: endpoints1, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "testing", nameParam: "foo", }, "single-existing-endpoints-slices-enabled": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpoints: endpoints1, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "testing", nameParam: "foo", }, "wrong-namespace": { endpointSlicesEnabled: false, expectedError: errors.NewNotFound(schema.GroupResource{Group: "", Resource: "endpoints"}, "foo"), expectedEndpoints: nil, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "foo", nameParam: "foo", }, "wrong-name": { endpointSlicesEnabled: false, expectedError: 
errors.NewNotFound(schema.GroupResource{Group: "", Resource: "endpoints"}, "bar"), expectedEndpoints: nil, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "testing", nameParam: "bar", }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { client := fake.NewSimpleClientset() epAdapter := EndpointsAdapter{endpointClient: client.CoreV1()} if testCase.endpointSlicesEnabled { epAdapter.endpointSliceClient = client.DiscoveryV1beta1() } for _, endpoints := range testCase.endpoints { _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } } endpoints, err := epAdapter.Get(testCase.namespaceParam, testCase.nameParam, metav1.GetOptions{}) if !apiequality.Semantic.DeepEqual(testCase.expectedError, err) { t.Errorf("Expected error: %v, got: %v", testCase.expectedError, err) } if !apiequality.Semantic.DeepEqual(endpoints, testCase.expectedEndpoints) { t.Errorf("Expected endpoints: %v, got: %v", testCase.expectedEndpoints, endpoints) } }) } } func TestEndpointsAdapterCreate(t *testing.T) { endpoints1, epSlice1 := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.3", "10.1.2.4"}) // even if an Endpoints resource includes an IPv6 address, it should not be // included in the corresponding EndpointSlice. endpoints2, _ := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.5", "10.1.2.6", "1234::5678:0000:0000:9abc:def0"}) _, epSlice2 := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.5", "10.1.2.6"}) // ensure that Endpoints with only IPv6 addresses result in EndpointSlice // with an IPv6 address type. 
endpoints3, epSlice3 := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"1234::5678:0000:0000:9abc:def0"}) epSlice3.AddressType = discovery.AddressTypeIPv6 testCases := map[string]struct { endpointSlicesEnabled bool expectedError error expectedEndpoints *corev1.Endpoints expectedEndpointSlice *discovery.EndpointSlice endpoints []*corev1.Endpoints endpointSlices []*discovery.EndpointSlice namespaceParam string endpointsParam *corev1.Endpoints }{ "single-endpoint": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpoints: endpoints1, expectedEndpointSlice: epSlice1, endpoints: []*corev1.Endpoints{}, namespaceParam: endpoints1.Namespace, endpointsParam: endpoints1, }, "single-endpoint-partial-ipv6": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpoints: endpoints2, expectedEndpointSlice: epSlice2, endpoints: []*corev1.Endpoints{}, namespaceParam: endpoints2.Namespace, endpointsParam: endpoints2, }, "single-endpoint-full-ipv6": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpoints: endpoints3, expectedEndpointSlice: epSlice3, endpoints: []*corev1.Endpoints{}, namespaceParam: endpoints3.Namespace, endpointsParam: endpoints3, }, "single-endpoint-no-slices": { endpointSlicesEnabled: false, expectedError: nil, expectedEndpoints: endpoints1, expectedEndpointSlice: nil, endpoints: []*corev1.Endpoints{}, namespaceParam: endpoints1.Namespace, endpointsParam: endpoints1, }, "existing-endpoint": { endpointSlicesEnabled: true, expectedError: errors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: "endpoints"}, "foo"), expectedEndpoints: nil, expectedEndpointSlice: nil, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: endpoints1.Namespace, endpointsParam: endpoints1, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { client := fake.NewSimpleClientset() epAdapter := EndpointsAdapter{endpointClient: client.CoreV1()} if testCase.endpointSlicesEnabled { 
epAdapter.endpointSliceClient = client.DiscoveryV1beta1() } for _, endpoints := range testCase.endpoints { _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } } endpoints, err := epAdapter.Create(testCase.namespaceParam, testCase.endpointsParam) if !apiequality.Semantic.DeepEqual(testCase.expectedError, err) { t.Errorf("Expected error: %v, got: %v", testCase.expectedError, err) } if !apiequality.Semantic.DeepEqual(endpoints, testCase.expectedEndpoints) { t.Errorf("Expected endpoints: %v, got: %v", testCase.expectedEndpoints, endpoints) } epSliceList, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Error listing Endpoint Slices: %v", err) } if testCase.expectedEndpointSlice == nil { if len(epSliceList.Items) != 0 { t.Fatalf("Expected no Endpoint Slices, got: %v", epSliceList.Items) } } else { if len(epSliceList.Items) == 0 { t.Fatalf("No Endpoint Slices found, expected: %v", testCase.expectedEndpointSlice) } if len(epSliceList.Items) > 1 { t.Errorf("Only 1 Endpoint Slice expected, got: %v", testCase.expectedEndpointSlice) } if !apiequality.Semantic.DeepEqual(*testCase.expectedEndpointSlice, epSliceList.Items[0]) { t.Errorf("Expected Endpoint Slice: %v, got: %v", testCase.expectedEndpointSlice, epSliceList.Items[0]) } } }) } } func TestEndpointsAdapterUpdate(t *testing.T) { endpoints1, _ := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.3", "10.1.2.4"}) endpoints2, epSlice2 := generateEndpointsAndSlice("foo", "testing", []int{80, 443}, []string{"10.1.2.3", "10.1.2.4", "10.1.2.5"}) endpoints3, _ := generateEndpointsAndSlice("bar", "testing", []int{80, 443}, []string{"10.1.2.3", "10.1.2.4", "10.1.2.5"}) // ensure that EndpointSlice with deprecated IP address type is replaced // with one that has an IPv4 address type. 
endpoints4, _ := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.7", "10.1.2.8"}) _, epSlice4IP := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.7", "10.1.2.8"}) epSlice4IP.AddressType = discovery.AddressTypeIP _, epSlice4IPv4 := generateEndpointsAndSlice("foo", "testing", []int{80}, []string{"10.1.2.7", "10.1.2.8"}) testCases := map[string]struct { endpointSlicesEnabled bool expectedError error expectedEndpoints *corev1.Endpoints expectedEndpointSlice *discovery.EndpointSlice endpoints []*corev1.Endpoints endpointSlices []*discovery.EndpointSlice namespaceParam string endpointsParam *corev1.Endpoints }{ "single-existing-endpoints-no-change": { endpointSlicesEnabled: false, expectedError: nil, expectedEndpoints: endpoints1, expectedEndpointSlice: nil, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "testing", endpointsParam: endpoints1, }, "existing-endpointslice-replaced-with-updated-ipv4-address-type": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpoints: endpoints4, expectedEndpointSlice: epSlice4IPv4, endpoints: []*corev1.Endpoints{endpoints4}, endpointSlices: []*discovery.EndpointSlice{epSlice4IP}, namespaceParam: "testing", endpointsParam: endpoints4, }, "add-ports-and-ips": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpoints: endpoints2, expectedEndpointSlice: epSlice2, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "testing", endpointsParam: endpoints2, }, "missing-endpoints": { endpointSlicesEnabled: true, expectedError: errors.NewNotFound(schema.GroupResource{Group: "", Resource: "endpoints"}, "bar"), expectedEndpoints: nil, expectedEndpointSlice: nil, endpoints: []*corev1.Endpoints{endpoints1}, namespaceParam: "testing", endpointsParam: endpoints3, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { client := fake.NewSimpleClientset() epAdapter := EndpointsAdapter{endpointClient: client.CoreV1()} if 
testCase.endpointSlicesEnabled { epAdapter.endpointSliceClient = client.DiscoveryV1beta1() } for _, endpoints := range testCase.endpoints { _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } } endpoints, err := epAdapter.Update(testCase.namespaceParam, testCase.endpointsParam) if !apiequality.Semantic.DeepEqual(testCase.expectedError, err) { t.Errorf("Expected error: %v, got: %v", testCase.expectedError, err) } if !apiequality.Semantic.DeepEqual(endpoints, testCase.expectedEndpoints) { t.Errorf("Expected endpoints: %v, got: %v", testCase.expectedEndpoints, endpoints) } epSliceList, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Error listing Endpoint Slices: %v", err) } if testCase.expectedEndpointSlice == nil { if len(epSliceList.Items) != 0 { t.Fatalf("Expected no Endpoint Slices, got: %v", epSliceList.Items) } } else { if len(epSliceList.Items) == 0 { t.Fatalf("No Endpoint Slices found, expected: %v", testCase.expectedEndpointSlice) } if len(epSliceList.Items) > 1 { t.Errorf("Only 1 Endpoint Slice expected, got: %v", testCase.expectedEndpointSlice) } if !apiequality.Semantic.DeepEqual(*testCase.expectedEndpointSlice, epSliceList.Items[0]) { t.Errorf("Expected Endpoint Slice: %v, got: %v", testCase.expectedEndpointSlice, epSliceList.Items[0]) } } }) } } func generateEndpointsAndSlice(name, namespace string, ports []int, addresses []string) (*corev1.Endpoints, *discovery.EndpointSlice) { objectMeta := metav1.ObjectMeta{Name: name, Namespace: namespace} trueBool := true epSlice := &discovery.EndpointSlice{ObjectMeta: objectMeta, AddressType: discovery.AddressTypeIPv4} epSlice.Labels = map[string]string{discovery.LabelServiceName: name} subset := corev1.EndpointSubset{} for i, port := range ports { endpointPort := corev1.EndpointPort{ Name: 
fmt.Sprintf("port-%d", i), Port: int32(port), Protocol: corev1.ProtocolTCP, } subset.Ports = append(subset.Ports, endpointPort) epSlice.Ports = append(epSlice.Ports, discovery.EndpointPort{ Name: &endpointPort.Name, Port: &endpointPort.Port, Protocol: &endpointPort.Protocol, }) } for i, address := range addresses { endpointAddress := corev1.EndpointAddress{ IP: address, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: fmt.Sprintf("pod-%d", i), }, } subset.Addresses = append(subset.Addresses, endpointAddress) epSlice.Endpoints = append(epSlice.Endpoints, discovery.Endpoint{ Addresses: []string{endpointAddress.IP}, TargetRef: endpointAddress.TargetRef, Conditions: discovery.EndpointConditions{Ready: &trueBool}, }) } return &corev1.Endpoints{ ObjectMeta: objectMeta, Subsets: []corev1.EndpointSubset{subset}, }, epSlice } func TestEndpointsAdapterEnsureEndpointSliceFromEndpoints(t *testing.T) { endpoints1, epSlice1 := generateEndpointsAndSlice("foo", "testing", []int{80, 443}, []string{"10.1.2.3", "10.1.2.4"}) endpoints2, epSlice2 := generateEndpointsAndSlice("foo", "testing", []int{80, 443}, []string{"10.1.2.3", "10.1.2.4", "10.1.2.5"}) testCases := map[string]struct { endpointSlicesEnabled bool expectedError error expectedEndpointSlice *discovery.EndpointSlice endpointSlices []*discovery.EndpointSlice namespaceParam string endpointsParam *corev1.Endpoints }{ "existing-endpointslice-no-change": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpointSlice: epSlice1, endpointSlices: []*discovery.EndpointSlice{epSlice1}, namespaceParam: "testing", endpointsParam: endpoints1, }, "existing-endpointslice-change": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpointSlice: epSlice2, endpointSlices: []*discovery.EndpointSlice{epSlice1}, namespaceParam: "testing", endpointsParam: endpoints2, }, "missing-endpointslice": { endpointSlicesEnabled: true, expectedError: nil, expectedEndpointSlice: epSlice1, endpointSlices: []*discovery.EndpointSlice{}, 
namespaceParam: "testing", endpointsParam: endpoints1, }, "endpointslices-disabled": { endpointSlicesEnabled: false, expectedError: nil, expectedEndpointSlice: nil, endpointSlices: []*discovery.EndpointSlice{}, namespaceParam: "testing", endpointsParam: endpoints1, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { client := fake.NewSimpleClientset() epAdapter := EndpointsAdapter{endpointClient: client.CoreV1()} if testCase.endpointSlicesEnabled { epAdapter.endpointSliceClient = client.DiscoveryV1beta1() } for _, endpointSlice := range testCase.endpointSlices { _, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating EndpointSlice: %v", err) } } err := epAdapter.EnsureEndpointSliceFromEndpoints(testCase.namespaceParam, testCase.endpointsParam) if !apiequality.Semantic.DeepEqual(testCase.expectedError, err) { t.Errorf("Expected error: %v, got: %v", testCase.expectedError, err) } endpointSlice, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).Get(context.TODO(), testCase.endpointsParam.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { t.Fatalf("Error getting Endpoint Slice: %v", err) } if !apiequality.Semantic.DeepEqual(endpointSlice, testCase.expectedEndpointSlice) { t.Errorf("Expected Endpoint Slice: %v, got: %v", testCase.expectedEndpointSlice, endpointSlice) } }) } }
package requests

// SendRequest is the payload for a "send" action, moving an Amount from a
// Source account/address to a Destination. Field semantics beyond the JSON
// and mapstructure mappings are not visible here.
type SendRequest struct {
	// BaseRequest is squashed so its fields decode at the same level as
	// the fields declared below.
	BaseRequest `mapstructure:",squash"`
	Source      string `json:"source" mapstructure:"source"`
	Destination string `json:"destination" mapstructure:"destination"`
	// Amount is a string — presumably to preserve arbitrary-precision
	// values; TODO confirm against the consuming API.
	Amount string `json:"amount" mapstructure:"amount"`
	// ID and Work are optional; nil means "omit from the encoded request".
	ID   *string `json:"id,omitempty" mapstructure:"id,omitempty"`
	Work *string `json:"work,omitempty" mapstructure:"work,omitempty"`
}
package cloudstorage

import (
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// S3Storage implements Storage backed by an AWS S3 (or compatible) bucket.
type S3Storage struct {
	Region          string
	AccessKeyId     string
	AccessKeySecret string
	Bucket          string
}

// NewS3Storage builds an S3-backed Storage from static credentials, a region,
// and a bucket name.
func NewS3Storage(accessKeyId, accessKeySecret, region, bucket string) Storage {
	return &S3Storage{
		Region:          region,
		AccessKeyId:     accessKeyId,
		AccessKeySecret: accessKeySecret,
		Bucket:          bucket,
	}
}

// GetSession creates an AWS session using the storage's static credentials.
func (s *S3Storage) GetSession() (*session.Session, error) {
	return session.NewSession(&aws.Config{
		Region:      aws.String(s.Region),
		Credentials: credentials.NewStaticCredentials(s.AccessKeyId, s.AccessKeySecret, ""),
	})
}

// UploadObject uploads a local file to objectKey. When publicRead is true the
// object is stored with a public-read canned ACL.
func (s *S3Storage) UploadObject(objectKey string, localFile string, publicRead bool) error {
	sess, err := s.GetSession()
	if err != nil {
		return err
	}

	file, err := os.Open(localFile)
	if err != nil {
		return err
	}
	defer file.Close()

	fileInfo, ferr := file.Stat()
	if ferr != nil {
		return ferr
	}

	// Read the whole file into memory. Fix: the previous version ignored the
	// error from a single file.Read call, which is also not guaranteed to
	// fill the buffer; io.ReadFull fails loudly on short reads.
	buffer := make([]byte, fileInfo.Size())
	if _, err := io.ReadFull(file, buffer); err != nil {
		return err
	}

	input := &s3.PutObjectInput{
		Bucket:        aws.String(s.Bucket),
		Key:           aws.String(objectKey),
		Body:          bytes.NewReader(buffer),
		ContentLength: aws.Int64(fileInfo.Size()),
		ContentType:   aws.String(http.DetectContentType(buffer)),
	}
	if publicRead {
		// Make the object publicly readable.
		input.ACL = aws.String(s3.ObjectCannedACLPublicRead)
	}
	_, rerr := s3.New(sess).PutObject(input)
	return rerr
}

// UploadByContent uploads raw bytes to objectKey; the content type is sniffed
// from the payload.
func (s *S3Storage) UploadByContent(objectKey string, content []byte, publicRead bool) error {
	sess, err := s.GetSession()
	if err != nil {
		return err
	}

	input := &s3.PutObjectInput{
		Bucket:        aws.String(s.Bucket),
		Key:           aws.String(objectKey),
		Body:          bytes.NewReader(content),
		ContentLength: aws.Int64(int64(len(content))),
		ContentType:   aws.String(http.DetectContentType(content)),
	}
	if publicRead {
		// Make the object publicly readable.
		input.ACL = aws.String(s3.ObjectCannedACLPublicRead)
	}
	_, rerr := s3.New(sess).PutObject(input)
	return rerr
}

// UploadByMultipartFileHeader uploads the contents of an HTTP multipart file
// to objectKey.
func (s *S3Storage) UploadByMultipartFileHeader(objectKey string, fileHeader *multipart.FileHeader, publicRead bool) error {
	sess, err := s.GetSession()
	if err != nil {
		return err
	}

	fileContent, err := fileHeader.Open()
	if err != nil {
		return err
	}

	bytesContent, err := ioutil.ReadAll(fileContent)
	if err != nil {
		return err
	}

	input := &s3.PutObjectInput{
		Bucket:        aws.String(s.Bucket),
		Key:           aws.String(objectKey),
		Body:          bytes.NewReader(bytesContent),
		ContentLength: aws.Int64(fileHeader.Size),
		ContentType:   aws.String(http.DetectContentType(bytesContent)),
	}
	if publicRead {
		// Make the object publicly readable.
		input.ACL = aws.String(s3.ObjectCannedACLPublicRead)
	}
	_, rerr := s3.New(sess).PutObject(input)
	return rerr
}

// GetObject downloads the object at objectKey and returns its full contents.
func (s *S3Storage) GetObject(objectKey string) ([]byte, error) {
	sess, err := s.GetSession()
	if err != nil {
		return nil, err
	}

	input := &s3.GetObjectInput{
		Bucket: aws.String(s.Bucket),
		Key:    aws.String(objectKey),
	}
	result, err := s3.New(sess).GetObject(input)
	if err != nil {
		return nil, err
	}
	return ioutil.ReadAll(result.Body)
}

// CopyObject server-side copies srcObjectKey to destObjectKey within the
// same bucket.
func (s *S3Storage) CopyObject(srcObjectKey string, destObjectKey string, publicRead bool) error {
	sess, err := s.GetSession()
	if err != nil {
		return err
	}

	input := &s3.CopyObjectInput{
		Bucket:     aws.String(s.Bucket),
		CopySource: aws.String("/" + s.Bucket + "/" + srcObjectKey),
		Key:        aws.String(destObjectKey),
	}
	if publicRead {
		// Make the copy publicly readable.
		input.ACL = aws.String(s3.ObjectCannedACLPublicRead)
	}
	_, rerr := s3.New(sess).CopyObject(input)
	return rerr
}

// DeleteObject removes a single object by key.
func (s *S3Storage) DeleteObject(objectKey string) error {
	sess, err := s.GetSession()
	if err != nil {
		return err
	}

	_, rerr := s3.New(sess).DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(s.Bucket),
		Key:    aws.String(objectKey),
	})
	return rerr
}

// DeleteObjects removes a batch of objects; objectKeys must be non-empty.
func (s *S3Storage) DeleteObjects(objectKeys []string) error {
	// Validate input before doing any AWS work.
	if len(objectKeys) == 0 {
		return errors.New("objectKeys不能为空")
	}

	sess, err := s.GetSession()
	if err != nil {
		return err
	}

	var objList []*s3.ObjectIdentifier
	for _, v := range objectKeys {
		objList = append(objList, &s3.ObjectIdentifier{Key: aws.String(v)})
	}
	_, err = s3.New(sess).DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(s.Bucket),
		Delete: &s3.Delete{
			Objects: objList,
		},
	})
	return err
}
package networking

import (
	"github.com/AminRezaei0x443/quickjs-go"
)

// AddNetModule defines a "networking" module on the given QuickJS context.
// The module's initializer builds the HttpClient class (via InitHttpClass)
// and attaches it as a module property; the name is also declared as an
// export so scripts can `import { HttpClient } from "networking"`.
func AddNetModule(context *quickjs.Context) *quickjs.Module {
	m := context.DefineModule("networking", func(ctx *quickjs.Context, module *quickjs.Module) int {
		net := InitHttpClass(ctx)
		module.AddProperty("HttpClient", net)
		// NOTE(review): the returned int appears to be an init status code
		// for quickjs — confirm against the quickjs-go API docs.
		return 1
	})
	m.ExportName("HttpClient")
	return m
}
package middleware

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"

	"github.com/labstack/echo"
)

const (
	// AppJsonHeader is the JSON:API media type this binder accepts.
	AppJsonHeader = "application/vnd.api+json"
)

// AppBinder is an echo.Binder that only accepts JSON:API
// (application/vnd.api+json) request bodies.
type AppBinder struct {
	echo.Binder
}

// Bind decodes the request body into i when the Content-Type is
// application/vnd.api+json. Malformed JSON yields a 400 with a descriptive
// message; any other media type is rejected with 415.
//
// Fix: removed an unreachable duplicate "return echo.NewHTTPError(...)" that
// followed an if/else chain in which every branch already returned.
func (AppBinder) Bind(i interface{}, c echo.Context) (err error) {
	req := c.Request()
	ctype := req.Header().Get(echo.HeaderContentType)
	if req.Body() == nil {
		return echo.NewHTTPError(http.StatusBadRequest, "request body can't be empty")
	}

	switch {
	case strings.Contains(ctype, AppJsonHeader):
		// Strip media-type parameters (e.g. "; charset=utf-8") before the
		// exact (case-insensitive) match.
		ct := strings.Split(ctype, ";")
		if len(ct) > 0 {
			ctype = ct[0]
		}
		if !strings.EqualFold(ctype, AppJsonHeader) {
			c.Error(echo.ErrUnsupportedMediaType)
			return
		}
		if err = json.NewDecoder(req.Body()).Decode(i); err != nil {
			if ute, ok := err.(*json.UnmarshalTypeError); ok {
				return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("type error: expected=%v, got=%v, offset=%v", ute.Type, ute.Value, ute.Offset))
			} else if se, ok := err.(*json.SyntaxError); ok {
				return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Syntax error: offset=%v, error=%v", se.Offset, se.Error()))
			}
			return echo.NewHTTPError(http.StatusBadRequest, err.Error())
		}
	default:
		c.Error(echo.ErrUnsupportedMediaType)
		return
	}
	return
}
package main

import (
	"image"
	"image/draw"
)

// Mouse describes a mouse sample: pointer location and the state of the
// buttons (encoded in the Buttons int; exact encoding not defined here).
type Mouse struct {
	Loc     image.Point
	Buttons int
}

// Context represents the context for a GUI client.
type Context struct {
	// W receives a value when the window changes.
	W <-chan *Window
	// K receives a value when a key is pressed.
	K <-chan rune
	// M receives a value when the mouse moves.
	M <-chan Mouse
	// Dragging receives a value when another window
	// is dragging something into this context.
	Dragging <-chan DragEvent
}

// DragEventKind identifies the phase of a drag-and-drop interaction.
type DragEventKind int

const (
	_ DragEventKind = iota
	DragEnter
	DragMove
	DragDrop
	DragLeave
)

// A DragEvent notifies a GUI client of an object being dragged
// onto it.
type DragEvent struct {
	Kind DragEventKind

	// Loc gives the current position of the dragged object.
	Loc image.Point

	// Data holds the data associated with the dragged object.
	Data interface{}

	// On a DragDrop event, Reply will be non-nil
	// and must be used to indicate whether the dropped
	// object has been accepted.
	Reply chan<- bool
}

// Window represents a GUI window.
type Window struct {
	// TODO
}

// Image returns the window's drawable image (unimplemented).
func (w *Window) Image() draw.Image {
	// TODO
	return nil
}

// Flush is presumably meant to make drawing visible (unimplemented).
func (w *Window) Flush() {
	// TODO
}

// Drag initiates a drag of data from this window, reporting whether the
// object was accepted by a drop target (unimplemented; always false).
func (w *Window) Drag(data interface{}, icon image.Image, from image.Point) bool {
	// TODO read from mouse channel and distribute dragging events
	// to appropriate clients.
	return false
}

// ----------------------------------------------
// Some sample client code.
// The client has a window with multiple slots, each
// of which can contain an item. The user can drag
// items between slots in different windows.

type SomeClient struct {
	slots []*Slot
}

type Content struct {
	s string
}

// image returns an image for the being-dragged item.
func (c *Content) image() image.Image {
	// TODO
	return nil
}

type Slot struct {
	// r holds the rectangle in the window covered by the slot.
	r image.Rectangle
	// content holds the current contents of the slot.
	content *Content
	// willing signifies that this slot is showing readiness to accept a
	// dragged object.
	willing bool
}

// NewContext constructs a Context (unimplemented; channels are left nil).
func NewContext() *Context {
	return &Context{
		// TODO
	}
}

// Run is the client's event loop: it tracks window replacement, starts drags
// when the mouse is over a populated slot, and delegates incoming drags to
// the dragging handler.
func (c *SomeClient) Run() {
	ctxt := NewContext()
	win := <-ctxt.W
	for {
		select {
		case win = <-ctxt.W:
			// window replaced.
		case m := <-ctxt.M:
			if slot := c.SlotAtPoint(m.Loc); slot != nil && slot.content != nil {
				// Drag blocks until the drag completes; on acceptance the
				// slot is emptied.
				if win.Drag(slot.content, slot.content.image(), m.Loc) {
					c.UpdateSlot(slot, nil)
				}
			}
		case e := <-ctxt.Dragging:
			// Some other window is dragging an object into this one.
			c.dragging(e, ctxt.Dragging)
		}
	}
}

// dragging consumes drag events starting with e until the drag leaves or
// drops, highlighting the slot under the cursor and answering a DragDrop
// through e.Reply.
func (c *SomeClient) dragging(e DragEvent, dragc <-chan DragEvent) {
	var hover *Slot // slot the drag is currently hovering over.
	for {
		content, _ := e.Data.(*Content)
		slot := c.SlotAtPoint(e.Loc)
		switch e.Kind {
		case DragLeave:
			return
		case DragMove:
			// ignore unknown drags
			if content == nil || slot == hover {
				break
			}
			if hover != nil {
				c.ShowWilling(hover, false)
			}
			if slot != nil {
				c.ShowWilling(slot, true)
			}
			hover = slot
		case DragDrop:
			if hover != nil {
				c.ShowWilling(hover, false)
			}
			if content == nil || slot == nil {
				e.Reply <- false
				return
			}
			// Accept the drop and complete the drag-and-drop action.
			c.UpdateSlot(slot, content)
			e.Reply <- true
			return
		}
		e = <-dragc
	}
}

// SlotAtPoint returns the slot under p (stub: always the first slot).
func (c *SomeClient) SlotAtPoint(p image.Point) *Slot {
	// TODO find slot under given point.
	return c.slots[0]
}

// ShowWilling records (and is meant to display) a slot's readiness to accept
// a drop.
func (c *SomeClient) ShowWilling(slot *Slot, willing bool) {
	// TODO update displayed image to reflect new status.
	slot.willing = willing
}

// UpdateSlot is meant to set a slot's content and redraw (unimplemented).
func (c *SomeClient) UpdateSlot(slot *Slot, content *Content) {
	// TODO update displayed image to reflect new content
}

func main() {
}
package main

import (
	"./routers"
	"net/http"
)

// main wires up the application router and serves HTTP on :8080.
// NOTE(review): the relative import "./routers" is GOPATH-era syntax and
// will not build under Go modules — confirm the intended build setup.
// The error from ListenAndServe is currently discarded.
func main() {
	router := routers.InitRouters()
	http.ListenAndServe(":8080", router)
}

//TODO common configuration file should be implemented
//TODO Init folder should be implemented and necessary code should move into that
package sqlx

import (
	"database/sql"
	"fmt"
	"log"
	"strings"

	"github.com/ellsol/gox/typex"
	_ "github.com/lib/pq"
)

// Statement templates used throughout this package. %v placeholders are
// filled with identifiers via fmt.Sprintf; value placeholders use $1, $2, …
const (
	CreateDatabaseStatement   = "CREATE DATABASE %v;"
	DropDatabaseStatement     = "DROP DATABASE IF EXISTS %v;"
	CreateSchemaStatement     = "CREATE SCHEMA %v;"
	DropSchemaStatement       = "DROP SCHEMA IF EXISTS %v CASCADE;"
	CreateTableStatement      = "CREATE TABLE %v %v;"
	DropTableStatement        = "DROP TABLE IF EXISTS %v;"
	DeleteStatement           = "DELETE FROM %v WHERE %v = $1;"
	InsertStatementWithReturn = "INSERT INTO %v(%v) VALUES(%v) returning %v;"
	InsertStatement           = "INSERT INTO %v(%v) VALUES(%v);"
	NumberOfRowsStatement     = "SELECT count(*) FROM %v;"
	MaxStatement              = "SELECT max(%v) FROM %v;"
)

// SQLDB wraps an open *sql.DB connection.
type SQLDB struct {
	Connection *sql.DB
}

// SqlDBInfo holds the pieces of a Postgres connection string.
type SqlDBInfo struct {
	Host     string
	User     string
	Password string
	DBName   string
}

// dbinfo renders the connection parameters in lib/pq key=value form
// (sslmode is always disabled).
func (pi *SqlDBInfo) dbinfo() string {
	return fmt.Sprintf("host=%s user=%s password=%s dbname=%s sslmode=disable",
		pi.Host, pi.User, pi.Password, pi.DBName)
}

// SQLTable describes a table this package can create and write to.
type SQLTable interface {
	ColumnNames() []string
	Name() string
	CreateStatement() string
}

// OpenSqlDB opens a Postgres connection with the given lib/pq parameter
// string and caps idle connections at 20.
func OpenSqlDB(params string) (*SQLDB, error) {
	fmt.Println("Trying to open connection to postgres with: ", params)
	connection, err := sql.Open("postgres", params)
	if err != nil {
		return nil, err
	}
	connection.SetMaxIdleConns(20)
	return &SQLDB{
		Connection: connection,
	}, nil
}

// InitializeDatabase ensures the schema and all given tables exist,
// optionally dropping the schema first when forceRecreate is set.
func (db *SQLDB) InitializeDatabase(databaseName string, schema string, tables map[string]SQLTable, forceRecreate bool) error {
	logMsg(fmt.Sprintf("initializing db %v with scheme %v and forceRecreate: %v", databaseName, schema, forceRecreate))

	if forceRecreate {
		err := db.DropSchemaIfExist(schema)
		if err != nil {
			return err
		}
	}

	err := db.MaybeCreateScheme(schema)
	if err != nil {
		return err
	}

	err = db.MaybeInitializeTables(tables)
	if err != nil {
		return err
	}
	return nil
}

// MaybeCreateDatabase creates the database, treating "already exists" as
// success.
func (it *SQLDB) MaybeCreateDatabase(database string) error {
	statement := fmt.Sprintf(CreateDatabaseStatement, database)
	stmt, err := it.Connection.Prepare(statement)
	if err != nil {
		// Fix: a Prepare failure was previously swallowed (returned nil).
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		if strings.Contains(err.Error(), "already exists") {
			return nil
		}
		return err
	}
	return nil
}

// DropDatabaseIfExist drops the database if it exists.
func (it *SQLDB) DropDatabaseIfExist(database string) error {
	statement := fmt.Sprintf(DropDatabaseStatement, database)
	stmt, err := it.Connection.Prepare(statement)
	// Fix: the error was previously ignored and stmt.Close was deferred
	// before the check, dereferencing a nil stmt on Prepare failure.
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		return err
	}
	return nil
}

// MaybeCreateScheme creates the schema, treating "already exists" as success.
func (it *SQLDB) MaybeCreateScheme(scheme string) error {
	logMsg(fmt.Sprintf("Maybe create schema %v", scheme))
	statement := fmt.Sprintf(CreateSchemaStatement, scheme)
	logMsg(fmt.Sprintf("Maybe create schema statement: %v", statement))
	stmt, err := it.Connection.Prepare(statement)
	if err != nil {
		// Fix: a Prepare failure was previously swallowed (returned nil).
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		logMsg(err.Error())
		if strings.Contains(err.Error(), "already exists") {
			return nil
		}
		return err
	}
	return nil
}

// DropSchemaIfExist drops the schema (CASCADE) if it exists.
func (it *SQLDB) DropSchemaIfExist(schema string) error {
	logMsg(fmt.Sprintf("Dropping schema %v", schema))
	statement := fmt.Sprintf(DropSchemaStatement, schema)
	logMsg(fmt.Sprintf("Dropping schema statement: %v", statement))
	stmt, err := it.Connection.Prepare(statement)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	return err
}

// MaybeCreateTable creates the table from its own CreateStatement, treating
// "already exists" as success.
func (it *SQLDB) MaybeCreateTable(table SQLTable) error {
	logMsg(table.CreateStatement())
	stmt, err := it.Connection.Prepare(table.CreateStatement())
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		if strings.Contains(err.Error(), "already exists") {
			return nil
		}
		return err
	}
	return nil
}

// DropTableIfExist drops the table if it exists.
func (it *SQLDB) DropTableIfExist(table SQLTable) error {
	statement := fmt.Sprintf(DropTableStatement, table.Name())
	stmt, err := it.Connection.Prepare(statement)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec()
	if err != nil {
		return err
	}
	return nil
}

// MaybeInitializeTables creates every table in the map, stopping at the
// first error.
func (db *SQLDB) MaybeInitializeTables(tables map[string]SQLTable) error {
	for _, v := range tables {
		err := db.MaybeCreateTable(v)
		if err != nil {
			return err
		}
	}
	return nil
}

/////////////////////////////////////////////////////////////////
//
// Statements nobody needs, abstracted but sometimes helpful
//
/////////////////////////////////////////////////////////////////

// Insert runs the generated INSERT for table with the given values.
// Note: the generated statement has no RETURNING clause, so there is
// normally no row to scan and 0 is returned as the id.
func (pg *SQLDB) Insert(table SQLTable, values []interface{}) (int, error) {
	statement := GetPostgresInsertStatementNoIncrement(table)

	rows, err := pg.Connection.Query(statement, values...)
	if err != nil {
		return -1, err
	}
	defer rows.Close()

	// Fix: Scan was previously called without Next (always an error,
	// silently ignored). Scan a returned id only if a row is present.
	var lastInsertId int
	if rows.Next() {
		if err := rows.Scan(&lastInsertId); err != nil {
			return -1, err
		}
	}
	return lastInsertId, nil
}

// InsertOmitPrimary is Insert with the first (primary-key) column omitted
// from the generated statement.
func (pg *SQLDB) InsertOmitPrimary(table SQLTable, values []interface{}) (int, error) {
	statement := GetPostgresInsertStatementNoIncrementOmitPrimary(table)

	rows, err := pg.Connection.Query(statement, values...)
	if err != nil {
		return -1, err
	}
	defer rows.Close()

	// See Insert: only scan when a row was actually returned.
	var lastInsertId int
	if rows.Next() {
		if err := rows.Scan(&lastInsertId); err != nil {
			return -1, err
		}
	}
	return lastInsertId, nil
}

// Update runs the generated UPDATE for table keyed on keyLabel.
func (pg *SQLDB) Update(table SQLTable, keyLabel string, values []interface{}) error {
	statement := CreateUpdateStatement(table, keyLabel)
	return pg.UpdateWithStatement(statement, table, values)
}

// UpdateWithStatement executes an UPDATE and requires exactly one affected
// row.
func (pg *SQLDB) UpdateWithStatement(statement string, table SQLTable, values []interface{}) error {
	updated, err := pg.Connection.Exec(statement, values...)
	if err != nil {
		return err
	}
	count, err := updated.RowsAffected()
	if err != nil {
		return err
	}
	if count != 1 {
		return fmt.Errorf("failed to update %v", table.Name())
	}
	return nil
}

// Delete removes the row whose keyLabel column equals key.
func (pg *SQLDB) Delete(key interface{}, keyLabel string, table SQLTable) error {
	sqlStatement := fmt.Sprintf(DeleteStatement, table.Name(), keyLabel)
	_, err := pg.Connection.Exec(sqlStatement, key)
	if err != nil {
		return err
	}
	return nil
}

// Count returns the number of rows in table (-1 on error or no result).
func (pg *SQLDB) Count(table SQLTable) (int, error) {
	sqlStatement := fmt.Sprintf(NumberOfRowsStatement, table.Name())
	rows, err := pg.Connection.Query(sqlStatement)
	if err != nil {
		return -1, err
	}
	defer rows.Close()

	if rows.Next() {
		var count int
		err := rows.Scan(&count)
		if err != nil {
			return -1, err
		}
		return count, nil
	}
	return -1, nil
}

// CountByStatement runs an arbitrary single-integer query. The table
// parameter is unused but kept for interface compatibility.
func (it *SQLDB) CountByStatement(table SQLTable, statement string, params ...interface{}) (int, error) {
	var count int
	row := it.Connection.QueryRow(statement, params...)
	err := row.Scan(&count)
	if err != nil {
		return -1, err
	}
	return count, nil
}

// Max returns max(column) for table (-1 with nil error when there is no
// result row).
func (pg *SQLDB) Max(table SQLTable, column string) (int64, error) {
	sqlStatement := fmt.Sprintf(MaxStatement, column, table.Name())
	rows, err := pg.Connection.Query(sqlStatement)
	if err != nil {
		return -1, err
	}
	defer rows.Close()

	if rows.Next() {
		var max int64
		err := rows.Scan(&max)
		if err != nil {
			// Fix: a scan error was previously reported as success (0, nil).
			return -1, err
		}
		return max, nil
	}
	return -1, nil
}

// Statements

/*
GetPostgresInsertStatementNoIncrement transforms a table's column list
[tag1, tag2, ...] into

	INSERT INTO table(tag1, tag2, ...) VALUES($1, $2, ...)
*/
func GetPostgresInsertStatementNoIncrement(t SQLTable) string {
	paramsJoin := typex.CommaSeparatedString(t.ColumnNames())
	paramsPlaceholder := typex.CommaSeparatedString(typex.MapStringListWithPos(t.ColumnNames(), func(key int, value string) string {
		return fmt.Sprintf("$%v", key+1)
	}))

	return fmt.Sprintf(InsertStatement, t.Name(), paramsJoin, paramsPlaceholder)
}

// GetPostgresInsertStatementNoIncrementOmitPrimary is the same as
// GetPostgresInsertStatementNoIncrement but skips the first column (assumed
// to be the auto-generated primary key).
func GetPostgresInsertStatementNoIncrementOmitPrimary(t SQLTable) string {
	paramsJoin := typex.CommaSeparatedString(t.ColumnNames()[1:])
	paramsPlaceholder := typex.CommaSeparatedString(typex.MapStringListWithPos(t.ColumnNames()[1:], func(key int, value string) string {
		return fmt.Sprintf("$%v", key+1)
	}))

	return fmt.Sprintf(InsertStatement, t.Name(), paramsJoin, paramsPlaceholder)
}

/*
CreateUpdateStatement maps an SQLTable to an UPDATE statement keyed on
keyLabel ($1); the remaining columns become SET assignments ($2, $3, ...).
*/
func CreateUpdateStatement(table SQLTable, keyLabel string) string {
	set := typex.MapStringListWithPos(table.ColumnNames()[1:], func(pos int, tag string) string {
		return fmt.Sprintf("%v = $%v", tag, pos+2)
	})
	return fmt.Sprintf("UPDATE %v SET %v WHERE %v = $1;", table.Name(), typex.CommaSeparatedString(set), keyLabel)
}

// LogDatabase toggles this package's statement logging.
var LogDatabase bool = true

// logMsg logs msg when LogDatabase is enabled.
func logMsg(msg string) {
	if LogDatabase {
		log.Println(msg)
	}
}
package gedcom

import (
	"reflect"
	"sync"
)

// Nodes is a slice of GEDCOM nodes.
type Nodes []Node

// nodeCache is used by NodesWithTag. Even though the lookup of child tags is
// fairly inexpensive, it happens a lot and it is common for the same paths to
// be looked up many times — especially when doing larger tasks like comparing
// GEDCOM files.
//
// Keys are Node values; each value is a *sync.Map of Tag -> Nodes.
var nodeCache = &sync.Map{} // map[Node]map[Tag]Nodes{}

// NewNodes converts any slice of Node-implementing values (passed as an
// interface{}) into a Nodes slice via reflection. Panics if ns is not a
// slice or if an element does not implement Node.
func NewNodes(ns interface{}) (nodes Nodes) {
	v := reflect.ValueOf(ns)
	for i := 0; i < v.Len(); i++ {
		nodes = append(nodes, v.Index(i).Interface().(Node))
	}
	return
}

// NodesWithTag returns the zero or more direct children of node that have the
// given GEDCOM tag.
//
// If the node is nil the result will also be nil (the code below returns nil
// for IsNil(node), not an empty slice).
func NodesWithTag(node Node, tag Tag) (result Nodes) {
	// Fast path: a previous call already computed this (node, tag) pair.
	if v1, ok1 := nodeCache.Load(node); ok1 {
		if v2, ok2 := v1.(*sync.Map).Load(tag); ok2 {
			return v2.(Nodes)
		}
	}

	// Populate the cache on the way out.
	// NOTE(review): on the very first miss for a node this stores a fresh
	// empty map but NOT the computed result, so the result is only cached
	// once the node's inner map already exists (i.e. from the second call
	// on). Presumably intentional as a lazy two-step warm-up — confirm.
	defer func() {
		if v1, ok := nodeCache.Load(node); ok {
			v1.(*sync.Map).Store(tag, result)
		} else {
			nodeCache.Store(node, &sync.Map{})
		}
	}()

	if IsNil(node) {
		return nil
	}

	// Linear scan of direct children; matching is delegated to Tag.Is.
	nodes := Nodes{}
	n := node.Nodes()
	for _, node := range n {
		if node.Tag().Is(tag) {
			nodes = append(nodes, node)
		}
	}

	return nodes
}

// NodesWithTagPath return all of the nodes that have an exact tag path. The
// number of nodes returned can be zero and tag must match the tag path
// completely and exactly.
//
//	birthPlaces := NodesWithTagPath(individual, TagBirth, TagPlace)
//
// If the node is nil the result will also be nil.
func NodesWithTagPath(node Node, tagPath ...Tag) Nodes {
	if IsNil(node) {
		return nil
	}

	// An empty path matches nothing (distinct from the recursive helper,
	// where an exhausted path means "matched all the way down").
	if len(tagPath) == 0 {
		return Nodes{}
	}

	return nodesWithTagPath(node, tagPath...)
}

// nodesWithTagPath is the recursive worker for NodesWithTagPath: it walks one
// tag level per call, collecting the nodes reached when the path is exhausted.
func nodesWithTagPath(node Node, tagPath ...Tag) Nodes {
	if len(tagPath) == 0 {
		return Nodes{node}
	}

	matches := Nodes{}
	for _, next := range NodesWithTag(node, tagPath[0]) {
		matches = append(matches, nodesWithTagPath(next, tagPath[1:]...)...)
	}

	return matches
}

// HasNestedNode checks if node contains lookingFor at any depth. If node and
// lookingFor are the same false is returned. If either node or lookingFor is
// nil then false is always returned.
//
// Nodes are matched by reference, not value so nodes that represent exactly the
// same value will not be considered equal.
func HasNestedNode(node Node, lookingFor Node) bool {
	if node == nil || lookingFor == nil {
		return false
	}

	for _, node := range node.Nodes() {
		if node == lookingFor || HasNestedNode(node, lookingFor) {
			return true
		}
	}

	return false
}

// CastTo creates a slice of a more specific node type.
//
// All Nodes must be the same type and the same as the provided t; a mismatch
// panics inside reflect.Value.Set.
func (nodes Nodes) CastTo(t interface{}) interface{} {
	size := len(nodes)
	nodeType := reflect.TypeOf(t)
	sliceType := reflect.SliceOf(nodeType)
	slice := reflect.MakeSlice(sliceType, size, size)

	for i, node := range nodes {
		value := reflect.ValueOf(node)
		slice.Index(i).Set(value)
	}

	return slice.Interface()
}

// castNodesWithTag is a convenience combining NodesWithTag and CastTo.
func castNodesWithTag(node Node, tag Tag, t interface{}) interface{} {
	return NodesWithTag(node, tag).CastTo(t)
}

// DeleteNodesWithTag removes every direct child of node whose tag matches tag.
// NOTE(review): iterates node.Nodes() while calling node.DeleteNode on it —
// whether all matches are removed depends on DeleteNode's semantics; confirm.
func DeleteNodesWithTag(node Node, tag Tag) {
	for _, n := range node.Nodes() {
		if n.Tag().Is(tag) {
			node.DeleteNode(n)
		}
	}
}

// FlattenAll works as Flatten with multiple inputs that are returned as a
// single slice.
//
// If any of the nodes are nil they will be ignored.
//
// The document parameter is required for copying some nodes like individuals
// that need a document context. These new nodes will be attached to the
// provided document.
//
// NOTE(review): the appends below go to the local copy of the result
// parameter and the function has no return values, so the caller never sees
// the flattened nodes — this looks like it should return Nodes; confirm
// against callers/upstream.
func (nodes Nodes) FlattenAll(document *Document, result Nodes) {
	for _, node := range nodes {
		if IsNil(node) {
			continue
		}

		result = append(result, Flatten(document, node)...)
	}

	return
}

// deleteNode removes the first element identically equal (by reference) to n,
// returning the possibly-shortened slice and whether a removal happened.
func (nodes Nodes) deleteNode(n Node) (Nodes, bool) {
	for i, node2 := range nodes {
		if node2 == n {
			return append(nodes[:i], nodes[i+1:]...), true
		}
	}

	return nodes, false
}
package goil

import (
	"fmt"
	"strconv"
	"time"
)

// Category identifies the content category of a publication on the endpoint.
type Category uint8

// Category IDs as used by the endpoint.
// NOTE: every constant carries an explicit Category type; previously only the
// first one did, leaving the rest as plain untyped int constants.
const (
	Divers     Category = 8
	News       Category = 6
	Photos     Category = 1
	Videos     Category = 2
	Journal    Category = 3
	Gazettes   Category = 10
	Podcasts   Category = 4
	Evenements Category = 5
	Sondages   Category = 9
	Annales    Category = 7
)

// format stringifies a category for the endpoint.
func (c Category) format() string {
	return strconv.FormatUint(uint64(c), 10)
}

// Check returns an error when the category is outside the valid range [1;10].
func (c Category) Check() error {
	var err error
	if c < 1 || c > 10 {
		err = fmt.Errorf("Given category (%d) not in acceptable range [1;10]", c)
	}
	return err
}

// Group identifies the group responsible for a publication.
type Group uint8

// Groups ID
const (
	NoGroup    Group = 0
	HustleISEP Group = 46
	// Please contact me if you have the other codes, or just do a pull request !
)

// format() stringifies a group
func (g Group) format() string {
	return strconv.FormatUint(uint64(g), 10)
}

// timeLayout is the necessary layout to publish dates on the endpoint
const timeLayout string = "02/01/2006 à 15:04"

// An event
// Currently only filled in / used when publishing
type Event struct {
	// The name of the event, mandatory
	Name string
	// Start and end times
	Start time.Time
	End   time.Time
	// Start and end times will be formatted as such: "DD/MM/YYYY à HH:MM"
}

// populated() returns true if an event is populated
func (e Event) populated() bool {
	return e.Name != ""
}

// Check() checks an event's validity
func (e Event) Check() error {
	switch {
	case e.Name == "":
		return fmt.Errorf("No event name indicated")
	case e.Start.Before(time.Now()):
		return fmt.Errorf("Event start time (%s) is before(!) current time (%s)", e.Start, time.Now())
	case e.End.Before(time.Now()):
		return fmt.Errorf("Event end time (%s) is before(!) current time (%s)", e.End, time.Now())
	case e.End.Before(e.Start):
		// BUGFIX: message previously read "Evend end time".
		return fmt.Errorf("Event end time (%s) is before(!) start time (%s)", e.End, e.Start)
	}
	return nil
}

// A survey
// Currently only filled in / used when publishing
type Survey struct {
	// Mandatory
	Question string
	End      time.Time // Format in "DD/MM/YYYY à HH:MM"
	Answers  []string
	// Whether the survey accepts multiple answers
	Multiple bool
}

// populated() returns true if a survey is populated
func (s Survey) populated() bool {
	return s.Question != ""
}

// Check validity
func (s Survey) Check() error {
	switch {
	case s.Question == "":
		return fmt.Errorf("No question included in survey")
	case s.End.Before(time.Now()):
		return fmt.Errorf("End time (%s) for survey is before(!) current time (%s)", s.End, time.Now())
	case len(s.Answers) < 2:
		return fmt.Errorf("A survey needs at least two answers to be a survey, duh !")
	}
	return nil
}

// A publication's ID
type PublicationID uint

// A publication on Iseplive
// Not all fields are filled in when getting publications
type Publication struct {
	// Publication's ID
	// Not populated when posting, currently unused
	ID PublicationID
	// Message is the text body of a publication
	// Mandatory for publishing
	Message string
	// The category of the publication meant to be published
	// Only filled in on publishing
	// Mandatory for publishing
	Category Category
	// The Group responsible for the publication
	// Only filled in on publishing
	Group Group
	// Whether that messages is official in reference to the group
	// Only used when publishing
	// If Group == 0 then it has no effect
	Official bool
	// Whether the publication should be private
	Private bool
	// Whether the dislike button should be activated
	Dislike bool
	// Event
	Event Event
	// Survey
	Survey Survey
	// Attachments paths for upload
	Attachments Attachments
}

// Check validates a publication before it is published. Each nested Check is
// now invoked once per rule instead of twice (once for the condition, once
// for the message).
func (p *Publication) Check() error {
	if len(p.Message) == 0 {
		return fmt.Errorf("Publication message is empty. It shouldn't")
	}
	if err := p.Category.Check(); err != nil {
		return fmt.Errorf("Publication's category (%d) is invalid: %s", p.Category, err)
	}
	if p.Event.populated() {
		if err := p.Event.Check(); err != nil {
			return fmt.Errorf("Publication's event is invalid: %s", err)
		}
	}
	if p.Survey.populated() {
		if err := p.Survey.Check(); err != nil {
			return fmt.Errorf("Publication's survey is invalid: %s", err)
		}
	}
	/* Attachment validation is not implemented yet:
	if err := p.Attachments.Check(); err != nil {
		return fmt.Errorf("Publication's attachments are invalid: %s", err.Error())
	}
	*/
	return nil
}
package stock

// Data holds a single quote: a ticker symbol and its current price.
type Data struct {
	Symbol       string
	CurrentPrice float64
}

// Client fetches current prices for one or more ticker symbols.
type Client interface {
	FetchCurrentPrice(tickers ...string) ([]Data, error)
}
package app

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
	"github.com/phillihq/racoon/config"
	"github.com/phillihq/racoon/util"
)

// configFileFlag declares the --config CLI flag (also settable via the
// CONFIG environment variable) pointing at the JSON config file.
var configFileFlag = util.AddFlagString(cli.StringFlag{
	Name:   "config",
	EnvVar: "CONFIG",
	Value:  "config.json",
	Usage:  "the path of your config file",
})

// appAction is the CLI entry action: it loads the configuration, starts the
// input, filter and output pipelines, then blocks until an exit signal.
func appAction(c *cli.Context) (err error) {
	configFilePath := c.String(configFileFlag.Name)

	// Load the configuration file.
	// IDIOM: fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
	conf, err := config.LoadConfigFromFile(configFilePath)
	if err != nil {
		return fmt.Errorf("load config file failed, %v", err)
	}

	if err = conf.RunInputs(); err != nil {
		return
	}
	if err = conf.RunFilters(); err != nil {
		return
	}
	if err = conf.RunOutputs(); err != nil {
		return
	}

	// Block until an exit signal is received.
	signalCH := util.InitSignal()
	util.HandleSignal(signalCH)
	return
}

// Main builds and runs the racoon CLI application.
func Main() {
	app := cli.NewApp()
	app.Name = "racoon"
	app.Usage = "log collector, base on Go"
	app.Version = "0.0.1"
	app.Flags = util.GetAppFlags()
	app.Action = util.ActionWrapper(appAction)
	app.Run(os.Args)
}
package main

import (
	"fmt"
	"main/config"
	"main/router"
)

// main builds the router and starts the HTTP server on the configured port,
// logging fatally (and exiting) if the server stops with an error.
func main() {
	e := router.New()
	e.Logger.Fatal(e.Start(fmt.Sprintf(":%d", config.Port)))
}
package todo

import (
	"context"

	"github.com/silverspase/todo/internal/modules/todo/model"
)

// UseCase describes the business-layer operations available on todo items.
// The string returned by Create/Update/Delete is implementation-defined
// (presumably the affected item's id — confirm with the implementation).
type UseCase interface {
	CreateItem(ctx context.Context, items model.Item) (string, error)
	// GetAllItems returns the requested page of items.
	GetAllItems(ctx context.Context, page int) ([]model.Item, error)
	GetItem(ctx context.Context, id string) (model.Item, error)
	UpdateItem(ctx context.Context, item model.Item) (string, error)
	DeleteItem(ctx context.Context, id string) (string, error)
}
package culturegen

import (
	"bytes"
	"html/template"
	"math/rand"

	"github.com/ironarachne/random"
)

// MusicStyle is a cultural music style
type MusicStyle struct {
	Structure   int
	Vocals      int
	Beat        int
	Tonality    int
	Descriptors []string
	Instruments []Instrument
}

// Instrument is a musical instrument
type Instrument struct {
	Name                   string
	Description            string
	Type                   string
	BaseMaterialOptions    []string
	SupportMaterialOptions []string
	BaseMaterial           string
	SupportMaterial        string
	DescriptionTemplate    string
}

// generateMusicDescriptors returns 1-4 randomly chosen adjectives describing
// the culture's music (duplicates are possible).
func (culture Culture) generateMusicDescriptors() []string {
	descriptors := []string{}
	possibleDescriptors := []string{
		"airy",
		"bombastic",
		"booming",
		"breathy",
		"bright",
		"cheerful",
		"driving",
		"dynamic",
		"energetic",
		"ethereal",
		"euphonic",
		"fast",
		"full-toned",
		"haunting",
		"lilting",
		"lofty",
		"mellifluous",
		"mellow",
		"melodic",
		"moody",
		"operatic",
		"orotund",
		"percussive",
		"powerful",
		"primitive",
		"regimented",
		"resonant",
		"rigid",
		"savage",
		"somber",
		"structured",
		"tumid",
		"uplifting",
		"vibrant",
		"warm",
	}

	numberOfDescriptors := rand.Intn(4) + 1
	for i := 0; i < numberOfDescriptors; i++ {
		descriptors = append(descriptors, random.Item(possibleDescriptors))
	}

	return descriptors
}

// generateMusicalInstruments picks 1-3 instruments that can be built from the
// materials available in the culture's home climate, assigns each a concrete
// base/support material and renders its description.
func (culture Culture) generateMusicalInstruments() []Instrument {
	availableHides := []string{}
	availableMetals := []string{}
	availableWoods := []string{}
	availableMaterials := []string{}

	for _, i := range culture.HomeClimate.CommonMetals {
		availableMetals = append(availableMetals, i.Name)
	}
	for _, i := range culture.HomeClimate.PreciousMetals {
		availableMetals = append(availableMetals, i.Name)
	}
	for _, i := range culture.HomeClimate.Plants {
		if i.IsTree {
			availableWoods = append(availableWoods, i.Name)
		}
	}
	for _, i := range culture.HomeClimate.Animals {
		if i.GivesHide {
			availableHides = append(availableHides, i.Name)
		}
	}

	if len(availableHides) > 0 {
		availableMaterials = append(availableMaterials, "hide")
	}
	if len(availableMetals) > 0 {
		availableMaterials = append(availableMaterials, "metal")
	}
	if len(availableWoods) > 0 {
		availableMaterials = append(availableMaterials, "wood")
	}

	// Keep only instruments whose base AND support material can be sourced.
	availableInstruments := []Instrument{}
	for _, i := range getAllInstruments() {
		if slicePartlyWithin(i.BaseMaterialOptions, availableMaterials) &&
			slicePartlyWithin(i.SupportMaterialOptions, availableMaterials) {
			availableInstruments = append(availableInstruments, i)
		}
	}

	instruments := []Instrument{}
	// Robustness: with no buildable candidates there are no instruments
	// (rand.Intn panics when its argument is 0).
	if len(availableInstruments) == 0 {
		return instruments
	}

	// pickMaterial resolves a material category to a concrete material name.
	pickMaterial := func(materialType string) string {
		switch materialType {
		case "hide":
			return random.Item(availableHides)
		case "metal":
			return random.Item(availableMetals)
		case "wood":
			return random.Item(availableWoods)
		}
		return ""
	}

	numberOfInstruments := rand.Intn(3) + 1
	for i := 0; i < numberOfInstruments; i++ {
		// BUGFIX: was rand.Intn(len(availableInstruments)-1), which could
		// never select the last candidate and panicked when exactly one
		// instrument was available.
		instrument := availableInstruments[rand.Intn(len(availableInstruments))]

		availableBaseMaterials := []string{}
		for _, m := range instrument.BaseMaterialOptions {
			if inSlice(m, availableMaterials) {
				availableBaseMaterials = append(availableBaseMaterials, m)
			}
		}
		availableSupportMaterials := []string{}
		for _, m := range instrument.SupportMaterialOptions {
			if inSlice(m, availableMaterials) {
				availableSupportMaterials = append(availableSupportMaterials, m)
			}
		}

		instrument.BaseMaterial = pickMaterial(random.Item(availableBaseMaterials))
		instrument.SupportMaterial = pickMaterial(random.Item(availableSupportMaterials))
		instrument.Description = instrument.getDescription()
		instruments = append(instruments, instrument)
	}

	return instruments
}

// getDescription renders the instrument's DescriptionTemplate against the
// instrument itself. A malformed template is a programming error in
// getAllInstruments, so both failure modes panic.
func (instrument Instrument) getDescription() string {
	t, err := template.New("instrument description").Parse(instrument.DescriptionTemplate)
	if err != nil {
		panic(err)
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, instrument); err != nil {
		panic(err)
	}
	return tpl.String()
}

// getAllInstruments returns the catalog of every instrument the generator
// knows how to build. The table-driven form replaces seventeen near-identical
// struct literals.
func getAllInstruments() []Instrument {
	const (
		trimmed = "{{.BaseMaterial}} {{.Name}} trimmed with {{.SupportMaterial}}"
		strung  = "{{.BaseMaterial}} {{.Name}} strung with {{.SupportMaterial}} sinew"
		drone   = "{{.BaseMaterial}}-hide {{.Name}} with {{.SupportMaterial}} drone"
		drones  = "{{.BaseMaterial}}-hide {{.Name}} with {{.SupportMaterial}} drones"
		skinned = "{{.BaseMaterial}} {{.Name}} skinned with {{.SupportMaterial}} hide"
	)

	// build constructs one catalog entry; fresh option slices are allocated
	// per instrument so entries never share backing arrays.
	build := func(name, instrumentType string, base, support []string, tmpl string) Instrument {
		return Instrument{
			Name:                   name,
			Type:                   instrumentType,
			BaseMaterialOptions:    append([]string{}, base...),
			SupportMaterialOptions: append([]string{}, support...),
			DescriptionTemplate:    tmpl,
		}
	}

	metalWood := []string{"metal", "wood"}
	wood := []string{"wood"}
	hide := []string{"hide"}

	return []Instrument{
		build("short flute", "flute", metalWood, metalWood, trimmed),
		build("long flute", "flute", metalWood, metalWood, trimmed),
		build("twin flute", "flute", metalWood, metalWood, trimmed),
		build("short harp", "harp", metalWood, hide, strung),
		build("long harp", "harp", metalWood, hide, strung),
		build("full harp", "harp", metalWood, hide, strung),
		build("lyre", "lyre", wood, hide, strung),
		build("lijerica", "lyre", wood, hide, strung),
		build("long-necked lute", "lute", wood, hide, strung),
		build("pierced lute", "lute", wood, hide, strung),
		build("short-necked lute", "lute", wood, hide, strung),
		build("single-drone bagpipes", "bagpipes", hide, metalWood, drone),
		build("multiple-drone bagpipes", "bagpipes", hide, metalWood, drones),
		build("hand drum", "drum", hide, metalWood, skinned),
		build("short drum", "drum", hide, metalWood, skinned),
		build("walking drum", "drum", hide, metalWood, skinned),
		build("heavy drum", "drum", hide, metalWood, skinned),
	}
}

// randomMusicStyle assembles a random MusicStyle for the culture.
func (culture Culture) randomMusicStyle() MusicStyle {
	style := MusicStyle{}

	style.Beat = rand.Intn(3)
	style.Structure = rand.Intn(3)
	style.Tonality = rand.Intn(3)
	style.Vocals = rand.Intn(3)
	style.Descriptors = culture.generateMusicDescriptors()
	style.Instruments = culture.generateMusicalInstruments()

	return style
}
package model

// VideoListItem is the JSON shape of one entry in a video list response.
// Note that Views is a string while Duration and Status are ints — presumably
// matching an upstream API's representation; confirm before changing.
type VideoListItem struct {
	Id        string `json:"id"`
	Name      string `json:"name"`
	Duration  int    `json:"duration"`
	Thumbnail string `json:"thumbnail"`
	Uploaded  string `json:"uploaded"`
	Views     string `json:"views"`
	Status    int    `json:"status"`
	Quality   string `json:"quality"`
}
package mutual

import (
	"log"
	"math/rand"
	"time"
)

func init() {
	// Include microsecond timestamps in every log line.
	log.SetFlags(log.LstdFlags | log.Lmicroseconds)
	debugPrintf("程序开始运行")
}

// debugPrintf forwards to log.Printf, but only when debugging is enabled
// via the package-level needDebug switch.
func debugPrintf(format string, a ...interface{}) {
	if !needDebug {
		return
	}
	log.Printf(format, a...)
}

// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// randSleep pauses the caller for a random duration of 1-3 milliseconds.
func randSleep() {
	d := time.Duration(rand.Intn(3)+1) * time.Millisecond
	time.Sleep(d)
}

// sleep1SecondPer100Occupieds sleeps for one second whenever count is a
// multiple of 100.
func sleep1SecondPer100Occupieds(count int) {
	if count%100 != 0 {
		return
	}
	time.Sleep(time.Second)
}
package auth

import (
	"encoding/json"
	"errors"
	"io"
)

// authResponse models the JSON body returned by the auth server.
type authResponse struct {
	Token string `json:"token"`
}

// decodeAuthResponse parses the auth server's JSON reply. A reply that
// decodes successfully but carries an empty token is treated as malformed.
func decodeAuthResponse(serverResponse io.Reader) (authResponse, error) {
	var response authResponse
	if err := json.NewDecoder(serverResponse).Decode(&response); err != nil {
		return response, err
	}
	if response.Token == "" {
		return response, errors.New("malformed auth server response")
	}
	return response, nil
}
package cryptutil

import (
	"encoding/pem"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// generated using:
//
//	openssl genpkey -algorithm x25519 -out priv.pem
//	openssl pkey -in priv.pem -out pub.pem -pubout
var (
	rawPrivateX25519Key = []byte(`-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VuBCIEIKALoNgzCksH0v0Bc7Ghl8vGin4MAIKpmtZSmaMN0Vtb
-----END PRIVATE KEY-----
`)
	rawPublicX25519Key = []byte(`-----BEGIN PUBLIC KEY-----
MCowBQYDK2VuAyEAk63g8PY1JJTkrranWTxGSd/yA5kAgJlPk4/srMKg9mg=
-----END PUBLIC KEY-----
`)
)

// TestPKCS8PrivateKey checks that an X25519 PKCS#8 private key parses into a
// *PrivateKeyEncryptionKey and marshals back to byte-identical PEM.
func TestPKCS8PrivateKey(t *testing.T) {
	block, _ := pem.Decode(rawPrivateX25519Key)
	kek, err := ParsePKCS8PrivateKey(block.Bytes)
	assert.NoError(t, err)
	assert.IsType(t, &PrivateKeyEncryptionKey{}, kek)

	t.Run("marshal", func(t *testing.T) {
		der, err := MarshalPKCS8PrivateKey(kek)
		require.NoError(t, err)
		actual := pem.EncodeToMemory(&pem.Block{
			Type:  "PRIVATE KEY",
			Bytes: der,
		})
		assert.Equal(t, rawPrivateX25519Key, actual)
	})
}

// TestPKIXPublicKey checks that an X25519 PKIX public key parses into a
// *PublicKeyEncryptionKey and marshals back to byte-identical PEM.
func TestPKIXPublicKey(t *testing.T) {
	block, _ := pem.Decode(rawPublicX25519Key)
	kek, err := ParsePKIXPublicKey(block.Bytes)
	assert.NoError(t, err)
	assert.IsType(t, &PublicKeyEncryptionKey{}, kek)

	t.Run("marshal", func(t *testing.T) {
		der, err := MarshalPKIXPublicKey(kek)
		require.NoError(t, err)
		actual := pem.EncodeToMemory(&pem.Block{
			Type:  "PUBLIC KEY",
			Bytes: der,
		})
		assert.Equal(t, rawPublicX25519Key, actual)
	})
}
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. package azurestack import ( "context" "github.com/Azure/aks-engine/pkg/armhelpers" "github.com/Azure/azure-sdk-for-go/services/authorization/mgmt/2015-07-01/authorization" "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac" "github.com/Azure/go-autorest/autorest" "github.com/pkg/errors" ) const ( // AADContributorRoleID is the role id that exists in every subscription for 'Contributor' AADContributorRoleID = "b24988ac-6180-42a0-ab88-20f7382dd24c" // AADRoleReferenceTemplate is a template for a roleDefinitionId AADRoleReferenceTemplate = "/subscriptions/%s/providers/Microsoft.Authorization/roleDefinitions/%s" // AADRoleResourceGroupScopeTemplate is a template for a roleDefinition scope AADRoleResourceGroupScopeTemplate = "/subscriptions/%s/resourceGroups/%s" ) // CreateGraphApplication creates an application via the graphrbac client func (az *AzureClient) CreateGraphApplication(ctx context.Context, applicationCreateParameters graphrbac.ApplicationCreateParameters) (graphrbac.Application, error) { errorMessage := "error azure stack does not support creating application" return graphrbac.Application{}, errors.New(errorMessage) } // DeleteGraphApplication deletes an application via the graphrbac client func (az *AzureClient) DeleteGraphApplication(ctx context.Context, applicationObjectID string) (result autorest.Response, err error) { errorMessage := "error azure stack does not support deleting application" return autorest.Response{}, errors.New(errorMessage) } // CreateGraphPrincipal creates a service principal via the graphrbac client func (az *AzureClient) CreateGraphPrincipal(ctx context.Context, servicePrincipalCreateParameters graphrbac.ServicePrincipalCreateParameters) (graphrbac.ServicePrincipal, error) { errorMessage := "error azure stack does not support creating service principal" return graphrbac.ServicePrincipal{}, errors.New(errorMessage) } // 
CreateRoleAssignment creates a role assignment via the authorization client func (az *AzureClient) CreateRoleAssignment(ctx context.Context, scope string, roleAssignmentName string, parameters authorization.RoleAssignmentCreateParameters) (authorization.RoleAssignment, error) { errorMessage := "error azure stack does not support creating role assignement" return authorization.RoleAssignment{}, errors.New(errorMessage) } // DeleteRoleAssignmentByID deletes a roleAssignment via its unique identifier func (az *AzureClient) DeleteRoleAssignmentByID(ctx context.Context, roleAssignmentID string) (authorization.RoleAssignment, error) { errorMessage := "error azure stack does not support deleting role assignement" return authorization.RoleAssignment{}, errors.New(errorMessage) } // ListRoleAssignmentsForPrincipal (e.g. a VM) via the scope and the unique identifier of the principal func (az *AzureClient) ListRoleAssignmentsForPrincipal(ctx context.Context, scope string, principalID string) (armhelpers.RoleAssignmentListResultPage, error) { errorMessage := "error azure stack does not support listing role assignement" return nil, errors.New(errorMessage) } // CreateApp is a simpler method for creating an application func (az *AzureClient) CreateApp(ctx context.Context, appName, appURL string, replyURLs *[]string, requiredResourceAccess *[]graphrbac.RequiredResourceAccess) (applicationResp graphrbac.Application, servicePrincipalObjectID, servicePrincipalClientSecret string, err error) { errorMessage := "error azure stack does not support creating application" return graphrbac.Application{}, "", "", errors.New(errorMessage) } // DeleteApp is a simpler method for deleting an application and the associated spn func (az *AzureClient) DeleteApp(ctx context.Context, applicationName, applicationObjectID string) (autorest.Response, error) { errorMessage := "error azure stack does not support deleting application" return autorest.Response{}, errors.New(errorMessage) } // 
CreateRoleAssignmentSimple is a wrapper around RoleAssignmentsClient.Create func (az *AzureClient) CreateRoleAssignmentSimple(ctx context.Context, resourceGroup, servicePrincipalObjectID string) error { errorMessage := "error azure stack does not support creating role assignment" return errors.New(errorMessage) }
package database

import (
	"log"
	"time"

	"database/sql"

	"portal/config"

	_ "github.com/go-sql-driver/mysql"
	"github.com/gomodule/redigo/redis"
)

// Shared database access handles for the whole package.
var db *sql.DB
var RedisPool *redis.Pool

// initMysql opens the MySQL connection pool.
// dbConfig: "user:password@tcp(127.0.0.1:3306)/dbname"
func initMysql() {
	_db, err := sql.Open("mysql", config.MysqlConfig.URL)
	if err != nil {
		log.Fatal(err)
	}
	db = _db
}

// CloseDB closes the MySQL connection pool.
// NOTE(review): log.Fatal here terminates the whole process if Close fails —
// confirm that is intended for a shutdown path.
func CloseDB() {
	err := db.Close()
	if err != nil {
		log.Fatal(err)
	}
}

// ConnDB returns the shared *sql.DB handle.
func ConnDB() *sql.DB {
	return db
}

// initRedis builds the Redis connection pool from the configured URL,
// password and pool limits.
func initRedis() {
	RedisPool = &redis.Pool{
		MaxIdle:     config.RedisConfig.MaxIdle,
		MaxActive:   config.RedisConfig.MaxActive,
		IdleTimeout: 240 * time.Second,
		Wait:        true,
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", config.RedisConfig.URL,
				redis.DialPassword(config.RedisConfig.Password))
			if err != nil {
				return nil, err
			}
			return c, nil
		},
	}
}

// init eagerly connects to both backends at package load time.
func init() {
	initMysql()
	initRedis()
}
package cve // CVE type CVE struct { Affects *Affects `json:"affects,omitempty"` CVEDataMeta *CVEDataMeta `json:"CVE_data_meta"` DataFormat string `json:"data_format"` DataType string `json:"data_type"` DataVersion string `json:"data_version"` Description *Description `json:"description"` Problemtype *Problemtype `json:"problemtype"` References *References `json:"references"` } // Affects type Affects struct { Vendor *Vendor `json:"vendor"` } // CVEDataMeta type CVEDataMeta struct { ASSIGNER string `json:"ASSIGNER"` ID string `json:"ID"` STATE string `json:"STATE,omitempty"` } // Description type Description struct { DescriptionData []*LangString `json:"description_data"` } // LangString type LangString struct { Lang string `json:"lang"` Value string `json:"value"` } // Problemtype type Problemtype struct { ProblemtypeData []*ProblemtypeDataItems `json:"problemtype_data"` } // ProblemtypeDataItems type ProblemtypeDataItems struct { Description []*LangString `json:"description"` } // Product type Product struct { ProductData []*Product `json:"product_data"` } // Reference type Reference struct { Name string `json:"name,omitempty"` Refsource string `json:"refsource,omitempty"` Tags []string `json:"tags,omitempty"` Url string `json:"url"` } // References type References struct { ReferenceData []*Reference `json:"reference_data"` } // Vendor type Vendor struct { VendorData []*VendorDataItems `json:"vendor_data"` } // VendorDataItems type VendorDataItems struct { Product *Product `json:"product"` VendorName string `json:"vendor_name"` } // Version type Version struct { VersionData []*VersionDataItems `json:"version_data"` } // VersionDataItems type VersionDataItems struct { VersionAffected string `json:"version_affected,omitempty"` VersionValue string `json:"version_value"` }
package main

import (
	"log"
	"strconv"

	"github.com/gin-contrib/cors"
	"github.com/gin-gonic/gin"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
	"github.com/lcsphantom/savenote-server/api"
	"github.com/lcsphantom/savenote-server/api/login"
	"github.com/lcsphantom/savenote-server/api/note"
	"github.com/lcsphantom/savenote-server/api/refresh"
	"github.com/lcsphantom/savenote-server/api/signup"
	"github.com/lcsphantom/savenote-server/db"
	noterouter "github.com/lcsphantom/savenote-server/public/router"
)

// main loads the server configuration, initializes the database, wires up
// the HTTP routes and starts the server.
func main() {
	// IDIOM: short declaration replaces the var-then-assign pair (the now
	// unused config import was removed accordingly).
	conf := api.GetServerConfig()
	db.InitDb(conf)

	router := gin.Default()
	router.Use(cors.Default())
	router.LoadHTMLGlob(conf.PublicFiles.HTMLTemplates)

	// User management and auth endpoints.
	router.POST("/user/register", signup.Register)
	router.POST("/user/login", login.Login)
	router.POST("/user/refresh", refresh.Refresh)

	// Note endpoints.
	router.POST("/note/save", note.Save)
	router.GET("/note/getnotes", note.GetNotes)

	router.GET("/user/getallusers", login.GetAllUsers)
	router.GET("/index", noterouter.Home)
	router.GET("/user/index", noterouter.UserHome)

	// NOTE(review): the listen port is read from conf.Database.Port — a
	// database setting used as the HTTP port; confirm this is intentional.
	log.Fatal(router.Run(":" + strconv.Itoa(conf.Database.Port)))
}
package main

import (
	"fmt"
	"html/template"
	"net/http"
)

// signup renders the signup form on GET and registers the submitted
// credentials on any other method.
func signup(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method:", r.Method) // get request method
	if r.Method == "GET" {
		fmt.Println("Inside signup")
		// BUGFIX: the parse/execute errors were silently ignored, which
		// could dereference a nil template.
		t, err := template.ParseFiles("signup.html")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if err := t.Execute(w, nil); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// logic part of sign up
	fmt.Println("username:", r.Form["username"])
	fmt.Println("password:", r.Form["password"])
	message := insert(r.Form["username"][0], r.Form["password"][0])
	responsevalue := ""
	if message != "" {
		responsevalue = "Username: " + r.Form["username"][0] + " Password: " + r.Form["password"][0] + " registration failed"
	} else {
		responsevalue = "Username: " + r.Form["username"][0] + " Password: " + r.Form["password"][0] + " registered successfully"
		printaccounts()
	}
	// BUGFIX: fmt.Fprint, not Fprintf — responsevalue contains user input
	// and must not be interpreted as a format string (go vet: printf).
	fmt.Fprint(w, responsevalue)
}

// index renders the login form on GET and checks the submitted credentials
// on any other method.
func index(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method:", r.Method) // get request method
	if r.Method == "GET" {
		fmt.Println("Inside login")
		t, err := template.ParseFiles("index.html")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if err := t.Execute(w, nil); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// logic part of log in
	flag := checkuser(r.Form["username"][0], r.Form["password"][0])
	responsevalue := ""
	if flag == "yes" {
		responsevalue = "Username: " + r.Form["username"][0] + " Password: " + r.Form["password"][0] + " logged in successfully"
		fmt.Println("LOGIN CORRECT")
	} else {
		responsevalue = "Username: " + r.Form["username"][0] + " Password: " + r.Form["password"][0] + "INVALID CREDENTIALS OR UNREGISTERED USER "
		fmt.Println("INVALID CREDENTIALS OR UNREGISTERED USER")
	}
	fmt.Println("username:", r.Form["username"])
	fmt.Println("password:", r.Form["password"])
	// BUGFIX: fmt.Fprint, not Fprintf (see signup).
	fmt.Fprint(w, responsevalue)
}
package interfaces

import "fmt"

// Testers is a one-method demo interface.
type Testers interface {
	Do()
}

type myInt int

// Do has a pointer receiver, so fmt.Println(i) prints the pointer, not the value.
func (i *myInt) Do() {
	fmt.Println(i)
}

// testSkill shows that pointer-receiver methods can be called on addressable values.
func testSkill() {
	var myint myInt = myInt(9)
	var myint2 myInt = 8
	fmt.Println(myint)
	myint.Do()
	myint2.Do()
}

// again test
//type Animals interface {
//	Shark()
//}

// Sayer is the speaking interface implemented by *Animal (and embedded into Dog).
type Sayer interface {
	Say(message string)
	SayHi()
}

// Animal is the "parent" type in this embedding demo.
type Animal struct {
	Name string
}

func (a *Animal) Say(message string) {
	fmt.Printf("Animal[%v] say: %v\n", a.Name, message)
}

// SayHi calls a.Say, i.e. always Animal.Say — Go has no virtual dispatch
// through embedding (see the output noted in runCat2).
func (a *Animal) SayHi() {
	a.Say("Hi")
}

//func (a *Dog) SayHi() {
//
//	fmt.Println(a.Name)
//}

// override Animal.Say (shadows the promoted method when called on a Dog)
func (d *Dog) Say(message string) {
	fmt.Printf("Dog[%v] say: %v\n", d.Name, message)
}

type Dog struct {
	Animal
}

func runCat() {
	var sayer Sayer
	sayer = &Dog{Animal{Name: "Yoda"}}
	sayer.Say("hello world")
	//var animal *Animal = &Dog{Animal{Name: "Yoda"}} // Go does not allow a parent-type pointer to point at a child value; this line would not compile
}

func runCat2() {
	var sayer Sayer
	sayer = &Dog{Animal{Name: "Yoda"}}
	sayer.Say("hello world") // Dog[Yoda] say: hello world
	sayer.SayHi()            // Animal[Yoda] say: Hi
}

// test inherit
type ParentInterfacer interface {
	myParent() string
}

type ParentSt struct {
	name string
}

func (p *ParentSt) myParent() string {
	return p.name
}

// ChildSt embeds *ParentSt, so it satisfies ParentInterfacer via promotion.
type ChildSt struct {
	*ParentSt
}

func runCat3() {
	//var s ParentSt = &ChildSt{ParentSt{"parent"}} // Go does not allow a parent-type variable to hold a child value; this line would not compile
	var s ParentInterfacer = &ChildSt{&ParentSt{"parent"}}
	fmt.Println(s)
}
package pgtune

import (
	"fmt"
	"math/rand"
	"testing"

	"github.com/timescale/timescaledb-tune/internal/parse"
)

// WAL disk sizes exercised by the test matrices. walDiskUnset means the
// user supplied no WAL disk size; the other two exercise rounding when
// the derived size divides unevenly vs. evenly into 16MB segments.
const (
	walDiskUnset          = 0
	walDiskDivideUnevenly = 8 * parse.Gigabyte
	walDiskDivideEvenly   = 8800 * parse.Megabyte
)

// memoryToWALBuffers provides a mapping from test case memory levels to the
// expected WAL buffers setting. This is used to generate the test cases for
// WALRecommender, stored in walSettingsMatrix.
var memoryToWALBuffers = map[uint64]uint64{
	1 * parse.Gigabyte:                    7864 * parse.Kilobyte,
	uint64(1.5 * float64(parse.Gigabyte)): 11796 * parse.Kilobyte,
	2 * parse.Gigabyte:                    walBuffersDefault,
	10 * parse.Gigabyte:                   walBuffersDefault,
}

// walDiskToMaxBytes maps each test WAL disk size to the expected
// max_wal_size for the default profile.
var walDiskToMaxBytes = map[uint64]uint64{
	walDiskUnset:          defaultMaxWALBytes,
	walDiskDivideUnevenly: 4928 * parse.Megabyte, // nearest 16MB segment
	walDiskDivideEvenly:   5280 * parse.Megabyte,
}

// promscaleWALDiskToMaxBytes is the Promscale-profile counterpart of
// walDiskToMaxBytes; only the unset-disk default differs.
var promscaleWALDiskToMaxBytes = map[uint64]uint64{
	walDiskUnset:          promscaleDefaultMaxWALBytes,
	walDiskDivideUnevenly: 4928 * parse.Megabyte, // nearest 16MB segment
	walDiskDivideEvenly:   5280 * parse.Megabyte,
}

// walSettingsMatrix stores the test cases for WALRecommender along with the
// expected values for WAL keys.
var walSettingsMatrix = map[uint64]map[uint64]map[string]string{}

// promscaleWalSettingsMatrix stores the test cases for
// PromscaleWALRecommender along with the expected values for WAL keys.
var promscaleWalSettingsMatrix = map[uint64]map[uint64]map[string]string{}

// init populates both settings matrices, keyed by total memory then WAL
// disk size, from the fixture maps above.
func init() {
	for memory, walBuffers := range memoryToWALBuffers {
		walSettingsMatrix[memory] = make(map[uint64]map[string]string)
		for walSize := range walDiskToMaxBytes {
			walSettingsMatrix[memory][walSize] = make(map[string]string)
			walSettingsMatrix[memory][walSize][MinWALKey] = parse.BytesToPGFormat(walDiskToMaxBytes[walSize] / 2)
			walSettingsMatrix[memory][walSize][MaxWALKey] = parse.BytesToPGFormat(walDiskToMaxBytes[walSize])
			walSettingsMatrix[memory][walSize][WALBuffersKey] = parse.BytesToPGFormat(walBuffers)
			// the default profile makes no checkpoint/compression recommendation
			walSettingsMatrix[memory][walSize][CheckpointTimeoutKey] = NoRecommendation
			walSettingsMatrix[memory][walSize][WALCompressionKey] = NoRecommendation
		}
	}

	for memory, walBuffers := range memoryToWALBuffers {
		promscaleWalSettingsMatrix[memory] = make(map[uint64]map[string]string)
		for walSize := range walDiskToMaxBytes {
			promscaleWalSettingsMatrix[memory][walSize] = make(map[string]string)
			promscaleWalSettingsMatrix[memory][walSize][MinWALKey] = parse.BytesToPGFormat(promscaleWALDiskToMaxBytes[walSize] / 2)
			promscaleWalSettingsMatrix[memory][walSize][MaxWALKey] = parse.BytesToPGFormat(promscaleWALDiskToMaxBytes[walSize])
			promscaleWalSettingsMatrix[memory][walSize][WALBuffersKey] = parse.BytesToPGFormat(walBuffers)
			// the Promscale profile pins checkpoint timeout and WAL compression
			promscaleWalSettingsMatrix[memory][walSize][CheckpointTimeoutKey] = promscaleDefaultCheckpointTimeout
			promscaleWalSettingsMatrix[memory][walSize][WALCompressionKey] = promscaleDefaultWALCompression
		}
	}
}

// TestWALSettingsGroup_GetRecommender verifies that each profile maps to
// the expected recommender concrete type.
func TestWALSettingsGroup_GetRecommender(t *testing.T) {
	cases := []struct {
		profile     Profile
		recommender string
	}{
		{DefaultProfile, "*pgtune.WALRecommender"},
		{PromscaleProfile, "*pgtune.PromscaleWALRecommender"},
	}

	sg := WALSettingsGroup{totalMemory: 1, walDiskSize: 1}

	for _, k := range cases {
		r := sg.GetRecommender(k.profile)
		y := fmt.Sprintf("%T", r)
		if y != k.recommender {
			t.Errorf("Expected to get a %s using the %s profile but got %s", k.recommender, k.profile, y)
		}
	}
}
// TestNewWALRecommender checks constructor field wiring and availability
// across random memory values.
// NOTE(review): 1,000,000 iterations is heavy for a unit test and Errorf
// inside the loop can flood output — consider t.Fatalf or fewer rounds.
func TestNewWALRecommender(t *testing.T) {
	for i := 0; i < 1000000; i++ {
		mem := rand.Uint64()
		r := NewWALRecommender(mem, walDiskUnset)
		if r == nil {
			t.Errorf("unexpected nil recommender")
		}
		if got := r.totalMemory; got != mem {
			t.Errorf("recommender has incorrect memory: got %d want %d", got, mem)
		}
		if !r.IsAvailable() {
			t.Errorf("unexpectedly not available")
		}
	}
}

// TestWALRecommenderRecommend drives the default recommender through the
// full fixture matrix.
func TestWALRecommenderRecommend(t *testing.T) {
	for totalMemory, outerMatrix := range walSettingsMatrix {
		for walSize, matrix := range outerMatrix {
			r := NewWALRecommender(totalMemory, walSize)
			testRecommender(t, r, WALKeys, matrix)
		}
	}
}

// TestPromscaleWALRecommenderRecommend drives the Promscale recommender
// through its fixture matrix.
func TestPromscaleWALRecommenderRecommend(t *testing.T) {
	for totalMemory, outerMatrix := range promscaleWalSettingsMatrix {
		for walSize, matrix := range outerMatrix {
			r := NewPromscaleWALRecommender(totalMemory, walSize)
			testRecommender(t, r, WALKeys, matrix)
		}
	}
}

func TestPromscaleWALRecommenderCheckpointTimeout(t *testing.T) {
	// recommendation for checkpoint timeout should not be impacted by totalMemory or walDiskSize
	// NOTE(review): constructing 1,000,000 recommenders makes this test slow;
	// a handful of boundary values would give the same coverage.
	for i := uint64(0); i < 1000000; i++ {
		r := NewPromscaleWALRecommender(i, i)
		if v := r.Recommend(CheckpointTimeoutKey); v != promscaleDefaultCheckpointTimeout {
			t.Errorf("Expected %s for %s, but got %s", promscaleDefaultCheckpointTimeout, CheckpointTimeoutKey, v)
		}
	}
}

// TestWALRecommenderNoRecommendation verifies unknown keys (and keys only
// the Promscale profile sets) yield NoRecommendation from the default
// recommender.
func TestWALRecommenderNoRecommendation(t *testing.T) {
	r := NewWALRecommender(0, 0)
	if r.Recommend("foo") != NoRecommendation {
		t.Errorf("Recommendation was provided for %s when there should have been none", "foo")
	}
	if r.Recommend(CheckpointTimeoutKey) != NoRecommendation {
		t.Errorf("Recommendation was provided for %s when there should have been none", CheckpointTimeoutKey)
	}
}

// TestWALSettingsGroup exercises the full settings-group path for both
// profiles against the fixture matrices.
func TestWALSettingsGroup(t *testing.T) {
	for totalMemory, outerMatrix := range walSettingsMatrix {
		for walSize, matrix := range outerMatrix {
			config := getDefaultTestSystemConfig(t)
			config.Memory = totalMemory
			config.WALDiskSize = walSize
			sg := GetSettingsGroup(WALLabel, config)
			testSettingGroup(t, sg, DefaultProfile, matrix, WALLabel, WALKeys)
		}
	}

	for totalMemory, outerMatrix := range promscaleWalSettingsMatrix {
		for walSize, matrix := range outerMatrix {
			config := getDefaultTestSystemConfig(t)
			config.Memory = totalMemory
			config.WALDiskSize = walSize
			sg := GetSettingsGroup(WALLabel, config)
			testSettingGroup(t, sg, PromscaleProfile, matrix, WALLabel, WALKeys)
		}
	}
}

// TestWALFloatParserParseFloat checks unit-aware parsing of byte sizes,
// plain numbers, and durations (minutes converted to milliseconds).
func TestWALFloatParserParseFloat(t *testing.T) {
	v := &WALFloatParser{}
	s := "8" + parse.GB
	want := float64(8 * parse.Gigabyte)
	got, err := v.ParseFloat(MaxWALKey, s)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if got != want {
		t.Errorf("incorrect result: got %f want %f", got, want)
	}

	s = "1000"
	want = 1000.0
	got, err = v.ParseFloat(WALBuffersKey, s)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if got != want {
		t.Errorf("incorrect result: got %f want %f", got, want)
	}

	s = "33" + parse.Minutes.String()
	conversion, _ := parse.TimeConversion(parse.Minutes, parse.Milliseconds)
	want = 33.0 * conversion
	got, err = v.ParseFloat(CheckpointTimeoutKey, s)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if got != want {
		t.Errorf("incorrect result: got %f want %f", got, want)
	}
}
package main

import (
	"fmt"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/gin-gonic/gin"
	"github.com/swaggo/gin-swagger"
	"github.com/swaggo/gin-swagger/swaggerFiles"
	"github.com/therudite/api/common"
	"github.com/therudite/api/config"
	_ "github.com/therudite/api/docs"
	"github.com/therudite/api/errors"
	"github.com/therudite/api/resources"
	"github.com/therudite/api/routes"
)

// serverConfig holds the HTTP listen settings loaded from configuration.
type serverConfig struct {
	Port string
}

// interrupt forwards SIGINT/SIGTERM into errc so main can shut down.
func interrupt(errc chan error) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	errc <- fmt.Errorf("%s", <-c)
}

// @APIVersion 1.0.0
// @APITitle Swagger API
// @APIDescription Swagger API
// @Contact niteshagarwal1.618@gmail.com
// @TermsOfServiceUrl http://agarn.in
// @License E_CORP
// @LicenseUrl http://agarn.in
func main() {
	var err error

	// instantiating config manager
	var configManager config.ConfigManager
	configManager, err = config.NewConfigManager()
	if err != nil {
		// Bug fix: a second panic after panic(err) was unreachable;
		// fold the context message into a single panic value.
		panic(fmt.Errorf("error instantiating config manager: %v", err))
	}

	// instantiating logger
	var loggerConfig = new(common.LoggerConfig)
	configManager.Load("logger", loggerConfig)
	common.InitializeLogger(loggerConfig)
	common.LogString("Logger loaded, starting server ...")

	// instantiating resource manager
	var resourcemanager resources.ResourceManagerInterface
	resourcemanager, err = resources.NewResourceManager(configManager)
	if err != nil {
		// Bug fix: unreachable second panic removed here as well.
		panic(fmt.Errorf("%v: %v", errors.ResourceInitializationError, err))
	}
	defer resourcemanager.Close()

	// Server Config
	serverconfig := new(serverConfig)
	configManager.Load("server", serverconfig)

	// instantiating router
	engine := gin.Default()
	errc := make(chan error)

	// Listen interrupts
	go interrupt(errc)

	// instantiating server
	go func() {
		fmt.Print("Creating route")
		engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
		routes.CreateFeedbackRoutes(engine, resourcemanager)
		server := http.Server{
			Addr:    ":" + serverconfig.Port,
			Handler: engine,
		}
		server.SetKeepAlivesEnabled(false)
		errc <- server.ListenAndServe()
	}()

	// block until either a signal or a server error arrives
	common.LogJSON(map[string]string{"message": "exit"}, <-errc)
}
package workloads

import (
	"fmt"
	"golang-distributed-parallel-image-processing/api/helpers"
	"golang-distributed-parallel-image-processing/scheduler"
	"io"
	"net/http"
	"os"
	"strconv"
	"strings"

	"github.com/dgrijalva/jwt-go"
	"github.com/labstack/echo"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// Package-level counters for issued test runs and filter jobs.
// NOTE(review): not goroutine-safe; concurrent requests race on these.
var NoOfTests int = 0
var NoOfJobs int = 0

// WorkloadsResponse handles /workloads/test: it validates the caller's
// token, requires at least one registered worker, and enqueues 20 "test"
// RPC jobs.
func WorkloadsResponse(c echo.Context) error {
	fmt.Println("[ACCESS] New connection to:\t/workloads/test")
	user := c.Get("user").(*jwt.Token)
	token := user.Raw
	valid := helpers.IsTokenActive(token)
	if !valid {
		return helpers.ReturnJSON(c, http.StatusConflict, "Token is invalid or revoked")
	}

	cc := c.(*helpers.CustomContext)
	if len(cc.DB) == 0 {
		return helpers.ReturnJSON(c, http.StatusConflict, "There are no registered workers")
	}

	/*TEST*/
	NoOfTests++
	for e := 0; e < 20; e++ {
		cc.JOBS <- scheduler.Job{RPCName: "test"}
	}

	return helpers.ReturnJSONMap(c, http.StatusOK, map[string]interface{}{
		"Workload": "test",
		"Job ID":   NoOfTests,
		"Status":   "Completed 20 tasks",
		"Result":   "Done!",
	})
}

// WorkloadsFilterResponse receives an uploaded image plus a filter name,
// stores the image under public/download/<workload-id>/, and enqueues a
// "filter" RPC job describing it.
func WorkloadsFilterResponse(c echo.Context) error {
	user := c.Get("user").(*jwt.Token)
	token := user.Raw
	valid := helpers.IsTokenActive(token)
	if !valid {
		return helpers.ReturnJSON(c, http.StatusConflict, "Token is invalid or revoked")
	}

	cc := c.(*helpers.CustomContext)
	if len(cc.DB) == 0 {
		return helpers.ReturnJSON(c, http.StatusConflict, "There are no registered workers")
	}

	lastJobID := NoOfJobs
	NoOfJobs++

	/* Params */
	workloadID := c.FormValue("workload-id")
	filter := c.FormValue("filter")
	if filter != "binary" && filter != "inverse" && filter != "filtertest" && filter != "zero" {
		return helpers.ReturnJSON(c, http.StatusConflict, "There is no filter with that name on the server!")
	}

	/* Folder Creation (best-effort; failures surface on os.Create below) */
	_ = os.MkdirAll("public/download/"+workloadID+"/", 0755)
	_ = os.MkdirAll("public/results/"+workloadID+"/", 0755)

	/* File receiving */
	image, err := c.FormFile("data")
	if err != nil {
		return helpers.ReturnJSON(c, http.StatusConflict, "[ERR] There was no file sent into the input. ("+err.Error()+")")
	}
	src, err := image.Open()
	if err != nil {
		return helpers.ReturnJSON(c, http.StatusConflict, "[ERR] Error opening image. ("+err.Error()+")")
	}
	defer src.Close()

	objID := primitive.NewObjectID()
	fileURLOnServer := "public/download/" + workloadID + "/" + objID.Hex() + "_" + image.Filename
	fileNameOnServer := objID.Hex() + "_" + image.Filename
	dst, err := os.Create(fileURLOnServer)
	if err != nil {
		return helpers.ReturnJSON(c, http.StatusConflict, "[ERR] Error creating Image on Server. ("+err.Error()+")")
	}
	defer dst.Close()

	if _, err = io.Copy(dst, src); err != nil {
		return helpers.ReturnJSON(c, http.StatusConflict, "[ERR] Error copying information on new image. ("+err.Error()+")")
	}

	fileID := int64(0)
	/* Get File ID */
	if _, ok := cc.WorkloadsFileID[workloadID]; ok {
		cc.WorkloadsFileID[workloadID] = cc.WorkloadsFileID[workloadID] + int64(1)
		fileID = cc.WorkloadsFileID[workloadID]
	} else {
		cc.WorkloadsFileID[workloadID] = int64(1)
		fileID = int64(1)
	}

	// Bug fix: Split(...)[1] panicked on filenames without a dot and
	// returned the wrong segment for names like "a.b.png". Take the last
	// segment instead (it equals the whole name when there is no dot).
	parts := strings.Split(image.Filename, ".")
	fileExt := parts[len(parts)-1]

	preJobString := workloadID + "|" + filter + "|" + strconv.Itoa(int(fileID)) + "|" + fileNameOnServer + "|" + fileExt

	/*RPC Job*/
	cc.JOBS <- scheduler.Job{RPCName: "filter", Data: preJobString}

	return helpers.ReturnJSONMap(c, http.StatusOK, map[string]interface{}{
		"Workload ID": workloadID,
		"Job ID":      lastJobID,
		"Status":      "Scheduling",
		"Results":     "http://localhost:8080/results/" + workloadID + "/",
	})
}
/* In this challenge you will need to determine whether it's Pi Day, Pi Minute, or Pi Second. Because Pi is irrational, it wants your code to be as short as possible. Examples No input is provided, your program should use the system time. I've just added it for clarity March 14, 2016 0:00:00 Pi Day December 25, 2015 3:14:45 Pi Minute December 29, 2015 0:03:14 Pi Second January 1, 2016 0:00:00 <No Output> What is Pi Day / Minute / Second Pi Day is when the month is March, and the date is the 14th Pi Minute is when the hour is 3, and the minute is 14 Pi Second is when the minute is 3, and the second is 14 Pi Day should be preferred instead of Pi Minute or Pi Second, and Pi Minute should be preferred instead of Pi Second. For this challenge you should use 12-hour time (15:14 == 3:14). The date/time used to determine the Pi Day/Minute/Second should be based on system time. Scoring & Bonus -15 byte Bonus: If you print "No Pi Time" when it's not Pi time. As always, standard loopholes are disallowed. This is code-golf shortest code in bytes wins! */ package main import ( "fmt" "time" ) func main() { fmt.Println(pitime()) } func pitime() string { t := time.Now() switch { case t.Month() == time.March && t.Day() == 14: return "Pi Day" case t.Hour() == 3 && t.Minute() == 14: return "Pi Minute" case t.Minute() == 3 && t.Second() == 14: return "Pi Second" } return "No Pi Time" }
package main

import (
	"bufio"
	"flag"
	"fmt"
	"github.com/fatih/color"
	"io"
	"log"
	"os"
	"strings"
)

const (
	// versionString is templated at build time; {{{VERSION}}} is replaced
	// by the release tooling — TODO confirm.
	versionString = "kubectl-repl {{{VERSION}}}"
)

// Global REPL state shared across the builtins.
var (
	input     *bufio.Reader // stdin reader for the REPL loop
	namespace string        // active kubernetes namespace
	context   string        // kubectl context override, if any
	verbose   bool          // extra logging when set
)

// prompt renders the "# [context] namespace " prompt, reads one line from
// stdin, and expands any $-variables via substituteForVars (defined
// elsewhere in this package).
func prompt() (string, error) {
	color.New(color.Bold).Print("# ")
	if context != "" {
		color.New(color.FgBlack, color.Italic).Print(context)
		fmt.Print(" ")
	}
	if namespace != "" {
		color.New(color.Bold).Print(namespace)
	} else {
		color.New(color.Bold).Print("namespace")
	}
	fmt.Print(" ")
	line, err := input.ReadString('\n')
	if err != nil {
		return "", err
	}
	response := strings.TrimSpace(line)
	return substituteForVars(response)
}

// printIndexedLine prints a line prefixed with a blue "$index" marker so
// the user can reference it later as a variable.
func printIndexedLine(index, line string) {
	coloredIndex := color.New(color.FgBlue).Sprintf("$%s", index)
	fmt.Printf("%s \t%s\n", coloredIndex, line)
}

// repl runs one read-eval-print cycle: prompt, dispatch to the first
// matching builtin, or fall through to running the line via kubectl.
func repl(commands Commands) error {
	command, err := prompt()
	if err != nil {
		return err
	}
	if command == "" {
		return nil
	}
	for _, builtin := range commands {
		if builtin.filter(command) {
			return builtin.run(command)
		}
	}
	return sh(kubectl(command))
}

func main() {
	var version bool
	flag.BoolVar(&verbose, "verbose", false, "Verbose")
	flag.BoolVar(&version, "version", false, "Print current version")
	flag.StringVar(&context, "context", "", "Override current context")
	flag.StringVar(&namespace, "namespace", "", "Override current context namespace")
	flag.Parse()
	// flag takes precedence; fall back to the environment variable
	if namespace == "" {
		namespace = os.Getenv("KUBECTL_NAMESPACE")
	}
	if version {
		fmt.Println(versionString)
		return
	}
	commands := Commands{
		builtinExit{},
		builtinNamespace{},
		builtinShell{},
		builtinGet{},
	}
	err := commands.Init()
	if err != nil {
		log.Fatal(err)
	}
	// `variables` is a package-level map declared elsewhere in this
	// package (used by printIndexedLine/substituteForVars) — TODO confirm.
	variables = make(map[string][]string)
	input = bufio.NewReader(os.Stdin)
	// interactively pick a namespace when none was supplied
	if namespace == "" {
		err = pickNamespace()
		if err == io.EOF {
			return
		} else if err != nil {
			log.Fatal(err)
		}
	}
	// main loop: EOF (ctrl-D) exits; other errors are printed and the
	// loop continues
	for {
		err = repl(commands)
		if err == io.EOF {
			break
		} else if err != nil {
			color.New(color.FgRed).Println(err)
		}
	}
}
package main

import (
	"io"
	"log"
	"net/http"
)

// fileURL is the upstream Google Drive download this proxy fronts.
const fileURL = "https://drive.google.com/uc?export=download&id=1u2NNJK-2pCe8_8PLc3k8eNJYtHsp5X0j"

// main runs a tiny reverse proxy on :8002 that streams the upstream file
// to every request, preserving the upstream response headers.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		resp, err := http.Get(fileURL)
		check(err)
		defer resp.Body.Close()
		// Bug fix: use Add, not Set — Set collapses repeated header
		// values (e.g. multiple Set-Cookie lines) down to the last one.
		for k, v := range resp.Header {
			for _, vv := range v {
				w.Header().Add(k, vv)
			}
		}
		io.Copy(w, resp.Body)
	})
	// Bug fix: ListenAndServe always returns a non-nil error on exit;
	// previously it was silently discarded.
	log.Fatal(http.ListenAndServe(":8002", nil))
}

// check aborts the process on any error.
// NOTE(review): log.Fatal inside the request handler above kills the
// whole server on a single failed upstream fetch — consider responding
// with 502 Bad Gateway instead.
func check(err error) {
	if err != nil {
		log.Fatal(err)
	}
}
package entity type UserReq struct { Username string `json:"username"` Password string `json:"password"` } func (al *UserReq) NewUserLogin(username, password string) *UserReq { return &UserReq{ Username: username, Password: password, } }
package cfrida

import "errors"

// Frida_device_manager_new allocates a new FridaDeviceManager and returns
// its handle.
func Frida_device_manager_new() uintptr {
	r, _, _ := frida_device_manager_new.Call()
	return r
}

// Frida_remote_device_options_new allocates a FridaRemoteDeviceOptions
// object and returns its handle.
func Frida_remote_device_options_new() uintptr {
	r, _, _ := frida_remote_device_options_new.Call()
	return r
}

// Frida_remote_device_options_set_certificate sets the TLS certificate
// handle on the options object.
func Frida_remote_device_options_set_certificate(obj uintptr, val uintptr) {
	frida_remote_device_options_set_certificate.Call(obj, val)
}

// Frida_remote_device_options_set_origin sets the origin string.
func Frida_remote_device_options_set_origin(obj uintptr, val string) {
	frida_remote_device_options_set_origin.Call(obj, GoStrToCStr(val))
}

// Frida_remote_device_options_set_token sets the auth token string.
func Frida_remote_device_options_set_token(obj uintptr, val string) {
	frida_remote_device_options_set_token.Call(obj, GoStrToCStr(val))
}

// Frida_remote_device_options_set_keepalive_interval sets the keepalive
// interval (seconds, per the frida API — TODO confirm units).
func Frida_remote_device_options_set_keepalive_interval(obj uintptr, n int) {
	frida_remote_device_options_set_keepalive_interval.Call(obj, uintptr(n))
}

// Frida_device_manager_close_sync closes the manager synchronously.
func Frida_device_manager_close_sync(obj uintptr, cancellable uintptr) error {
	gerr := MakeGError()
	frida_device_manager_close_sync.Call(obj, cancellable, gerr.Input())
	return gerr.ToError()
}

// Frida_device_manager_enumerate_devices_sync returns a handle to the
// device list, or an error from the underlying GError.
func Frida_device_manager_enumerate_devices_sync(obj uintptr, _cancellable uintptr) (uintptr, error) {
	gerr := MakeGError()
	r, _, _ := frida_device_manager_enumerate_devices_sync.Call(obj, _cancellable, gerr.Input())
	return r, gerr.ToError()
}

// Frida_device_list_size reports the number of devices in a device list.
func Frida_device_list_size(obj uintptr) int {
	r, _, _ := frida_device_list_size.Call(obj)
	return int(r)
}

// Frida_device_list_get returns the device handle at index.
func Frida_device_list_get(obj uintptr, index int) uintptr {
	r, _, _ := frida_device_list_get.Call(obj, uintptr(index))
	return r
}

// Frida_device_manager_add_remote_device_sync registers a remote device at
// address with the given options, returning its handle.
func Frida_device_manager_add_remote_device_sync(obj uintptr, address string, ops uintptr, cancellable uintptr) (uintptr, error) {
	if address == "" {
		return 0, errors.New("address is empty")
	}
	gerr := MakeGError()
	r, _, _ := frida_device_manager_add_remote_device_sync.Call(obj, GoStrToCStr(address), ops, cancellable, gerr.Input())
	return r, gerr.ToError()
}

// Frida_device_manager_remove_remote_device_sync unregisters the remote
// device at address.
func Frida_device_manager_remove_remote_device_sync(obj uintptr, address string, cancellable uintptr) error {
	if address == "" {
		return errors.New("address is empty")
	}
	gerr := MakeGError()
	// Bug fix: this previously invoked the *add* proc (with the wrong
	// argument list, omitting the options handle); route through the
	// matching remove proc — assumed declared alongside the other procs,
	// TODO confirm.
	frida_device_manager_remove_remote_device_sync.Call(obj, GoStrToCStr(address), cancellable, gerr.Input())
	return gerr.ToError()
}

// Frida_device_manager_get_device_by_id_sync fetches a device by id,
// waiting up to timeout.
func Frida_device_manager_get_device_by_id_sync(obj uintptr, id string, timeout int, cancellable uintptr) (uintptr, error) {
	if id == "" {
		return 0, errors.New("id is empty")
	}
	gerr := MakeGError()
	r, _, _ := frida_device_manager_get_device_by_id_sync.Call(obj, GoStrToCStr(id), uintptr(timeout), cancellable, gerr.Input())
	return r, gerr.ToError()
}

// Frida_device_manager_get_device_by_type_sync fetches a device by type,
// waiting up to timeout.
func Frida_device_manager_get_device_by_type_sync(obj uintptr, tp int, timeout int, cancellable uintptr) (uintptr, error) {
	gerr := MakeGError()
	r, _, _ := frida_device_manager_get_device_by_type_sync.Call(obj, uintptr(tp), uintptr(timeout), cancellable, gerr.Input())
	return r, gerr.ToError()
}

// Frida_device_manager_find_device_by_id_sync looks up a device by id
// without blocking past timeout.
func Frida_device_manager_find_device_by_id_sync(obj uintptr, id string, timeout int, cancellable uintptr) (uintptr, error) {
	if id == "" {
		return 0, errors.New("id is empty")
	}
	gerr := MakeGError()
	r, _, _ := frida_device_manager_find_device_by_id_sync.Call(obj, GoStrToCStr(id), uintptr(timeout), cancellable, gerr.Input())
	return r, gerr.ToError()
}

// Frida_device_manager_find_device_by_type_sync looks up a device by type
// without blocking past timeout.
func Frida_device_manager_find_device_by_type_sync(obj uintptr, tp int, timeout int, cancellable uintptr) (uintptr, error) {
	gerr := MakeGError()
	r, _, _ := frida_device_manager_find_device_by_type_sync.Call(obj, uintptr(tp), uintptr(timeout), cancellable, gerr.Input())
	return r, gerr.ToError()
}
package main

import (
	_ "embed"
)

// s holds the contents of this very source file, embedded at build time
// by the go:embed directive below (which must stay directly above the
// var declaration).
//
//go:embed main.go
var s string

// main prints the embedded source — a self-printing program.
func main() {
	println(s)
}
// Copyright 2018 Andrew Bates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package insteon

import (
	"errors"
	"time"
)

var (
	// ErrAlreadyLinked is returned when creating a link and an existing matching link is found
	ErrAlreadyLinked = errors.New("Responder already linked to controller")
)

const (
	// BaseLinkDBAddress is the base address of devices All-Link database
	BaseLinkDBAddress = MemAddress(0x0fff)
)

// MemAddress is an integer representing a specific location in a device's memory
type MemAddress int

// String renders the address as "hh.ll". Note: sprintf (lowercase) is a
// package-local helper, presumably wrapping fmt.Sprintf — TODO confirm.
func (ma MemAddress) String() string {
	return sprintf("%02x.%02x", byte(ma>>8), byte(ma&0xff))
}

// There are two link request types, one used to read the link database and
// one used to write links
const (
	ReadLink  LinkRequestType = 0x00
	WriteLink LinkRequestType = 0x02
)

// LinkRequestType is used to indicate whether an ALDB request is for reading
// or writing the database
type LinkRequestType byte

// String names the request type. 0x01 ("Link Resp") has no named constant
// above but does appear on the wire (see UnmarshalBinary).
func (lrt LinkRequestType) String() string {
	switch lrt {
	case 0x00:
		return "Link Read"
	case 0x01:
		return "Link Resp"
	case 0x02:
		return "Link Write"
	}
	return "Unknown"
}

// LinkRequest is the message sent to a device to request reading or writing
// all-link database records
type LinkRequest struct {
	Type       LinkRequestType
	MemAddress MemAddress
	NumRecords int
	Link       *LinkRecord
}

// String renders the request, including the link record when present.
func (lr *LinkRequest) String() string {
	if lr.Link == nil {
		return sprintf("%s %s %d", lr.Type, lr.MemAddress, lr.NumRecords)
	}
	return sprintf("%s %s %d %s", lr.Type, lr.MemAddress, lr.NumRecords, lr.Link)
}

// UnmarshalBinary will take the byte slice and convert it to a LinkRequest object
//
// Wire layout (buf): [0] unused, [1] type, [2:4] big-endian memory
// address, [4] record count, [5:] link record (types 0x01/0x02).
// NOTE(review): the guard requires 5 bytes but newBufError reports an
// expected length of 6, and the 0x01/0x02 paths read buf[5:] — confirm
// the intended minimum length.
func (lr *LinkRequest) UnmarshalBinary(buf []byte) (err error) {
	if len(buf) < 5 {
		return newBufError(ErrBufferTooShort, 6, len(buf))
	}
	lr.Type = LinkRequestType(buf[1])
	lr.MemAddress = MemAddress(buf[2]) << 8
	lr.MemAddress |= MemAddress(buf[3])

	switch lr.Type {
	case 0x00:
		lr.NumRecords = int(buf[4])
	case 0x01:
		lr.Link = &LinkRecord{}
	case 0x02:
		lr.NumRecords = int(buf[4])
		lr.Link = &LinkRecord{}
	}

	if lr.Link != nil {
		err = lr.Link.UnmarshalBinary(buf[5:])
		// remember where this record came from in the device's ALDB
		lr.Link.memAddress = lr.MemAddress
	}
	return err
}

// MarshalBinary will convert the LinkRequest to a byte slice appropriate for
// sending out to the insteon network
//
// The output is always 14 bytes; byte [4] carries the record count for
// reads, 0x00 for responses, and 0x08 for writes.
func (lr *LinkRequest) MarshalBinary() (buf []byte, err error) {
	var linkData []byte
	buf = make([]byte, 14)
	buf[1] = byte(lr.Type)
	buf[2] = byte(lr.MemAddress >> 8)
	buf[3] = byte(lr.MemAddress & 0xff)
	switch lr.Type {
	case 0x00:
		buf[4] = byte(lr.NumRecords)
	case 0x01:
		buf[4] = 0x00
		linkData, err = lr.Link.MarshalBinary()
		copy(buf[5:], linkData)
	case 0x02:
		buf[4] = 0x08
		linkData, err = lr.Link.MarshalBinary()
		copy(buf[5:], linkData)
	}
	return buf, err
}

// FindDuplicateLinks will perform a linear search of the
// LinkDB and return any links that are duplicates.
// Duplicate
// links are those that are equivalent as reported by LinkRecord.Equal
func FindDuplicateLinks(linkable LinkableDevice) ([]*LinkRecord, error) {
	duplicates := make([]*LinkRecord, 0)
	links, err := linkable.Links()
	if err == nil {
		// pairwise comparison; links[i+1:] avoids comparing a record
		// with itself or re-checking earlier pairs
		for i, l1 := range links {
			for _, l2 := range links[i+1:] {
				if l1.Equal(l2) {
					duplicates = append(duplicates, l2)
				}
			}
		}
	}
	return duplicates, err
}

// FindLinkRecord will perform a linear search of the database and return
// a LinkRecord that matches the group, address and controller/responder
// indicator
func FindLinkRecord(linkable LinkableDevice, controller bool, address Address, group Group) (*LinkRecord, error) {
	links, err := linkable.Links()
	if err == nil {
		for _, link := range links {
			if link.Flags.Controller() == controller && link.Address == address && link.Group == group {
				return link, nil
			}
		}
	}
	// nil record with nil error means "not found"
	return nil, err
}

// CrossLinkAll will create bi-directional links among all the devices
// listed. This is useful for creating virtual N-Way connections
func CrossLinkAll(group Group, linkable ...LinkableDevice) error {
	for i, l1 := range linkable {
		for _, l2 := range linkable[i:] {
			// linkable[i:] includes l1 itself; the identity check skips it
			if l1 != l2 {
				err := CrossLink(group, l1, l2)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}

// CrossLink will create bi-directional links between the two linkable
// devices. Each device will get both a controller and responder
// link for the given group. When using lighting control devices, this
// will effectively create a 3-Way light switch configuration
func CrossLink(group Group, l1, l2 LinkableDevice) error {
	err := Link(group, l1, l2)
	// ErrAlreadyLinked in one direction is fine; still try the reverse
	if err == nil || err == ErrAlreadyLinked {
		err = Link(group, l2, l1)
		if err == ErrAlreadyLinked {
			err = nil
		}
	}
	return err
}

// ForceLink will create links in the controller and responder All-Link
// databases without first checking if the links exist. The links are
// created by simulating set button presses (using EnterLinkingMode)
//
// NOTE(review): the 2-second sleep runs even when EnterLinkingMode
// failed, and both ExitLinkingMode defers fire only at return — confirm
// this ordering matches the device protocol's timing requirements.
func ForceLink(group Group, controller, responder LinkableDevice) (err error) {
	Log.Debugf("Putting controller %s into linking mode", controller)
	// controller enters all-linking mode
	err = controller.EnterLinkingMode(group)
	defer controller.ExitLinkingMode()

	// allow the mode change to propagate over the network
	time.Sleep(2 * time.Second)

	// responder pushes the set button responder
	if err == nil {
		Log.Debugf("Assigning responder to group")
		err = responder.EnterLinkingMode(group)
		defer responder.ExitLinkingMode()
	}
	time.Sleep(time.Second)
	return
}

// UnlinkAll will unlink all groups between a controller and
// a responder device
func UnlinkAll(controller, responder LinkableDevice) (err error) {
	links, err := controller.Links()
	if err == nil {
		for _, link := range links {
			if link.Address == responder.Address() {
				err = Unlink(link.Group, responder, controller)
			}
		}
	}
	return err
}

// Unlink will unlink a controller from a responder for a given Group. The
// controller is put into UnlinkingMode (analogous to unlinking mode via
// the set button) and then the responder is put into unlinking mode (also
// analogous to the set button pressed)
//
// NOTE(review): the responder calls EnterLinkingMode (not
// EnterUnlinkingMode) below — presumably the protocol deletes the link
// when the controller is in unlinking mode; confirm this is intentional.
func Unlink(group Group, controller, responder LinkableDevice) (err error) {
	// controller enters all-linking mode
	err = controller.EnterUnlinkingMode(group)
	defer controller.ExitLinkingMode()

	// wait a moment for messages to propagate
	time.Sleep(2 * time.Second)

	// responder pushes the set button responder
	if err == nil {
		Log.Debugf("Unlinking responder from group")
		err = responder.EnterLinkingMode(group)
		defer responder.ExitLinkingMode()
	}

	// wait a moment for messages to propagate
	time.Sleep(time.Second)

	return
}

// Link will add appropriate entries to the controller's and responder's All-Link
// database. Each devices' ALDB will be searched for existing links, if both entries
// exist (a controller link and a responder link) then nothing is done.
If only one // entry exists than the other is deleted and new links are created. Once the link // check/cleanup has taken place the new links are created using ForceLink func Link(group Group, controller, responder LinkableDevice) (err error) { Log.Debugf("Looking for existing links") var controllerLink *LinkRecord controllerLink, err = FindLinkRecord(controller, true, responder.Address(), group) if err == nil { var responderLink *LinkRecord responderLink, err = FindLinkRecord(responder, false, controller.Address(), group) if err == nil { if controllerLink != nil && responderLink != nil { err = ErrAlreadyLinked } else { // correct a mismatch by deleting the one link found // and recreating both if controllerLink != nil { Log.Debugf("Controller link already exists, deleting it") err = controller.RemoveLinks(controllerLink) } if err == nil && responderLink != nil { Log.Debugf("Responder link already exists, deleting it") err = responder.RemoveLinks(controllerLink) } ForceLink(group, controller, responder) } } } return err }
package handler

import (
	"encoding/json"
	"fmt"
	"net/url"
	"strings"

	"github.com/parthiban-srinivasan/mserv/geocode/googlemap"
	proto "github.com/parthiban-srinivasan/mserv/geocode/proto"

	"github.com/micro/go-micro/errors"
	"golang.org/x/net/context"
)

// Geomap implements the geocode service handler.
type Geomap struct{}

// Geocode translates the request into Google Maps Geocoding API query
// parameters, performs the lookup via googlemap.Do, and unmarshals the
// JSON payload into rsp.
func (g *Geomap) Geocode(ctx context.Context, req *proto.GeocodeRequest, rsp *proto.GeocodeResponse) error {
	u := url.Values{}

	if len(req.Address) > 0 {
		u.Set("address", req.Address)
	}

	if len(req.Language) > 0 {
		u.Set("language", req.Language)
	}

	if len(req.Region) > 0 {
		u.Set("region", req.Region)
	}

	// components are pipe-separated "key:value" pairs per the Google API
	if req.Components != nil {
		var components []string
		for component, value := range req.Components {
			components = append(components, component+":"+value)
		}
		u.Set("components", strings.Join(components, "|"))
	}

	// bounds is "ne_lat,ne_lng|sw_lat,sw_lng"
	if req.Bounds != nil {
		var bounds []string
		bounds = append(bounds, fmt.Sprintf("%.6f,%.6f", req.Bounds.Northeast.Lat, req.Bounds.Northeast.Lng))
		bounds = append(bounds, fmt.Sprintf("%.6f,%.6f", req.Bounds.Southwest.Lat, req.Bounds.Southwest.Lng))
		u.Set("bounds", strings.Join(bounds, "|"))
	}

	b, err := googlemap.Do("geocode", u)
	if err != nil {
		return errors.InternalServerError("go.micro.srv.geocode.Google.Geocode", err.Error())
	}

	// Bug fixes: unmarshal into rsp directly (it is already a pointer;
	// &rsp was a double pointer) and report this service's id instead of
	// the copy-pasted "go.micro.srv.slack".
	if err := json.Unmarshal(b, rsp); err != nil {
		return errors.InternalServerError("go.micro.srv.geocode.Google.Geocode", err.Error())
	}

	return nil
}
// This file was generated by github.com/EGT-Ukraine/go2gql. DO NOT EDIT IT package test import ( context "context" fmt "fmt" debug "runtime/debug" errors "github.com/pkg/errors" graphql "github.com/saturn4er/graphql" interceptors "github.com/EGT-Ukraine/go2gql/api/interceptors" scalars "github.com/EGT-Ukraine/go2gql/api/scalars" tracer "github.com/EGT-Ukraine/go2gql/api/tracer" testdata "github.com/EGT-Ukraine/go2gql/testdata" common_1 "github.com/EGT-Ukraine/go2gql/testdata/common" common "github.com/EGT-Ukraine/go2gql/testdata/out/github.com/EGT-Ukraine/go2gql/testdata/common" ) // Enums var ExmplRootEnum = graphql.NewEnum(graphql.EnumConfig{ Name: "ExmplRootEnum", Description: "", Values: graphql.EnumValueConfigMap{ "RootEnumVal0": &graphql.EnumValueConfig{ Value: 0, }, "RootEnumVal1": &graphql.EnumValueConfig{ Value: 1, }, "RootEnumVal2": &graphql.EnumValueConfig{ Value: 2, Description: "It's a RootEnumVal2", }, }, }) var ExmplNestedEnum = graphql.NewEnum(graphql.EnumConfig{ Name: "ExmplNestedEnum", Description: "", Values: graphql.EnumValueConfigMap{ "NestedEnumVal0": &graphql.EnumValueConfig{ Value: 0, }, "NestedEnumVal1": &graphql.EnumValueConfig{ Value: 1, }, }, }) var ExmplNestedNestedEnum = graphql.NewEnum(graphql.EnumConfig{ Name: "ExmplNestedNestedEnum", Description: "", Values: graphql.EnumValueConfigMap{ "NestedNestedEnumVal0": &graphql.EnumValueConfig{ Value: 0, }, "NestedNestedEnumVal1": &graphql.EnumValueConfig{ Value: 1, }, "NestedNestedEnumVal2": &graphql.EnumValueConfig{ Value: 2, }, "NestedNestedEnumVal3": &graphql.EnumValueConfig{ Value: 3, }, }, }) // Input object var ExmplRootMessageInput = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessageInput", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "r_msg": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageNestedMessage))}, "r_scalar": 
&graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(scalars.GraphQLInt32Scalar))}, "r_enum": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootEnum))}, "r_empty_msg": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(scalars.NoDataScalar))}, "n_r_enum": &graphql.InputObjectFieldConfig{Type: common.CommonEnum}, "n_r_scalar": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "n_r_msg": &graphql.InputObjectFieldConfig{Type: common.CommonMessage}, "scalar_from_context": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "n_r_empty_msg": &graphql.InputObjectFieldConfig{Type: scalars.NoDataScalar}, "map_enum": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__MapEnum))}, "map_scalar": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__MapScalar))}, "map_msg": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__MapMsg))}, "ctx_map": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__CtxMap))}, "ctx_map_enum": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__CtxMapEnum))}, "e_f_o_e": &graphql.InputObjectFieldConfig{Type: common.CommonEnum}, "e_f_o_s": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "e_f_o_m": &graphql.InputObjectFieldConfig{Type: common.CommonMessage}, "e_f_o_em": &graphql.InputObjectFieldConfig{Type: scalars.NoDataScalar}, "s_f_o_s": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "s_f_o_e": &graphql.InputObjectFieldConfig{Type: ExmplRootEnum}, "s_f_o_mes": &graphql.InputObjectFieldConfig{Type: ExmplRootMessage2}, "s_f_o_m": &graphql.InputObjectFieldConfig{Type: scalars.NoDataScalar}, "m_f_o_m": &graphql.InputObjectFieldConfig{Type: ExmplRootMessage2}, "m_f_o_s": 
&graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "m_f_o_e": &graphql.InputObjectFieldConfig{Type: ExmplRootEnum}, "m_f_o_em": &graphql.InputObjectFieldConfig{Type: scalars.NoDataScalar}, "em_f_o_em": &graphql.InputObjectFieldConfig{Type: scalars.NoDataScalar}, "em_f_o_s": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "em_f_o_en": &graphql.InputObjectFieldConfig{Type: ExmplRootEnum}, "em_f_o_m": &graphql.InputObjectFieldConfig{Type: ExmplRootMessage2}, } }), }) var ExmplRootMessageNestedMessageInput = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessage__NestedMessageInput", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "sub_r_enum": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplNestedEnum))}, "sub_sub_r_enum": &graphql.InputObjectFieldConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplNestedNestedEnum))}, } }), }) var ExmplMessageWithEmptyInput = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplMessageWithEmptyInput", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "empt": &graphql.InputObjectFieldConfig{Type: scalars.NoDataScalar}, } }), }) var ExmplRootMessage2Input = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessage2Input", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "some_field": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, } }), }) // Input objects resolvers func ResolveExmplRootMessageInput(tr tracer.Tracer, ctx context.Context, i interface{}) (_ *testdata.RootMessage, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessageInput") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", 
"true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } args := i.(map[string]interface{}) _ = args var result = new(testdata.RootMessage) if args["r_msg"] != nil { in := args["r_msg"].([]interface{}) result.RMsg = make([]*testdata.RootMessage_NestedMessage, len(in)) for i, val := range in { v, err := ResolveExmplRootMessage__NestedMessageInput(tr, tr.ContextWithSpan(ctx, span), val) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.RMsg[i] = v } } if args["r_scalar"] != nil { in := args["r_scalar"].([]interface{}) result.RScalar = make([]int32, len(in)) for i, val := range in { result.RScalar[i] = val.(int32) } } if args["r_enum"] != nil { in := args["r_enum"].([]interface{}) result.REnum = make([]testdata.RootEnum, len(in)) for i, val := range in { result.REnum[i] = testdata.RootEnum(val.(int)) } } if args["r_empty_msg"] != nil { in := args["r_empty_msg"].([]interface{}) result.REmptyMsg = make([]*testdata.Empty, len(in)) for i, val := range in { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), val) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.REmptyMsg[i] = v } } if args["n_r_enum"] != nil { result.NREnum = common_1.CommonEnum(args["n_r_enum"].(int)) } if args["n_r_scalar"] != nil { result.NRScalar = args["n_r_scalar"].(int32) } if args["n_r_msg"] != nil { v, err := common.ResolveCommonMessageInput(tr, tr.ContextWithSpan(ctx, span), args["n_r_msg"]) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.NRMsg = v } if args["scalar_from_context"] != nil { result.ScalarFromContext = ctx.Value("ctx_key").(int32) } if args["n_r_empty_msg"] != nil { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), args["n_r_empty_msg"]) if err != nil { return nil, 
errors.Wrap(err, "failed to resolve input object field") } result.NREmptyMsg = v } if args["map_enum"] != nil { v, err := ResolveExmplRootMessageInput__MapEnum(tr, tr.ContextWithSpan(ctx, span), args["map_enum"]) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.MapEnum = v } if args["map_scalar"] != nil { v, err := ResolveExmplRootMessageInput__MapScalar(tr, tr.ContextWithSpan(ctx, span), args["map_scalar"]) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.MapScalar = v } if args["map_msg"] != nil { v, err := ResolveExmplRootMessageInput__MapMsg(tr, tr.ContextWithSpan(ctx, span), args["map_msg"]) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.MapMsg = v } if args["ctx_map"] != nil { result.CtxMap = ctx.Value("ctx_map").(map[string]*testdata.RootMessage_NestedMessage) } if args["ctx_map_enum"] != nil { result.CtxMapEnum = ctx.Value("ctx_map_enum").(map[string]testdata.RootMessage_NestedEnum) } if e_f_o_e_, ok := args["e_f_o_e"]; ok && e_f_o_e_ != nil { v := common_1.CommonEnum(e_f_o_e_.(int)) result.EnumFirstOneoff = &testdata.RootMessage_EFOE{v} } else if e_f_o_s_, ok := args["e_f_o_s"]; ok && e_f_o_s_ != nil { v := e_f_o_s_.(int32) result.EnumFirstOneoff = &testdata.RootMessage_EFOS{v} } else if e_f_o_m_, ok := args["e_f_o_m"]; ok && e_f_o_m_ != nil { v, err := common.ResolveCommonMessageInput(tr, tr.ContextWithSpan(ctx, span), e_f_o_m_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field e_f_o_m") } result.EnumFirstOneoff = &testdata.RootMessage_EFOM{v} } else if e_f_o_em_, ok := args["e_f_o_em"]; ok && e_f_o_em_ != nil { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), e_f_o_em_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field e_f_o_em") } result.EnumFirstOneoff = &testdata.RootMessage_EFOEm{v} } if s_f_o_s_, ok := args["s_f_o_s"]; ok && 
s_f_o_s_ != nil { v := s_f_o_s_.(int32) result.ScalarFirstOneoff = &testdata.RootMessage_SFOS{v} } else if s_f_o_e_, ok := args["s_f_o_e"]; ok && s_f_o_e_ != nil { v := testdata.RootEnum(s_f_o_e_.(int)) result.ScalarFirstOneoff = &testdata.RootMessage_SFOE{v} } else if s_f_o_mes_, ok := args["s_f_o_mes"]; ok && s_f_o_mes_ != nil { v, err := ResolveExmplRootMessage2Input(tr, tr.ContextWithSpan(ctx, span), s_f_o_mes_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field s_f_o_mes") } result.ScalarFirstOneoff = &testdata.RootMessage_SFOMes{v} } else if s_f_o_m_, ok := args["s_f_o_m"]; ok && s_f_o_m_ != nil { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), s_f_o_m_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field s_f_o_m") } result.ScalarFirstOneoff = &testdata.RootMessage_SFOM{v} } if m_f_o_m_, ok := args["m_f_o_m"]; ok && m_f_o_m_ != nil { v, err := ResolveExmplRootMessage2Input(tr, tr.ContextWithSpan(ctx, span), m_f_o_m_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field m_f_o_m") } result.MessageFirstOneoff = &testdata.RootMessage_MFOM{v} } else if m_f_o_s_, ok := args["m_f_o_s"]; ok && m_f_o_s_ != nil { v := m_f_o_s_.(int32) result.MessageFirstOneoff = &testdata.RootMessage_MFOS{v} } else if m_f_o_e_, ok := args["m_f_o_e"]; ok && m_f_o_e_ != nil { v := testdata.RootEnum(m_f_o_e_.(int)) result.MessageFirstOneoff = &testdata.RootMessage_MFOE{v} } else if m_f_o_em_, ok := args["m_f_o_em"]; ok && m_f_o_em_ != nil { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), m_f_o_em_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field m_f_o_em") } result.MessageFirstOneoff = &testdata.RootMessage_MFOEm{v} } if em_f_o_em_, ok := args["em_f_o_em"]; ok && em_f_o_em_ != nil { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), em_f_o_em_) if err != nil { return nil, errors.Wrap(err, "failed to resolve 
oneOf object field em_f_o_em") } result.EmptyFirstOneoff = &testdata.RootMessage_EmFOEm{v} } else if em_f_o_s_, ok := args["em_f_o_s"]; ok && em_f_o_s_ != nil { v := em_f_o_s_.(int32) result.EmptyFirstOneoff = &testdata.RootMessage_EmFOS{v} } else if em_f_o_en_, ok := args["em_f_o_en"]; ok && em_f_o_en_ != nil { v := testdata.RootEnum(em_f_o_en_.(int)) result.EmptyFirstOneoff = &testdata.RootMessage_EmFOEn{v} } else if em_f_o_m_, ok := args["em_f_o_m"]; ok && em_f_o_m_ != nil { v, err := ResolveExmplRootMessage2Input(tr, tr.ContextWithSpan(ctx, span), em_f_o_m_) if err != nil { return nil, errors.Wrap(err, "failed to resolve oneOf object field em_f_o_m") } result.EmptyFirstOneoff = &testdata.RootMessage_EmFOM{v} } return result, nil } func ResolveExmplRootMessage__NestedMessageInput(tr tracer.Tracer, ctx context.Context, i interface{}) (_ *testdata.RootMessage_NestedMessage, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessage__NestedMessageInput") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } args := i.(map[string]interface{}) _ = args var result = new(testdata.RootMessage_NestedMessage) if args["sub_r_enum"] != nil { in := args["sub_r_enum"].([]interface{}) result.SubREnum = make([]testdata.RootMessage_NestedEnum, len(in)) for i, val := range in { result.SubREnum[i] = testdata.RootMessage_NestedEnum(val.(int)) } } if args["sub_sub_r_enum"] != nil { in := args["sub_sub_r_enum"].([]interface{}) result.SubSubREnum = make([]testdata.RootMessage_NestedMessage_NestedNestedEnum, len(in)) for i, val := range in { result.SubSubREnum[i] = testdata.RootMessage_NestedMessage_NestedNestedEnum(val.(int)) } } return result, nil } func ResolveExmplEmptyInput(tr tracer.Tracer, ctx context.Context, i 
interface{}) (_ *testdata.Empty, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplEmptyInput") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } args := i.(map[string]interface{}) _ = args var result = new(testdata.Empty) return result, nil } func ResolveExmplMessageWithEmptyInput(tr tracer.Tracer, ctx context.Context, i interface{}) (_ *testdata.MessageWithEmpty, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplMessageWithEmptyInput") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } args := i.(map[string]interface{}) _ = args var result = new(testdata.MessageWithEmpty) if args["empt"] != nil { v, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(ctx, span), args["empt"]) if err != nil { return nil, errors.Wrap(err, "failed to resolve input object field") } result.Empt = v } return result, nil } func ResolveExmplRootMessage2Input(tr tracer.Tracer, ctx context.Context, i interface{}) (_ *testdata.RootMessage2, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessage2Input") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } args := i.(map[string]interface{}) _ = args var result = new(testdata.RootMessage2) if args["some_field"] != nil { result.SomeField = 
args["some_field"].(int32) } return result, nil } // Output objects var ExmplRootMessage = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage.AddFieldConfig("r_msg", &graphql.Field{ Name: "r_msg", Type: ExmplRootMessageNestedMessage, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.RMsg, nil case testdata.RootMessage: return src.RMsg, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("r_scalar", &graphql.Field{ Name: "r_scalar", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.RScalar, nil case testdata.RootMessage: return src.RScalar, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("r_enum", &graphql.Field{ Name: "r_enum", Type: ExmplRootEnum, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.REnum, nil case testdata.RootMessage: return src.REnum, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("r_empty_msg", &graphql.Field{ Name: "r_empty_msg", Type: scalars.NoDataScalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.REmptyMsg, nil case testdata.RootMessage: return src.REmptyMsg, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("n_r_enum", &graphql.Field{ Name: "n_r_enum", Type: common.CommonEnum, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: 
if src == nil { return nil, nil } return src.NREnum, nil case testdata.RootMessage: return src.NREnum, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("n_r_scalar", &graphql.Field{ Name: "n_r_scalar", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.NRScalar, nil case testdata.RootMessage: return src.NRScalar, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("n_r_msg", &graphql.Field{ Name: "n_r_msg", Type: common.CommonMessage, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.NRMsg, nil case testdata.RootMessage: return src.NRMsg, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("scalar_from_context", &graphql.Field{ Name: "scalar_from_context", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.ScalarFromContext, nil case testdata.RootMessage: return src.ScalarFromContext, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessage.AddFieldConfig("n_r_empty_msg", &graphql.Field{ Name: "n_r_empty_msg", Type: scalars.NoDataScalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage: if src == nil { return nil, nil } return src.NREmptyMsg, nil case testdata.RootMessage: return src.NREmptyMsg, nil } return nil, errors.New("source of unknown type") }, }) } var ExmplRootMessageNestedMessage = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage__NestedMessage", Fields: graphql.Fields{}, }) func init() { 
ExmplRootMessageNestedMessage.AddFieldConfig("sub_r_enum", &graphql.Field{ Name: "sub_r_enum", Type: ExmplNestedEnum, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage_NestedMessage: if src == nil { return nil, nil } return src.SubREnum, nil case testdata.RootMessage_NestedMessage: return src.SubREnum, nil } return nil, errors.New("source of unknown type") }, }) ExmplRootMessageNestedMessage.AddFieldConfig("sub_sub_r_enum", &graphql.Field{ Name: "sub_sub_r_enum", Type: ExmplNestedNestedEnum, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage_NestedMessage: if src == nil { return nil, nil } return src.SubSubREnum, nil case testdata.RootMessage_NestedMessage: return src.SubSubREnum, nil } return nil, errors.New("source of unknown type") }, }) } var ExmplEmpty = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplEmpty", Fields: graphql.Fields{}, }) func init() { } var ExmplMessageWithEmpty = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplMessageWithEmpty", Fields: graphql.Fields{}, }) func init() { ExmplMessageWithEmpty.AddFieldConfig("empt", &graphql.Field{ Name: "empt", Type: scalars.NoDataScalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.MessageWithEmpty: if src == nil { return nil, nil } return src.Empt, nil case testdata.MessageWithEmpty: return src.Empt, nil } return nil, errors.New("source of unknown type") }, }) } var ExmplRootMessage2 = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage2", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage2.AddFieldConfig("some_field", &graphql.Field{ Name: "some_field", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { switch src := p.Source.(type) { case *testdata.RootMessage2: if src == nil { return nil, nil } return src.SomeField, nil case 
testdata.RootMessage2: return src.SomeField, nil } return nil, errors.New("source of unknown type") }, }) } // Maps input objects var ExmplRootMessageInput__MapEnum = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessageInput__MapEnum", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "key": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "value": &graphql.InputObjectFieldConfig{Type: ExmplNestedEnum}, } }), }) var ExmplRootMessageInput__MapScalar = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessageInput__MapScalar", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "key": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, "value": &graphql.InputObjectFieldConfig{Type: scalars.GraphQLInt32Scalar}, } }), }) var ExmplRootMessageInput__MapMsg = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessageInput__MapMsg", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "key": &graphql.InputObjectFieldConfig{Type: graphql.String}, "value": &graphql.InputObjectFieldConfig{Type: ExmplRootMessageNestedMessage}, } }), }) var ExmplRootMessageInput__CtxMap = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessageInput__CtxMap", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { return graphql.InputObjectConfigFieldMap{ "key": &graphql.InputObjectFieldConfig{Type: graphql.String}, "value": &graphql.InputObjectFieldConfig{Type: ExmplRootMessageNestedMessage}, } }), }) var ExmplRootMessageInput__CtxMapEnum = graphql.NewInputObject(graphql.InputObjectConfig{ Name: "ExmplRootMessageInput__CtxMapEnum", Fields: graphql.InputObjectConfigFieldMapThunk(func() graphql.InputObjectConfigFieldMap { 
return graphql.InputObjectConfigFieldMap{ "key": &graphql.InputObjectFieldConfig{Type: graphql.String}, "value": &graphql.InputObjectFieldConfig{Type: ExmplNestedEnum}, } }), }) // Maps input objects resolvers func ResolveExmplRootMessageInput__MapEnum(tr tracer.Tracer, ctx context.Context, i interface{}) (_ map[int32]testdata.RootMessage_NestedEnum, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessageInput__MapEnum") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } in := i.([]interface{}) result := make(map[int32]testdata.RootMessage_NestedEnum) for i, ival := range in { _ = i val := ival.(map[string]interface{}) k, v := val["key"], val["value"] _, _ = k, v kk := k.(int32) vv := testdata.RootMessage_NestedEnum(v.(int)) result[kk] = vv } return result, nil } func ResolveExmplRootMessageInput__MapScalar(tr tracer.Tracer, ctx context.Context, i interface{}) (_ map[int32]int32, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessageInput__MapScalar") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } in := i.([]interface{}) result := make(map[int32]int32) for i, ival := range in { _ = i val := ival.(map[string]interface{}) k, v := val["key"], val["value"] _, _ = k, v kk := k.(int32) vv := v.(int32) result[kk] = vv } return result, nil } func ResolveExmplRootMessageInput__MapMsg(tr tracer.Tracer, ctx context.Context, i interface{}) (_ map[string]*testdata.RootMessage_NestedMessage, rerr error) { span := 
tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessageInput__MapMsg") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } in := i.([]interface{}) result := make(map[string]*testdata.RootMessage_NestedMessage) for i, ival := range in { _ = i val := ival.(map[string]interface{}) k, v := val["key"], val["value"] _, _ = k, v kk := k.(string) vv, err := ResolveExmplRootMessage__NestedMessageInput(tr, tr.ContextWithSpan(ctx, span), v) if err != nil { return nil, errors.Wrapf(err, "failed to resolve #%d map element value", i) } result[kk] = vv } return result, nil } func ResolveExmplRootMessageInput__CtxMap(tr tracer.Tracer, ctx context.Context, i interface{}) (_ map[string]*testdata.RootMessage_NestedMessage, rerr error) { span := tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessageInput__CtxMap") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } in := i.([]interface{}) result := make(map[string]*testdata.RootMessage_NestedMessage) for i, ival := range in { _ = i val := ival.(map[string]interface{}) k, v := val["key"], val["value"] _, _ = k, v kk := k.(string) vv, err := ResolveExmplRootMessage__NestedMessageInput(tr, tr.ContextWithSpan(ctx, span), v) if err != nil { return nil, errors.Wrapf(err, "failed to resolve #%d map element value", i) } result[kk] = vv } return result, nil } func ResolveExmplRootMessageInput__CtxMapEnum(tr tracer.Tracer, ctx context.Context, i interface{}) (_ map[string]testdata.RootMessage_NestedEnum, rerr error) { span := 
tr.CreateChildSpanFromContext(ctx, "ResolveExmplRootMessageInput__CtxMapEnum") defer span.Finish() defer func() { if perr := recover(); perr != nil { span.SetTag("error", "true").SetTag("error_message", perr).SetTag("error_stack", string(debug.Stack())) } if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if i == nil { return nil, nil } in := i.([]interface{}) result := make(map[string]testdata.RootMessage_NestedEnum) for i, ival := range in { _ = i val := ival.(map[string]interface{}) k, v := val["key"], val["value"] _, _ = k, v kk := k.(string) vv := testdata.RootMessage_NestedEnum(v.(int)) result[kk] = vv } return result, nil } // Maps output objects var ExmplRootMessage__map_enum = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage__map_enum", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage__map_enum.AddFieldConfig("key", &graphql.Field{ Name: "key", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["key"], nil }, }) ExmplRootMessage__map_enum.AddFieldConfig("value", &graphql.Field{ Name: "value", Type: ExmplNestedEnum, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["value"], nil }, }) } var ExmplRootMessage__map_scalar = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage__map_scalar", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage__map_scalar.AddFieldConfig("key", &graphql.Field{ Name: "key", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["key"], nil }, }) ExmplRootMessage__map_scalar.AddFieldConfig("value", &graphql.Field{ Name: "value", Type: scalars.GraphQLInt32Scalar, Resolve: func(p graphql.ResolveParams) 
(interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["value"], nil }, }) } var ExmplRootMessage__map_msg = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage__map_msg", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage__map_msg.AddFieldConfig("key", &graphql.Field{ Name: "key", Type: graphql.String, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["key"], nil }, }) ExmplRootMessage__map_msg.AddFieldConfig("value", &graphql.Field{ Name: "value", Type: ExmplRootMessageNestedMessage, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["value"], nil }, }) } var ExmplRootMessage__ctx_map = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage__ctx_map", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage__ctx_map.AddFieldConfig("key", &graphql.Field{ Name: "key", Type: graphql.String, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["key"], nil }, }) ExmplRootMessage__ctx_map.AddFieldConfig("value", &graphql.Field{ Name: "value", Type: ExmplRootMessageNestedMessage, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["value"], nil }, }) } var ExmplRootMessage__ctx_map_enum = graphql.NewObject(graphql.ObjectConfig{ Name: "ExmplRootMessage__ctx_map_enum", Fields: graphql.Fields{}, }) func init() { ExmplRootMessage__ctx_map_enum.AddFieldConfig("key", &graphql.Field{ Name: "key", Type: graphql.String, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["key"], nil }, }) 
ExmplRootMessage__ctx_map_enum.AddFieldConfig("value", &graphql.Field{ Name: "value", Type: ExmplNestedEnum, Resolve: func(p graphql.ResolveParams) (interface{}, error) { src := p.Source.(map[string]interface{}) if src == nil { return nil, nil } return src["value"], nil }, }) } // Services func GetServiceExampleServiceMethods(c testdata.ServiceExampleClient, ih *interceptors.InterceptorHandler, tr tracer.Tracer) graphql.Fields { return graphql.Fields{ "mutationMethod": &graphql.Field{ Name: "mutationMethod", Type: ExmplRootMessageNestedMessage, Args: graphql.FieldConfigArgument{ "some_field": &graphql.ArgumentConfig{Type: scalars.GraphQLInt32Scalar}, }, Resolve: func(p graphql.ResolveParams) (_ interface{}, rerr error) { span := tr.CreateChildSpanFromContext(p.Context, "ServiceExample.mutationMethod Resolver") defer span.Finish() defer func() { if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if ih == nil { req, err := ResolveExmplRootMessage2Input(tr, tr.ContextWithSpan(p.Context, span), p.Args) if err != nil { return nil, err } return c.MutationMethod(p.Context, req) } ctx := &interceptors.Context{ Service: "ServiceExample", Method: "mutationMethod", Params: p, } req, err := ih.ResolveArgs(ctx, func(ctx *interceptors.Context, next interceptors.ResolveArgsInvoker) (result interface{}, err error) { return ResolveExmplRootMessage2Input(tr, tr.ContextWithSpan(p.Context, span), p.Args) }) if err != nil { return nil, errors.Wrap(err, "failed to resolve args") } return ih.Call(ctx, req, func(ctx *interceptors.Context, req interface{}, next interceptors.CallMethodInvoker) (result interface{}, err error) { r, ok := req.(*testdata.RootMessage2) if !ok { return nil, errors.New(fmt.Sprintf("Resolve args interceptor returns bad request type(%T). 
Should be: *testdata.RootMessage2", req)) } return c.MutationMethod(ctx.Params.Context, r) }) }, }, "EmptyMsgs": &graphql.Field{ Name: "EmptyMsgs", Type: scalars.NoDataScalar, Resolve: func(p graphql.ResolveParams) (_ interface{}, rerr error) { span := tr.CreateChildSpanFromContext(p.Context, "ServiceExample.EmptyMsgs Resolver") defer span.Finish() defer func() { if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if ih == nil { req, err := ResolveExmplEmptyInput(tr, tr.ContextWithSpan(p.Context, span), p.Args) if err != nil { return nil, err } return c.EmptyMsgs(p.Context, req) } ctx := &interceptors.Context{ Service: "ServiceExample", Method: "EmptyMsgs", Params: p, } req, err := ih.ResolveArgs(ctx, func(ctx *interceptors.Context, next interceptors.ResolveArgsInvoker) (result interface{}, err error) { return ResolveExmplEmptyInput(tr, tr.ContextWithSpan(p.Context, span), p.Args) }) if err != nil { return nil, errors.Wrap(err, "failed to resolve args") } return ih.Call(ctx, req, func(ctx *interceptors.Context, req interface{}, next interceptors.CallMethodInvoker) (result interface{}, err error) { r, ok := req.(*testdata.Empty) if !ok { return nil, errors.New(fmt.Sprintf("Resolve args interceptor returns bad request type(%T). 
Should be: *testdata.Empty", req)) } return c.EmptyMsgs(ctx.Params.Context, r) }) }, }, "MsgsWithEpmty": &graphql.Field{ Name: "MsgsWithEpmty", Type: ExmplMessageWithEmpty, Args: graphql.FieldConfigArgument{ "empt": &graphql.ArgumentConfig{Type: scalars.NoDataScalar}, }, Resolve: func(p graphql.ResolveParams) (_ interface{}, rerr error) { span := tr.CreateChildSpanFromContext(p.Context, "ServiceExample.MsgsWithEpmty Resolver") defer span.Finish() defer func() { if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if ih == nil { req, err := ResolveExmplMessageWithEmptyInput(tr, tr.ContextWithSpan(p.Context, span), p.Args) if err != nil { return nil, err } return c.MsgsWithEpmty(p.Context, req) } ctx := &interceptors.Context{ Service: "ServiceExample", Method: "MsgsWithEpmty", Params: p, } req, err := ih.ResolveArgs(ctx, func(ctx *interceptors.Context, next interceptors.ResolveArgsInvoker) (result interface{}, err error) { return ResolveExmplMessageWithEmptyInput(tr, tr.ContextWithSpan(p.Context, span), p.Args) }) if err != nil { return nil, errors.Wrap(err, "failed to resolve args") } return ih.Call(ctx, req, func(ctx *interceptors.Context, req interface{}, next interceptors.CallMethodInvoker) (result interface{}, err error) { r, ok := req.(*testdata.MessageWithEmpty) if !ok { return nil, errors.New(fmt.Sprintf("Resolve args interceptor returns bad request type(%T). 
Should be: *testdata.MessageWithEmpty", req)) } return c.MsgsWithEpmty(ctx.Params.Context, r) }) }, }, } } func GetMutationsServiceExampleServiceMethods(c testdata.ServiceExampleClient, ih *interceptors.InterceptorHandler, tr tracer.Tracer) graphql.Fields { return graphql.Fields{ "getQueryMethod": &graphql.Field{ Name: "getQueryMethod", Type: ExmplRootMessage, Args: graphql.FieldConfigArgument{ "r_msg": &graphql.ArgumentConfig{Type: ExmplRootMessageNestedMessageInput}, "r_scalar": &graphql.ArgumentConfig{Type: scalars.GraphQLInt32Scalar}, "r_enum": &graphql.ArgumentConfig{Type: ExmplRootEnum}, "r_empty_msg": &graphql.ArgumentConfig{Type: scalars.NoDataScalar}, "n_r_enum": &graphql.ArgumentConfig{Type: ExmplCommonEnum}, "n_r_scalar": &graphql.ArgumentConfig{Type: scalars.GraphQLInt32Scalar}, "n_r_msg": &graphql.ArgumentConfig{Type: ExmplCommonMessageInput}, "scalar_from_context": &graphql.ArgumentConfig{Type: scalars.GraphQLInt32Scalar}, "n_r_empty_msg": &graphql.ArgumentConfig{Type: scalars.NoDataScalar}, "map_enum": &graphql.ArgumentConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__MapEnum))}, "map_scalar": &graphql.ArgumentConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__MapScalar))}, "map_msg": &graphql.ArgumentConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__MapMsg))}, "ctx_map": &graphql.ArgumentConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__CtxMap))}, "ctx_map_enum": &graphql.ArgumentConfig{Type: graphql.NewList(graphql.NewNonNull(ExmplRootMessageInput__CtxMapEnum))}, }, Resolve: func(p graphql.ResolveParams) (_ interface{}, rerr error) { span := tr.CreateChildSpanFromContext(p.Context, "MutationsServiceExample.getQueryMethod Resolver") defer span.Finish() defer func() { if rerr != nil { span.SetTag("error", "true").SetTag("error_message", rerr.Error()) } }() if ih == nil { req, err := ResolveExmplRootMessageInput(tr, tr.ContextWithSpan(p.Context, span), p.Args) if err != 
nil { return nil, err } return c.GetQueryMethod(p.Context, req) } ctx := &interceptors.Context{ Service: "MutationsServiceExample", Method: "getQueryMethod", Params: p, } req, err := ih.ResolveArgs(ctx, func(ctx *interceptors.Context, next interceptors.ResolveArgsInvoker) (result interface{}, err error) { return ResolveExmplRootMessageInput(tr, tr.ContextWithSpan(p.Context, span), p.Args) }) if err != nil { return nil, errors.Wrap(err, "failed to resolve args") } return ih.Call(ctx, req, func(ctx *interceptors.Context, req interface{}, next interceptors.CallMethodInvoker) (result interface{}, err error) { r, ok := req.(*testdata.RootMessage) if !ok { return nil, errors.New(fmt.Sprintf("Resolve args interceptor returns bad request type(%T). Should be: *testdata.RootMessage", req)) } res, err := c.GetQueryMethod(ctx.Params.Context, r) if err != nil { return nil, err } if len(res.GetCtxMap()) > 0 { ctx.PayloadError = res.GetCtxMap() } return res, err }) }, }, } }
package main import ( "os" "github.com/urfave/cli" ) var ( app = cli.NewApp() cfg config ) func main() { app.Name = "jedie" app.Usage = "Static site generator written in golang" app.Version = "0.0.1" app.Run(os.Args) }
package functions

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"strconv"
	"time"
)

// categoryProbabilities maps each question category to a cumulative
// probability threshold. Drawing r in [0,1) and taking the first category
// (in ascending threshold order) whose threshold is >= r gives each
// category its intended share (e.g. "area" 0.2, "gini" 0.1).
// NOTE(review): any selection code must visit these keys in a fixed
// ascending-threshold order — Go map iteration order is random.
var categoryProbabilities = map[string]float64{
	"area":              0.2,
	"population":        0.4,
	"gdpPerCapita":      0.6,
	"lifeExpectancy":    0.8,
	"healthExpenditure": 0.9,
	"gini":              1.0,
}

// excludedCountries lists two-letter factbook codes that are never used in
// a question — mostly small territories and dependencies (presumably
// excluded because their data is sparse; confirm with the data set).
var excludedCountries = []string{
	"um", // United States Pacific Island Wildlife Refuges
	"ct", // Central African Republic
	"wf", // Wallis and Futuna
	"bq", // Navassa Island
	"at", // Ashmore and Cartier Islands
	"kt", // Christmas Island
	"ck", // Cocos (Keeling) Islands
	"cr", // Coral Sea Islands
	"ne", // Niue
	"nf", // Norfolk Island
	"cq", // Northern Mariana Islands
	"tl", // Tokelau
	"tb", // Saint Barthelemy
	"dx", // Dhekelia
	"jn", // Jan Mayen
	"je", // Jersey
	"ip", // Clipperton Island
	"sb", // Saint Pierre and Miquelon
	"io", // British Indian Ocean Territory
	"sh", // Saint Helena, Ascension, and Tristan da Cunha
	"bv", // Bouvet Island
	"fs", // French Southern and Antarctic Lands
	"hm", // Heard Island and McDonald Islands
	"nc", // New Caledonia
	"wq", // Wake Island
	"nr", // Nauru
	"vc", // Saint Vincent and the Grenadines
	"pf", // Paracel Islands
	"kr", // Kiribati
	"cc", // Curacao
	"tn", // Tonga
}

// An HTTP Cloud Function.
func QuestionsV2(w http.ResponseWriter, r *http.Request) { query := r.URL.Query() n, present := query["n"] if !present { n = []string{"30"} } numQuestions, _ := strconv.Atoi(n[0]) countries, readError := Read() if readError != nil { http.Error(w, "500 - Could not read file", http.StatusInternalServerError) return } // fmt.Fprintf(w, "Hello, %s!", html.EscapeString(fmt.Sprintf("hello hello %d", numQuestions))) questions := GetQuestions(countries, numQuestions) json, err := json.Marshal(questions) if err != nil { http.Error(w, "500 - Could not read file", http.StatusInternalServerError) return } // Set CORS headers for the preflight request if r.Method == http.MethodOptions { w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "POST") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") w.Header().Set("Access-Control-Max-Age", "3600") w.WriteHeader(http.StatusNoContent) return } // Set CORS headers for the main request. w.Header().Set("Access-Control-Allow-Origin", "*") fmt.Fprint(w, string(json)) } func Read() (map[string]Country, error) { var folder string files, err := ioutil.ReadDir("./serverless_function_source_code/factbook/") folder = "./serverless_function_source_code/factbook/" if err != nil { // fallback to non-cloud folder // see https://cloud.google.com/functions/docs/concepts/exec#file_system files, err = ioutil.ReadDir("./factbook/") folder = "./factbook/" if err != nil { return nil, err } } var countries map[string]Country = make(map[string]Country) for _, file := range files { // fmt.Println(file) byteValue, err := ioutil.ReadFile(folder + file.Name()) if err != nil { return nil, err } var jsonObject map[string](interface{}) err = json.Unmarshal(byteValue, &jsonObject) if err != nil { return nil, err } var country Country country.Id = withoutExtension(file.Name()) if gov, ok := jsonObject["Government"].(map[string]interface{}); ok { if countryName, ok := gov["Country 
name"].(map[string]interface{}); ok { var shortForm = countryName["conventional short form"].(map[string]interface{}) country.Name = shortForm["text"].(string) if country.Name == "none" { // fall back to long name country.Name = countryName["conventional long form"].(map[string]interface{})["text"].(string) } } else { continue } } if geography, ok := jsonObject["Geography"].(map[string]interface{}); ok { if area, ok := geography["Area"].(map[string]interface{}); ok { if total, ok := area["total"].(map[string]interface{}); ok { if country.Area, ok = textToValue(total); !ok { continue } } else { continue } if areaComparison, ok := area["total"].(map[string]interface{}); ok { country.AreaComparison = areaComparison["text"].(string) } else { continue } } else { continue } } else { continue } if economy, ok := jsonObject["Economy"].(map[string]interface{}); ok { if gdpVal, ok := economy["Real GDP (purchasing power parity)"].(map[string]interface{}); ok { if realGdpPerCapita, ok := economy["Real GDP per capita"].(map[string]interface{}); ok { for _, year := range []int{2021, 2020, 2019, 2018, 2017, 2016, 2015} { key := fmt.Sprint("Real GDP per capita ", year) if val, ok := realGdpPerCapita[key].(map[string]interface{}); ok { if country.GDPCapita, ok = textToValue(val); !ok { continue } } key = fmt.Sprint("Real GDP (purchasing power parity) ", year) if gdpValForYear, ok := gdpVal[key].(map[string]interface{}); ok { if country.GDP, ok = textToValue(gdpValForYear); !ok { continue } else { break } } else { continue } } } else { continue } } else { continue } if gini, ok := economy["Gini Index coefficient - distribution of family income"].(map[string]interface{}); ok { years := []int{2021, 2020, 2019, 2018, 2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010} for _, year := range years { key := fmt.Sprint("Gini Index coefficient - distribution of family income ", year) if val, ok := gini[key].(map[string]interface{}); ok { if country.Gini, ok = textToValue(val); ok { break } else 
{ continue } } } } } else { continue } if people, ok := jsonObject["People and Society"].(map[string]interface{}); ok { if birthRate, ok := people["Birth rate"].(map[string]interface{}); ok { if country.BirthRate, ok = textToValue(birthRate); !ok { continue } } else { continue } if deathRate, ok := people["Death rate"].(map[string]interface{}); ok { if country.DeathRate, ok = textToValue(deathRate); !ok { continue } } else { continue } if netMigrationRate, ok := people["Net migration rate"].(map[string]interface{}); ok { if country.NetMigrationRate, ok = textToValue(netMigrationRate); !ok { continue } } else { continue } if population, ok := people["Population"].(map[string]interface{}); ok { if country.Population, ok = textToValue(population); !ok { continue } } else { continue } if tfr, ok := people["Total fertility rate"].(map[string]interface{}); ok { if country.TotalFertilityRate, ok = textToValue(tfr); !ok { continue } } else { continue } if healthExp, ok := people["Current Health Expenditure"].(map[string]interface{}); ok { if country.HealthExpenditure, ok = textToValue(healthExp); !ok { continue } } else { continue } if lifeExpectancy, ok := people["Life expectancy at birth"].(map[string]interface{}); ok { if totalLifeExpectancy, ok := lifeExpectancy["total population"].(map[string]interface{}); ok { if country.LifeExpectancy, ok = textToValue(totalLifeExpectancy); !ok { continue } } } if populationGrowthRate, ok := people["Population growth rate"].(map[string]interface{}); ok { if country.PopulationGrowthRate, ok = textToValue(populationGrowthRate); !ok { continue } } } else { continue } countries[country.Id] = country } return countries, nil } func GetQuestions(countries map[string]Country, n int) []Question { var questions []Question i := 0 for i < n { c1, c2 := getTwoCountries(countries) var q Question var v1, v2 Value q.Category = getRandomCategory() switch q.Category { case "area": v1, v2 = c1.Area, c2.Area q.Text = "___ is larger in area." 
q.Hint = fmt.Sprintf("%s: %s<br>%s: %s", c1.Name, c1.AreaComparison, c2.Name, c2.AreaComparison) case "population": v1, v2 = c1.Population, c2.Population q.Text = "___ has more people." q.Hint = fmt.Sprintf("%s: population growth rate of %.2f<br>%s: population growth rate of %.2f", c1.Name, c1.PopulationGrowthRate.Value, c2.Name, c2.PopulationGrowthRate.Value) case "gdpPerCapita": v1, v2 = c1.GDPCapita, c2.GDPCapita q.Text = "___ has higher GDP per capita (PPP)." q.Hint = fmt.Sprintf("%s: total GDP (PPP) is %s; %s total GDP (PPP) is %s", c1.Name, c1.GDP.Text, c2.Name, c2.GDP.Text) case "healthExpenditure": v1, v2 = c1.HealthExpenditure, c2.HealthExpenditure q.Text = "___ has higher health expenditure (%GDP)" q.Hint = fmt.Sprintf("%s: death rate of %.2f<br>%s: death rate of %.2f", c1.Name, c1.DeathRate.Value, c2.Name, c2.DeathRate.Value) case "gini": v1, v2 = c1.Gini, c2.Gini q.Text = "___ has a higher Gini index." q.Hint = "The Gini index is a measure of income<br>inequality. Higher values mean higher<br>inequality." case "lifeExpectancy": v1, v2 = c1.LifeExpectancy, c2.LifeExpectancy q.Text = "___ has a higher life expectancy at birth." 
q.Hint = fmt.Sprintf("%s: fertility rate of %.2f<br>%s: fertility rate of %.2f", c1.Name, c1.TotalFertilityRate.Value, c2.Name, c2.TotalFertilityRate.Value) } if v1.Value != 0 && v2.Value != 0 { q.Options = map[string]Option{ c1.Id: {c1.Name, v1.Value, v1.Text}, c2.Id: {c2.Name, v2.Value, v2.Text}, } if v1.Value > v2.Value { q.Fact = c1.Id } else { q.Fact = c2.Id } questions = append(questions, q) i++ } } return questions } func getRandomCategory() string { r := rand.Float64() for k, p := range categoryProbabilities { if r <= p { return k } } return "" } func getTwoCountries(countries map[string]Country) (Country, Country) { rand.Seed(time.Now().UnixNano()) keys := GetKeysCountry(countries) countryOne := countries[GetRandom(keys)] countryTwo := countries[GetRandom(keys)] if contains(excludedCountries, countryOne.Id) || contains(excludedCountries, countryTwo.Id) || countryOne == countryTwo { return getTwoCountries(countries) } return countryOne, countryTwo }
package daemons

import (
	"net/http"

	"docktor/server/storage"
	"docktor/server/types"

	"github.com/labstack/echo/v4"
	log "github.com/sirupsen/logrus"
)

// getAll returns every daemon: admins get the full objects, other users the
// light representation. The previously duplicated fetch/log/respond path is
// now shared.
func getAll(c echo.Context) error {
	user := c.Get("user").(types.User)
	db := c.Get("DB").(*storage.Docktor)

	var (
		daemons interface{}
		err     error
	)
	if user.IsAdmin() {
		daemons, err = db.Daemons().FindAll()
	} else {
		daemons, err = db.Daemons().FindAllLight()
	}
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Error when retrieving daemons")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, daemons)
}

// getAllRundeck finds all daemons and returns them in Rundeck node format.
func getAllRundeck(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemons, err := db.Daemons().FindAll()
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Error when retrieving daemons")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	nodes := make([]types.RundeckDaemon, 0, len(daemons))
	for _, daemon := range daemons {
		nodes = append(nodes, daemon.ToRundeck())
	}
	return c.JSON(http.StatusOK, nodes)
}

// getByID finds one daemon by id. Non-admin users only receive the light
// representation.
func getByID(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemon, err := db.Daemons().FindByID(c.Param(types.DAEMON_ID_PARAM))
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": c.Param(types.DAEMON_ID_PARAM),
			"error":    err,
		}).Error("Error when retrieving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	user := c.Get("user").(types.User)
	if user.IsAdmin() {
		return c.JSON(http.StatusOK, daemon)
	}
	return c.JSON(http.StatusOK, daemon.DaemonLight)
}

// save creates or updates a daemon from the request body, then probes its
// docker status and persists it a second time so the status is stored.
// NOTE(review): the double save presumably exists because SetDockerStatus
// needs a persisted daemon — confirm before collapsing into one save.
func save(c echo.Context) error {
	var u types.Daemon
	err := c.Bind(&u)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Error when saving daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	db := c.Get("DB").(*storage.Docktor)
	u, err = db.Daemons().Save(u)
	if err != nil {
		log.WithFields(log.Fields{
			"daemon": u,
			"error":  err,
		}).Error("Error when creating/updating daemons")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	// Update the docker status
	u.SetDockerStatus()
	u, err = db.Daemons().Save(u)
	if err != nil {
		log.WithFields(log.Fields{
			"daemon": u,
			"error":  err,
		}).Error("Error when creating/updating daemons")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, u)
}

// deleteByID deletes one daemon by id.
func deleteByID(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	err := db.Daemons().Delete(c.Param(types.DAEMON_ID_PARAM))
	if err != nil {
		log.WithFields(log.Fields{
			"daemonID": c.Param(types.DAEMON_ID_PARAM),
			"error":    err,
		}).Error("Error when deleting daemon")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, "ok")
}
// Package mycocontext provides a wrapper over context.Context and some operations on the wrapper. package mycocontext import ( "bytes" "context" ) // Context is the wrapper around context.Context providing type-level safety on presence of several values. type Context interface { context.Context // HyphaName returns the name of the processed hypha. HyphaName() string // Input returns the buffer which contains all characters of the hypha text. Input() *bytes.Buffer // RecursionLevel returns current recursive transclusion level. RecursionLevel() uint // WithIncrementedRecursionLevel returns a copy of the context but with the recursion level incremented. // // lvl1 := ctx.RecursionLevel() // lvl2 := ctx.WithIncrementedRecursionLevel().RecursionLevel() // lvl2 - lvl1 == 1 WithIncrementedRecursionLevel() Context // WebSiteURL returns the URL of the wiki, including the protocol (http or https). It is used for generating OpenGraph meta tags. WebSiteURL() string } // CancelFunc is a function you call to cancel the context. Why would you, though? type CancelFunc context.CancelFunc // ContextFromStringInput returns the context for the given input. func ContextFromStringInput(hyphaName, input string) (Context, CancelFunc) { ctx, cancel := context.WithCancel( context.WithValue( context.WithValue( context.WithValue( context.WithValue( context.Background(), keyHyphaName, hyphaName), keyInputBuffer, bytes.NewBufferString(input), ), keyRecursionLevel, 0, ), keyWebSiteURL, ""), ) return &mycoContext{ctx}, CancelFunc(cancel) } // WithBuffer returns a copy of the given context but with a different input buffer. func WithBuffer(ctx Context, buf *bytes.Buffer) Context { return &mycoContext{context.WithValue(ctx, keyInputBuffer, buf)} } // WithWebSiteURL returns a copy of the given context but with the website URL set to the given one instead of the default empty string. The url should not end on slash because that would break the links generated by the library. 
func WithWebSiteURL(ctx Context, url string) Context { return &mycoContext{context.WithValue(ctx, keyWebSiteURL, url)} }
package aoc2019

import (
	"testing"

	aoc "github.com/janreggie/aoc/internal"
	"github.com/stretchr/testify/assert"
)

// TestDay03 checks Day03 against the published examples and my puzzle input.
func TestDay03(t *testing.T) {
	assert := assert.New(t)
	for _, tc := range []aoc.TestCase{
		{Input: "R8,U5,L5,D3\nU7,R6,D4,L4", Result1: "6", Result2: "30"},
		{Input: "R75,D30,R83,U83,L12,D49,R71,U7,L72\n" +
			"U62,R66,U55,R34,D71,R55,D58,R83", Result1: "159", Result2: "610"},
		{Input: "R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\n" +
			"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7", Result1: "135", Result2: "410"},
		{Details: "Y2019D03 my input", Input: day03myInput, Result1: "1431", Result2: "48012"},
	} {
		tc.Test(Day03, assert)
	}
}

// BenchmarkDay03 benchmarks Day03 on my puzzle input.
func BenchmarkDay03(b *testing.B) {
	aoc.Benchmark(Day03, b, day03myInput)
}
package crypto

import (
	"crypto/sha256"
	"encoding/hex"

	"golang.org/x/crypto/ripemd160"
)

// GeneratePwd derives a password digest from rawPwd and salt and returns its
// hex representation truncated to the first n characters. When n is out of
// range (<= 0 or >= the full length) the whole hex string is returned.
func GeneratePwd(rawPwd, salt string, n int) string {
	pwd := generateRawPwd([]byte(rawPwd), []byte(salt))
	return rawPwd2Str(pwd, n)
}

// generateRawPwd digests rawPwd+salt with double SHA-256 followed by
// RIPEMD-160 and returns the 20-byte result.
func generateRawPwd(rawPwd, salt []byte) []byte {
	rawPwd = append(rawPwd, salt...)
	firstSHA := sha256.Sum256(rawPwd)
	secondSHA := sha256.Sum256(firstSHA[:])
	RIPEMD160 := ripemd160.New()
	// hash.Hash.Write never returns an error per its contract; panic keeps
	// the original fail-loudly behavior if that ever changes.
	if _, err := RIPEMD160.Write(secondSHA[:]); err != nil {
		panic(err)
	}
	return RIPEMD160.Sum(nil)
}

// rawPwd2Str hex-encodes rawPwd (equivalent to the previous
// fmt.Sprintf("%x", ...) but without the fmt boxing/reflection) and
// truncates to the first n characters when n is in range.
func rawPwd2Str(rawPwd []byte, n int) string {
	str := hex.EncodeToString(rawPwd)
	if n >= len(str) || n <= 0 {
		return str
	}
	return str[:n]
}
package jutils

import (
	"container/list"
	"sync"
)

// ArrayStack is a named LIFO stack backed by a doubly linked list.
// Push, Pop and Size are safe for concurrent use.
type ArrayStack struct {
	name string     // stack name; also the key in the instance registry
	list *list.List // backing storage; the list front is the stack top
	lock sync.Mutex // guards list
}

// Push puts ele on top of the stack.
func (stack *ArrayStack) Push(ele interface{}) {
	stack.lock.Lock()
	defer stack.lock.Unlock()
	stack.list.PushFront(ele)
}

// Pop removes and returns the top element, or nil when the stack is empty.
func (stack *ArrayStack) Pop() interface{} {
	stack.lock.Lock()
	defer stack.lock.Unlock()
	ele := stack.list.Front()
	if ele == nil {
		return nil
	}
	stack.list.Remove(ele)
	return ele.Value
}

// GetName returns the stack's name.
func (stack *ArrayStack) GetName() string {
	return stack.name
}

// GetElementList returns the underlying list.
// NOTE: the returned list is the live internal storage; the caller must not
// mutate or iterate it concurrently with Push/Pop.
func (stack *ArrayStack) GetElementList() *list.List {
	return stack.list
}

// Size returns the number of elements on the stack.
func (stack *ArrayStack) Size() int64 {
	// Reading list.Len() must hold the lock too: an unsynchronized read
	// racing with Push/Pop is a data race.
	stack.lock.Lock()
	defer stack.lock.Unlock()
	return int64(stack.list.Len())
}

// arrayStack is the registry of named stack instances, guarded by lock.
var arrayStack map[string]*ArrayStack
var lock sync.Mutex

// NewArrayStack returns the stack registered under name, creating and
// registering it on first use.
func NewArrayStack(name string) *ArrayStack {
	lock.Lock()
	defer lock.Unlock()
	if arrayStack == nil {
		arrayStack = make(map[string]*ArrayStack)
	}
	if _, ok := arrayStack[name]; !ok {
		arrayStack[name] = &ArrayStack{
			name: name,
			list: list.New(),
		}
	}
	return arrayStack[name]
}
package storage

import "testing"

// Test_migrationFileNameRe checks that the migration file name pattern
// rejects malformed names and captures version, name and kind from valid
// ones.
func Test_migrationFileNameRe(t *testing.T) {
	t.Run("bad migration file names", func(t *testing.T) {
		t.Parallel()
		// file name -> reason it must be rejected
		badFileNames := map[string]string{
			"12 3-name-apply.sql": "spaces in versions are invalid",
			"123-n ame-appl.sql":  "spaces in names are invalid",
			"123-name.sql":        `"apply" or "rollback" is required`,
		}
		for name, reason := range badFileNames {
			if migratationFileNameRe.MatchString(name) {
				t.Errorf(`"%s" should be an invalid migration name because %s`, name, reason)
			}
		}
	})
	t.Run("good migration file names", func(t *testing.T) {
		t.Parallel()
		// {file name, expected version, expected name, expected kind}
		goodFileNames := [][4]string{
			{"001-name-apply.sql", "001", "name", "apply"},
			{"001_name_apply.sql", "001", "name", "apply"},
			{"001_name_is-here_apply.sql", "001", "name_is-here", "apply"},
			{"001-name-rollback.sql", "001", "name", "rollback"},
			{"001-n-a-m-e-rollback.sql", "001", "n-a-m-e", "rollback"},
			{"2021-12-01-10:13:13-name-rollback.sql", "2021-12-01-10:13:13", "name", "rollback"},
		}
		for _, goodCase := range goodFileNames {
			matches := migratationFileNameRe.FindStringSubmatch(goodCase[0])
			// Guard against a nil/short result so a non-matching name fails
			// the test cleanly instead of panicking on the index below.
			if len(matches) < 4 {
				t.Errorf("%s should match the migration file name pattern", goodCase[0])
				continue
			}
			// The messages now report the file name (goodCase[0]); they
			// previously repeated the expected value in its place.
			if matches[1] != goodCase[1] {
				t.Errorf("version should be %s for file name %s but got %s", goodCase[1], goodCase[0], matches[1])
			}
			if matches[2] != goodCase[2] {
				t.Errorf("migration name should be %s for file name %s but got %s", goodCase[2], goodCase[0], matches[2])
			}
			if matches[3] != goodCase[3] {
				t.Errorf("migration kind should be %s for file name %s but got %s", goodCase[3], goodCase[0], matches[3])
			}
		}
	})
}
package main

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/yuneejang/webserver/rpc"
)

// SetupRouter configures HTML template rendering and the default routes on
// the given engine and returns it.
func SetupRouter(router *gin.Engine) *gin.Engine {
	// Step: HTML rendering.
	// Using LoadHTMLGlob() or LoadHTMLFiles().
	router.LoadHTMLGlob("../../templates/*.html") // TODO: revisit this relative template path
	router = setRouterDefault(router)
	// Grouping under /v1 conflicts with the wildcard :page route registered
	// by setRouterDefault, because /v1 is captured by :page as well:
	//   panic: 'v1' in new path '/v1/login' conflicts with existing wildcard ':page' in exist
	// setting router group
	// v1 := router.Group("/v1")
	// setRouterV1(v1)
	// v2 := router.Group("/v2")
	// setRouterV2(v2)
	////////////////////////////////////////////
	// JSON test:
	//router = setRouterJSON(router)
	return router
}

// setRouterJSON registers the JSON test handler on "/".
// Currently unused (see SetupRouter).
func setRouterJSON(router *gin.Engine) *gin.Engine {
	// Using GET, POST, PUT, PATCH, DELETE and OPTIONS
	router.GET("/", rpc.RequestJSON)
	//router.GET("/", rpc.ParseRequest())
	return router
}

// setRouterDefault serves index.html at "/" and renders any other single
// path segment as a template of the same name via the :page wildcard.
func setRouterDefault(router *gin.Engine) *gin.Engine {
	// Using GET, POST, PUT, PATCH, DELETE and OPTIONS
	router.GET("/", func(c *gin.Context) {
		// Third argument is the template data; nil because these templates
		// take no data.
		c.HTML(http.StatusOK, "index.html", nil)
	})
	router.GET("/:page", func(c *gin.Context) {
		page := c.Param("page")
		c.HTML(http.StatusOK, page, nil)
	})
	return router
}

// setRouterV1 registers the v1 endpoints on the given group.
// Currently unused (see SetupRouter).
func setRouterV1(v1 *gin.RouterGroup) {
	v1.GET("/login", callV1)
	v1.GET("/submit", callV1)
	v1.GET("/read", callV1)
}

// setRouterV2 registers the v2 endpoints on the given group.
// Currently unused (see SetupRouter).
func setRouterV2(v2 *gin.RouterGroup) {
	{
		v2.GET("/login", callV2)
		v2.GET("/submit", callV2)
		v2.GET("/read", callV2)
	}
}

// callV1 is a placeholder handler for the v1 routes.
func callV1(c *gin.Context) {
	fmt.Println("CALL ! V1 !!")
	c.String(http.StatusOK, "Hello V1 ")
}

// callV2 is a placeholder handler for the v2 routes.
// NOTE(review): no registered route declares a :name parameter, so
// c.Param("name") is always empty here — confirm intended.
func callV2(c *gin.Context) {
	fmt.Println("CALL ! V2 !!")
	name := c.Param("name")
	c.String(http.StatusOK, "Hello V2 %s", name)
}
package balancetests func fnv1modInit() { methods = append(methods, method{name: "FNV1-mod", f: fnv1mod}, method{name: "FNV1a-mod", f: fnv1amod}) } func fnv1mod(k string) string { var h = 2166136261 for _, c := range []byte(k) { h *= 16777619 h ^= int(c) } return nodeList[h&(Nodes-1)] } func fnv1amod(k string) string { var h = 2166136261 for _, c := range []byte(k) { h ^= int(c) h *= 16777619 } return nodeList[h&(Nodes-1)] }
package controller

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/andygrunwald/perseus/config"
	"github.com/andygrunwald/perseus/dependency"
	"github.com/andygrunwald/perseus/dependency/repository"
	"github.com/andygrunwald/perseus/downloader"
	"github.com/andygrunwald/perseus/types/set"
)

// MirrorController reflects the business logic and the Command interface to mirror all configured packages.
// This command is independent from an human interface (CLI, HTTP, etc.)
// The human interfaces will interact with this command.
type MirrorController struct {
	// Config is the main medusa configuration
	Config *config.Medusa
	// Log represents a logger to log messages
	Log logrus.FieldLogger
	// NumOfWorker is the number of worker used for concurrent actions (like resolving the dependency tree)
	NumOfWorker int

	// wg synchronizes concurrent work.
	// NOTE(review): it is (re)initialized in Run but never waited on in this
	// file — confirm it is still needed.
	wg sync.WaitGroup
}

// Run is the business logic of MirrorCommand: it collects the configured
// repositories, resolves the Composer dependency tree via Packagist,
// mirrors every repository with git, and finally writes the Satis
// configuration listing the mirrored repositories.
func (c *MirrorController) Run() error {
	c.wg = sync.WaitGroup{}
	repos := set.New()

	// Get list of manual entered repositories
	// and add them to the set
	repoList, err := c.Config.GetNamesOfRepositories()
	if err != nil {
		// A configuration without repositories is informational only;
		// resolved dependencies below may still provide work.
		if config.IsNoRepositories(err) {
			c.Log.WithError(err).Info("Configuration")
		} else {
			c.Log.WithError(err).Info("")
		}
	}
	for _, r := range repoList {
		repos.Add(r)
	}

	// Get all required repositories and resolve those dependencies
	pURL := "https://packagist.org/"
	packagistClient, err := repository.NewPackagist(pURL, nil)
	if err != nil {
		c.Log.WithError(err).Info("")
	}

	// Lets get a dependency resolver.
	// If we can't bootstrap one, we are lost anyway.
	// We set the queue length to the number of workers + 1. Why?
	// With this every worker has work, when the queue is filled.
	// During the add command, this is enough in most of the cases.
	d, err := dependency.NewComposerResolver(c.NumOfWorker, packagistClient)
	if err != nil {
		return err
	}
	results := d.GetResultStream()

	require := c.Config.GetRequire()
	// Loop over the packages and add them
	l := []*dependency.Package{}
	for _, r := range require {
		p, _ := dependency.NewPackage(r, "")
		l = append(l, p)
	}
	go d.Resolve(l)

	// Finally we collect all the results of the work.
	for p := range results {
		if p.Error != nil {
			c.Log.WithFields(logrus.Fields{
				"package":      p.Package.Name,
				"responseCode": p.Response.StatusCode,
			}).WithError(p.Error).Info("Error while resolving dependencies of package")
			continue
		}
		repos.Add(p.Package)
	}

	c.Log.WithFields(logrus.Fields{
		"amountPackages": repos.Len(),
		"amountWorker":   c.NumOfWorker,
	}).Info("Start concurrent download process")

	loader, err := downloader.NewGitDownloader(c.NumOfWorker, c.Config.GetString("repodir"))
	if err != nil {
		return err
	}
	loaderResults := loader.GetResultStream()

	// Flatten the set once and reuse the result — previously Flatten() was
	// called again inside the loop, discarding this value.
	flatten := repos.Flatten()
	loaderList := make([]*dependency.Package, 0, len(flatten))
	for _, item := range flatten {
		loaderList = append(loaderList, item.(*dependency.Package))
	}
	loader.Download(loaderList)

	var satisRepositories []string
	for i := 1; i <= int(repos.Len()); i++ {
		v := <-loaderResults
		if v.Error != nil {
			// An already-mirrored package is not an error: fall through and
			// keep it in the Satis repository list.
			if os.IsExist(v.Error) {
				c.Log.WithFields(logrus.Fields{
					"package": v.Package.Name,
				}).Info("Package exists on disk. Try updating it instead. Skipping.")
			} else {
				c.Log.WithFields(logrus.Fields{
					"package": v.Package.Name,
				}).WithError(v.Error).Info("Error while mirroring package")
				// If we have an error, we don't need to add it to satis repositories
				continue
			}
		} else {
			c.Log.WithFields(logrus.Fields{
				"package": v.Package.Name,
			}).Info("Mirroring of package successful")
		}
		satisRepositories = append(satisRepositories, c.getLocalURLForRepository(v.Package.Name))
	}
	loader.Close()

	// And as a final step, write the satis configuration
	err = c.writeSatisConfig(satisRepositories...)
	return err
}

// getLocalURLForRepository builds the URL under which the mirrored
// repository p is reachable: an HTTP URL when "satisurl" is configured,
// otherwise a file:// URL inside "repodir".
func (c *MirrorController) getLocalURLForRepository(p string) string {
	var r string
	satisURL := c.Config.GetString("satisurl")
	repoDir := c.Config.GetString("repodir")
	if len(satisURL) > 0 {
		r = fmt.Sprintf("%s/%s.git", satisURL, p)
	} else {
		t := fmt.Sprintf("%s/%s.git", repoDir, p)
		t = strings.TrimLeft(filepath.Clean(t), "/")
		r = fmt.Sprintf("file:///%s", t)
	}
	return r
}

// writeSatisConfig adds the given repository URLs to the configured Satis
// configuration file and writes it back. A missing "satisconfig" setting
// skips the step without error.
func (c *MirrorController) writeSatisConfig(satisRepositories ...string) error {
	// Write Satis file
	satisConfig := c.Config.GetString("satisconfig")
	if len(satisConfig) == 0 {
		c.Log.Info("No Satis configuration specified. Skipping to write a satis configuration.")
		return nil
	}
	satisContent, err := ioutil.ReadFile(satisConfig)
	if err != nil {
		return fmt.Errorf("Can't read Satis configuration %s: %s", satisConfig, err)
	}
	j, err := config.NewJSONProvider(satisContent)
	if err != nil {
		return fmt.Errorf("Error while creating JSONProvider: %s", err)
	}
	s, err := config.NewSatis(j)
	if err != nil {
		return fmt.Errorf("Error while creating Satis object: %s", err)
	}
	s.AddRepositories(satisRepositories...)
	err = s.WriteFile(satisConfig, 0644)
	if err != nil {
		return fmt.Errorf("Writing Satis configuration to %s failed: %s", satisConfig, err)
	}
	c.Log.WithFields(logrus.Fields{
		"path": satisConfig,
	}).Info("Satis configuration successful written")
	return nil
}
// Copyright © 2018 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause

package processors

import (
	"bytes"
	"fmt"
	"reflect"
	"regexp"
	"strings"
	"text/template"

	"github.com/vmware/kube-fluentd-operator/config-reloader/fluentd"
	"github.com/vmware/kube-fluentd-operator/config-reloader/util"
)

// expandLabelsMacroState rewrites label-macro tags in filter/match
// directives into concrete fluentd tag patterns and appends the retag
// plumbing rendered from retagTemplate below.
type expandLabelsMacroState struct {
	BaseProcessorState
}

// reSafe matches the characters that must be rewritten inside a tag part:
// '.' and '-', plus the empty string (so empty label values become "_").
var reSafe = regexp.MustCompile(`[.-]|^$`)

// got this value from running kubectl with bad args
// error: invalid label value: "test=-asdf": a valid label must be an empty string
// or consist of alphanumeric characters, '-', '_' or '.', and must start and end with
// an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is
// '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?'

// fns supplies the "last" helper so the template can omit the separator
// after the final label.
var fns = template.FuncMap{
	"last": func(x int, a interface{}) bool {
		return x == reflect.ValueOf(a).Len()-1
	},
}

// retagTemplate emits three fluentd directives: one that computes a
// dot-joined string of the referenced pod label values, one that rewrites
// the tag using that string, and one that removes the helper field again.
var retagTemplate = template.Must(template.New("retagTemplate").Funcs(fns).Parse(
	`
<filter {{.Pattern}}>
  @type record_transformer
  enable_ruby true
  <record>
    kubernetes_pod_label_values {{range $i, $e := .Labels -}}${record.dig('kubernetes','labels','{{$e}}')&.gsub(/[.-]/, '_') || '_'}{{if last $i $.Labels }}{{else}}.{{end}}{{- end}}
  </record>
</filter>

<match {{.Pattern}}>
  @type rewrite_tag_filter
  <rule>
    key kubernetes_pod_label_values
    pattern ^(.+)$
    tag ${tag}._labels.$1
  </rule>
</match>

<filter {{.Pattern}}.**>
  @type record_transformer
  remove_keys kubernetes_pod_label_values
</filter>
`))

// makeTagFromFilter translates a label-macro pseudo-tag into a concrete
// fluentd tag pattern for namespace ns. sortedLabelNames fixes the order of
// the label parts; labelNames holds the label->value pairs this directive
// actually constrains.
func makeTagFromFilter(ns string, sortedLabelNames []string, labelNames map[string]string) string {
	buf := &bytes.Buffer{}
	if cont, ok := labelNames[util.ContainerLabel]; ok {
		// if the special label _container is used then its name goes to the
		// part of the tag that denotes the container
		buf.WriteString(fmt.Sprintf("kube.%s.*.%s._labels.", ns, cont))
	} else {
		buf.WriteString(fmt.Sprintf("kube.%s.*.*._labels.", ns))
	}

	for i, lb := range sortedLabelNames {
		if lb == util.ContainerLabel {
			// already consumed in the container position above
			continue
		}
		val, ok := labelNames[lb]
		if ok {
			buf.WriteString(safeLabelValue(val))
		} else {
			// label not constrained by this directive: match any value
			buf.WriteString("*")
		}
		if i < len(sortedLabelNames)-1 {
			buf.WriteString(".")
		}
	}

	return buf.String()
}

// replaces the empty string and all . with _
// as they have special meaning to fluentd
func safeLabelValue(s string) string {
	return reSafe.ReplaceAllString(s, "_")
}

// Process collects every label referenced by label-macro tags in input,
// rewrites those tags into concrete patterns, and prepends the retag
// directives rendered from retagTemplate.
func (p *expandLabelsMacroState) Process(input fluentd.Fragment) (fluentd.Fragment, error) {
	allReferencedLabels := map[string]string{}

	// First pass: gather the set of label names used anywhere in input.
	collectLabels := func(d *fluentd.Directive, ctx *ProcessorContext) error {
		if d.Name != "filter" && d.Name != "match" {
			return nil
		}
		if !strings.HasPrefix(d.Tag, util.MacroLabels) {
			return nil
		}
		labelNames, err := util.ParseTagToLabels(d.Tag)
		if err != nil {
			return err
		}
		for lb := range labelNames {
			allReferencedLabels[lb] = ""
		}
		return nil
	}

	e := applyRecursivelyInPlace(input, p.Context, collectLabels)
	if e != nil {
		return nil, e
	}

	if len(allReferencedLabels) == 0 {
		// No macros used: leave the fragment untouched.
		return input, nil
	}

	// The container pseudo-label is positional, not part of the label list.
	delete(allReferencedLabels, util.ContainerLabel)
	sortedLabelNames := util.SortedKeys(allReferencedLabels)

	// Second pass: rewrite each macro tag into its concrete pattern.
	replaceLabels := func(d *fluentd.Directive, ctx *ProcessorContext) error {
		if d.Name != "filter" && d.Name != "match" {
			return nil
		}
		if !strings.HasPrefix(d.Tag, util.MacroLabels) {
			return nil
		}
		labelNames, err := util.ParseTagToLabels(d.Tag)
		if err != nil {
			// should never happen as the error should be caught beforehand
			return nil
		}
		d.Tag = makeTagFromFilter(ctx.Namespace, sortedLabelNames, labelNames)
		ctx.GenerationContext.augmentTag(d)
		return nil
	}
	// NOTE(review): the returned error is discarded; replaceLabels itself
	// only returns nil, but confirm applyRecursivelyInPlace cannot fail
	// for other reasons.
	applyRecursivelyInPlace(input, p.Context, replaceLabels)

	// prepare extra directives
	model := struct {
		Pattern string
		Labels  []string
	}{
		fmt.Sprintf("kube.%s.*.*", p.Context.Namespace),
		sortedLabelNames,
	}

	writer := &bytes.Buffer{}
	// NOTE(review): Execute's error is discarded — the template is static
	// so this should not fail, but consider handling it.
	retagTemplate.Execute(writer, model)

	extraDirectives, err := fluentd.ParseString(writer.String())
	if err != nil {
		return nil, err
	}

	extraDirectives = append(extraDirectives, input...)
	return extraDirectives, nil
}
// Copyright 2019 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package android

// This file provides module types that implement wrapper module types that add conditionals on
// Soong config variables.

import (
	"fmt"
	"path/filepath"
	"strings"
	"text/scanner"

	"github.com/google/blueprint"
	"github.com/google/blueprint/parser"
	"github.com/google/blueprint/proptools"

	"android/soong/android/soongconfig"
)

func init() {
	RegisterModuleType("soong_config_module_type_import", soongConfigModuleTypeImportFactory)
	RegisterModuleType("soong_config_module_type", soongConfigModuleTypeFactory)
	RegisterModuleType("soong_config_string_variable", soongConfigStringVariableDummyFactory)
	RegisterModuleType("soong_config_bool_variable", soongConfigBoolVariableDummyFactory)
}

// soongConfigModuleTypeImport is the module backing a
// soong_config_module_type_import statement; it carries only the import
// properties and emits no build rules.
type soongConfigModuleTypeImport struct {
	ModuleBase
	properties soongConfigModuleTypeImportProperties
}

type soongConfigModuleTypeImportProperties struct {
	// From is the path of the Android.bp file to import module types from.
	From string
	// Module_types lists the module type names to import from that file.
	Module_types []string
}

// soong_config_module_type_import imports module types with conditionals on Soong config
// variables from another Android.bp file. The imported module type will exist for all
// modules after the import in the Android.bp file.
//
// For example, an Android.bp file could have:
//
//     soong_config_module_type_import {
//         from: "device/acme/Android.bp",
//         module_types: ["acme_cc_defaults"],
//     }
//
//     acme_cc_defaults {
//         name: "acme_defaults",
//         cflags: ["-DGENERIC"],
//         soong_config_variables: {
//             board: {
//                 soc_a: {
//                     cflags: ["-DSOC_A"],
//                 },
//                 soc_b: {
//                     cflags: ["-DSOC_B"],
//                 },
//             },
//             feature: {
//                 cflags: ["-DFEATURE"],
//             },
//             width: {
//                 cflags: ["-DWIDTH=%s"],
//             },
//         },
//     }
//
//     cc_library {
//         name: "libacme_foo",
//         defaults: ["acme_defaults"],
//         srcs: ["*.cpp"],
//     }
//
// And device/acme/Android.bp could have:
//
//     soong_config_module_type {
//         name: "acme_cc_defaults",
//         module_type: "cc_defaults",
//         config_namespace: "acme",
//         variables: ["board"],
//         bool_variables: ["feature"],
//         value_variables: ["width"],
//         properties: ["cflags", "srcs"],
//     }
//
//     soong_config_string_variable {
//         name: "board",
//         values: ["soc_a", "soc_b"],
//     }
//
// If an acme BoardConfig.mk file contained:
//
//     SOONG_CONFIG_NAMESPACES += acme
//     SOONG_CONFIG_acme += \
//         board \
//         feature \
//
//     SOONG_CONFIG_acme_board := soc_a
//     SOONG_CONFIG_acme_feature := true
//     SOONG_CONFIG_acme_width := 200
//
// Then libacme_foo would build with cflags "-DGENERIC -DSOC_A -DFEATURE -DWIDTH=200".
func soongConfigModuleTypeImportFactory() Module {
	module := &soongConfigModuleTypeImport{}

	module.AddProperties(&module.properties)
	// Register the imported module types as soon as this module is loaded,
	// so they are available to later modules in the same Android.bp file.
	AddLoadHook(module, func(ctx LoadHookContext) {
		importModuleTypes(ctx, module.properties.From, module.properties.Module_types...)
	})

	initAndroidModuleBase(module)
	return module
}

func (m *soongConfigModuleTypeImport) Name() string {
	// The generated name is non-deterministic, but it does not
	// matter because this module does not emit any rules.
	return soongconfig.CanonicalizeToProperty(m.properties.From) +
		"soong_config_module_type_import_" + fmt.Sprintf("%p", m)
}

func (*soongConfigModuleTypeImport) Nameless() {}

// GenerateAndroidBuildActions is a no-op: import modules emit no rules.
func (*soongConfigModuleTypeImport) GenerateAndroidBuildActions(ModuleContext) {}

// Create dummy modules for soong_config_module_type and soong_config_*_variable
type soongConfigModuleTypeModule struct {
	ModuleBase
	properties soongconfig.ModuleTypeProperties
}

// soong_config_module_type defines module types with conditionals on Soong config
// variables. The new module type will exist for all modules after the definition
// in an Android.bp file, and can be imported into other Android.bp files using
// soong_config_module_type_import.
//
// For example, an Android.bp file could have:
//
//     soong_config_module_type {
//         name: "acme_cc_defaults",
//         module_type: "cc_defaults",
//         config_namespace: "acme",
//         variables: ["board"],
//         bool_variables: ["feature"],
//         value_variables: ["width"],
//         properties: ["cflags", "srcs"],
//     }
//
//     soong_config_string_variable {
//         name: "board",
//         values: ["soc_a", "soc_b"],
//     }
//
//     acme_cc_defaults {
//         name: "acme_defaults",
//         cflags: ["-DGENERIC"],
//         soong_config_variables: {
//             board: {
//                 soc_a: {
//                     cflags: ["-DSOC_A"],
//                 },
//                 soc_b: {
//                     cflags: ["-DSOC_B"],
//                 },
//             },
//             feature: {
//                 cflags: ["-DFEATURE"],
//             },
//             width: {
//                 cflags: ["-DWIDTH=%s"],
//             },
//         },
//     }
//
//     cc_library {
//         name: "libacme_foo",
//         defaults: ["acme_defaults"],
//         srcs: ["*.cpp"],
//     }
//
// If an acme BoardConfig.mk file contained:
//
//     SOONG_CONFIG_NAMESPACES += acme
//     SOONG_CONFIG_acme += \
//         board \
//         feature \
//
//     SOONG_CONFIG_acme_board := soc_a
//     SOONG_CONFIG_acme_feature := true
//     SOONG_CONFIG_acme_width := 200
//
// Then libacme_foo would build with cflags "-DGENERIC -DSOC_A -DFEATURE".
func soongConfigModuleTypeFactory() Module {
	module := &soongConfigModuleTypeModule{}

	module.AddProperties(&module.properties)

	AddLoadHook(module, func(ctx LoadHookContext) {
		// A soong_config_module_type module should implicitly import itself.
		importModuleTypes(ctx, ctx.BlueprintsFile(), module.properties.Name)
	})

	initAndroidModuleBase(module)

	return module
}

func (m *soongConfigModuleTypeModule) Name() string {
	return m.properties.Name
}

func (*soongConfigModuleTypeModule) Nameless() {}

// GenerateAndroidBuildActions is a no-op: definition modules emit no rules.
func (*soongConfigModuleTypeModule) GenerateAndroidBuildActions(ctx ModuleContext) {}

// soongConfigStringVariableDummyModule backs a soong_config_string_variable
// definition; it only carries properties.
type soongConfigStringVariableDummyModule struct {
	ModuleBase
	properties       soongconfig.VariableProperties
	stringProperties soongconfig.StringVariableProperties
}

// soongConfigBoolVariableDummyModule backs a soong_config_bool_variable
// definition; it only carries properties.
type soongConfigBoolVariableDummyModule struct {
	ModuleBase
	properties soongconfig.VariableProperties
}

// soong_config_string_variable defines a variable and a set of possible string values for use
// in a soong_config_module_type definition.
func soongConfigStringVariableDummyFactory() Module {
	module := &soongConfigStringVariableDummyModule{}
	module.AddProperties(&module.properties, &module.stringProperties)
	initAndroidModuleBase(module)
	return module
}

// soong_config_bool_variable defines a variable with true or false values for use
// in a soong_config_module_type definition.
func soongConfigBoolVariableDummyFactory() Module { module := &soongConfigBoolVariableDummyModule{} module.AddProperties(&module.properties) initAndroidModuleBase(module) return module } func (m *soongConfigStringVariableDummyModule) Name() string { return m.properties.Name } func (*soongConfigStringVariableDummyModule) Nameless() {} func (*soongConfigStringVariableDummyModule) GenerateAndroidBuildActions(ctx ModuleContext) {} func (m *soongConfigBoolVariableDummyModule) Name() string { return m.properties.Name } func (*soongConfigBoolVariableDummyModule) Nameless() {} func (*soongConfigBoolVariableDummyModule) GenerateAndroidBuildActions(ctx ModuleContext) {} func importModuleTypes(ctx LoadHookContext, from string, moduleTypes ...string) { from = filepath.Clean(from) if filepath.Ext(from) != ".bp" { ctx.PropertyErrorf("from", "%q must be a file with extension .bp", from) return } if strings.HasPrefix(from, "../") { ctx.PropertyErrorf("from", "%q must not use ../ to escape the source tree", from) return } moduleTypeDefinitions := loadSoongConfigModuleTypeDefinition(ctx, from) if moduleTypeDefinitions == nil { return } for _, moduleType := range moduleTypes { if factory, ok := moduleTypeDefinitions[moduleType]; ok { ctx.registerScopedModuleType(moduleType, factory) } else { ctx.PropertyErrorf("module_types", "module type %q not defined in %q", moduleType, from) } } } // loadSoongConfigModuleTypeDefinition loads module types from an Android.bp file. It caches the // result so each file is only parsed once. 
func loadSoongConfigModuleTypeDefinition(ctx LoadHookContext, from string) map[string]blueprint.ModuleFactory { type onceKeyType string key := NewCustomOnceKey(onceKeyType(filepath.Clean(from))) reportErrors := func(ctx LoadHookContext, filename string, errs ...error) { for _, err := range errs { if parseErr, ok := err.(*parser.ParseError); ok { ctx.Errorf(parseErr.Pos, "%s", parseErr.Err) } else { ctx.Errorf(scanner.Position{Filename: filename}, "%s", err) } } } return ctx.Config().Once(key, func() interface{} { ctx.AddNinjaFileDeps(from) r, err := ctx.Config().fs.Open(from) if err != nil { ctx.PropertyErrorf("from", "failed to open %q: %s", from, err) return (map[string]blueprint.ModuleFactory)(nil) } mtDef, errs := soongconfig.Parse(r, from) if len(errs) > 0 { reportErrors(ctx, from, errs...) return (map[string]blueprint.ModuleFactory)(nil) } globalModuleTypes := ctx.moduleFactories() factories := make(map[string]blueprint.ModuleFactory) for name, moduleType := range mtDef.ModuleTypes { factory := globalModuleTypes[moduleType.BaseModuleType] if factory != nil { factories[name] = soongConfigModuleFactory(factory, moduleType) } else { reportErrors(ctx, from, fmt.Errorf("missing global module type factory for %q", moduleType.BaseModuleType)) } } if ctx.Failed() { return (map[string]blueprint.ModuleFactory)(nil) } return factories }).(map[string]blueprint.ModuleFactory) } // soongConfigModuleFactory takes an existing soongConfigModuleFactory and a ModuleType and returns // a new soongConfigModuleFactory that wraps the existing soongConfigModuleFactory and adds conditional on Soong config // variables. 
func soongConfigModuleFactory(factory blueprint.ModuleFactory, moduleType *soongconfig.ModuleType) blueprint.ModuleFactory { conditionalFactoryProps := soongconfig.CreateProperties(factory, moduleType) if conditionalFactoryProps.IsValid() { return func() (blueprint.Module, []interface{}) { module, props := factory() conditionalProps := proptools.CloneEmptyProperties(conditionalFactoryProps) props = append(props, conditionalProps.Interface()) AddLoadHook(module, func(ctx LoadHookContext) { config := ctx.Config().VendorConfig(moduleType.ConfigNamespace) newProps, err := soongconfig.PropertiesToApply(moduleType, conditionalProps, config) if err != nil { ctx.ModuleErrorf("%s", err) return } for _, ps := range newProps { ctx.AppendProperties(ps) } }) return module, props } } else { return factory } }
package qbuilder import ( "reflect" "testing" "github.com/spacycoder/cosmosdb-go-sdk/cosmos" ) func TestQueryBuilder(t *testing.T) { qb := New() params := []cosmos.P{{"@SOMETHING", 20}, {"@NAME", "Lars"}} res := qb.Select("*").From("root").And("root.age > @SOMETHING").And("root.name = @NAME").Params(params...).Build() if res.Query != "SELECT * FROM root WHERE root.age > @SOMETHING AND root.name = @NAME" { t.Errorf("Invalid query string result: %s, should be: %s", res.Query, "SELECT * FROM root WHERE root.age > @SOMETHING AND root.name = @NAME") } if len(res.Parameters) != len(params) { t.Errorf("Invalid count: %d, should be: %d", len(res.Parameters), len(params)) } for i, p := range res.Parameters { if p.Name != params[i].Name { t.Errorf("Names do not match: %s vs %s", p.Name, params[i].Name) } if !reflect.DeepEqual(p.Value, params[i].Value) { t.Errorf("Values are not equal") } } qb = New() params = []cosmos.P{{"@SOMETHING", 20}, {"@NAME", "Lars"}} res = qb.Select("*").From("root").And("root.age > @SOMETHING").Or("root.name = @NAME").Params(params...).Build() if res.Query != "SELECT * FROM root WHERE root.age > @SOMETHING OR root.name = @NAME" { t.Errorf("Invalid query string result: %s, should be: %s", res.Query, "SELECT * FROM root WHERE root.age > @SOMETHING OR root.name = @NAME") } if len(res.Parameters) != len(params) { t.Errorf("Invalid count: %d, should be: %d", len(res.Parameters), len(params)) } for i, p := range res.Parameters { if p.Name != params[i].Name { t.Errorf("Names do not match: %s vs %s", p.Name, params[i].Name) } if !reflect.DeepEqual(p.Value, params[i].Value) { t.Errorf("Values are not equal") } } }
package main import ( "net/http" "net/http/httptest" "testing" "github.com/kaustavha/gravity-interview/src/authenticator" ) // Testing philosophy: Test the top level to-be-consumed API, and test for the different cases handled by internals func TestAuthMiddleware_withAuthcheckHandler_Success(t *testing.T) { // setup db, err := createDBConn() a, err := authenticator.NewAuthenticator( accountID, email, hashedPass, maxUsers, []byte(signingKey), maxUsersUpgraded, defaultCookieName, db, ) m := GetNewMiddlewareService(a) as := GetNewAuthService(a, defaultCookieName) // end setup req, err := http.NewRequest("GET", "/api/authcheck", nil) rr := httptest.NewRecorder() handler := http.HandlerFunc(m.applyMiddlewares(as.AuthcheckHandler)) handler.ServeHTTP(rr, req) if err != nil { t.Errorf("Did not expect an error response : err: %v ; code: %v", err, rr.Code) } if rr.Code != http.StatusUnauthorized { t.Errorf("Expected: %v but got %v", http.StatusUnauthorized, rr.Code) } }
package hostsfile import ( "fmt" "github.com/AdguardTeam/golibs/errors" ) // ErrEmptyLine is returned when the hosts file line is empty or contains only // comments and spaces. const ErrEmptyLine errors.Error = "line is empty" // ErrNoHosts is returned when the record doesn't contain any delimiters, but // the IP address is valid. const ErrNoHosts errors.Error = "no hostnames" // LineError is an error about a specific line in the hosts file. type LineError struct { // err is the original error. err error // Line is the line number in the hosts file source. Line int } // type check var _ error = (*LineError)(nil) // Error implements the [error] interface for *LineErr. func (e *LineError) Error() (msg string) { return fmt.Sprintf("line %d: %s", e.Line, e.err) } // type check var _ errors.Wrapper = (*LineError)(nil) // Unwrap implements the [errors.Wrapper] interface for *LineErr. func (e *LineError) Unwrap() (unwraped error) { return e.err }
package perf

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	"github.com/ghodss/yaml"
	"github.com/gofrs/uuid"
	log "github.com/sirupsen/logrus"

	"github.com/layer5io/meshery/mesheryctl/internal/cli/root/config"
	"github.com/layer5io/meshery/mesheryctl/internal/cli/root/constants"
	"github.com/layer5io/meshery/mesheryctl/pkg/utils"
	"github.com/layer5io/meshery/models"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// resultStruct holds the fields shown by the expanded (--view) output for a
// single performance test result.
type resultStruct struct {
	Name          string
	StartTime     *time.Time
	LatenciesMs   *models.LatenciesMs
	QPS           int
	URL           string
	UserID        *uuid.UUID
	Duration      string
	MesheryID     *uuid.UUID
	LoadGenerator string
}

var (
	// resultPage is the 1-based page of results requested on the CLI.
	resultPage int
)

// resultCmd lists the stored results of a performance profile, optionally as
// JSON/YAML (--output-format) or expanded detail for one result (--view).
var resultCmd = &cobra.Command{
	Use:   "result profile-name",
	Short: "List performance test results",
	Long:  `List all the available test results of a performance profile`,
	Args:  cobra.MinimumNArgs(1),
	Example: `
// List Test results (maximum 25 results)
mesheryctl perf result saturday profile

// View other set of performance results with --page (maximum 25 results)
mesheryctl perf result saturday profile --page 2

// View single performance result with detailed information
mesheryctl perf result saturday profile --view
	`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// used for searching performance profile
		var searchString, profileID string
		// setting up for error formatting
		cmdUsed = "result"

		mctlCfg, err := config.GetMesheryCtl(viper.GetViper())
		if err != nil {
			return ErrMesheryConfig(err)
		}
		profileURL := mctlCfg.GetBaseMesheryURL() + "/api/user/performance/profiles"

		// set default tokenpath for command.
		if tokenPath == "" {
			tokenPath = constants.GetCurrentAuthToken()
		}

		// Merge args to get result-name
		searchString = strings.Join(args, "%20")

		data, _, _, err := fetchPerformanceProfiles(profileURL, searchString)
		if err != nil {
			return err
		}

		if len(data) == 0 {
			log.Info("No Performance Profiles found with the given name")
			return nil
		}
		if len(data) == 1 {
			// found only one profile with matching name
			profileID = data[0][1]
		} else {
			// user prompt to select profile
			selectedProfileIndex, err := userPrompt("profile", "Found multiple profiles with given name, select a profile", data)
			if err != nil {
				return err
			}
			// ids got shifted with 1 in userPrompt()
			profileID = data[selectedProfileIndex][2]
		}

		resultURL := mctlCfg.GetBaseMesheryURL() + "/api/user/performance/profiles/" + profileID + "/results"
		data, expandedData, body, err := fetchPerformanceProfileResults(resultURL)
		if err != nil {
			return err
		}

		if len(data) == 0 {
			log.Info("No Test Results to display")
			return nil
		}

		if outputFormatFlag != "" {
			// Raw output: re-marshal only the results array, as JSON or YAML.
			var tempStruct *models.PerformanceResultsAPIResponse
			err = json.Unmarshal(body, &tempStruct)
			if err != nil {
				return ErrFailUnmarshal(err)
			}
			body, _ = json.Marshal(tempStruct.Results)
			if outputFormatFlag == "yaml" {
				body, _ = yaml.JSONToYAML(body)
			} else if outputFormatFlag != "json" {
				return ErrInvalidOutputChoice()
			}
			log.Info(string(body))
		} else if !expand {
			utils.PrintToTable([]string{"NAME", "MESH", "QPS", "DURATION", "P50", "P99.9", "START-TIME"}, data)
		} else {
			// if data consists only one profile, directly print profile
			index := 0
			if len(data) > 1 {
				index, err = userPrompt("result", "Select Performance-test result to expand", data)
				if err != nil {
					return err
				}
			}
			a := expandedData[index]
			fmt.Printf("Name: %v\n", a.Name)
			fmt.Printf("UserID: %s\n", a.UserID.String())
			fmt.Printf("Endpoint: %v\n", a.URL)
			fmt.Printf("QPS: %v\n", a.QPS)
			fmt.Printf("Test run duration: %v\n", a.Duration)
			fmt.Printf("Latencies _ms: Avg: %v, Max: %v, Min: %v, P50: %v, P90: %v, P99: %v\n",
				a.LatenciesMs.Average, a.LatenciesMs.Max, a.LatenciesMs.Min, a.LatenciesMs.P50, a.LatenciesMs.P90, a.LatenciesMs.P99)
			fmt.Printf("Start Time: %v\n", fmt.Sprintf("%d-%d-%d %d:%d:%d", int(a.StartTime.Month()), a.StartTime.Day(), a.StartTime.Year(), a.StartTime.Hour(), a.StartTime.Minute(), a.StartTime.Second()))
			fmt.Printf("Meshery ID: %v\n", a.MesheryID.String())
			fmt.Printf("Load Generator: %v\n", a.LoadGenerator)
		}
		return nil
	},
}

// fetchPerformanceProfileResults fetches one page of results for a specific
// profile and returns the table rows, the expanded result structs, the raw
// response body, and an error.
func fetchPerformanceProfileResults(url string) ([][]string, []resultStruct, []byte, error) {
	client := &http.Client{}
	var response *models.PerformanceResultsAPIResponse
	// resultPage is 1-based on the CLI; the API expects a 0-based page index.
	tempURL := fmt.Sprintf("%s?pageSize=%d&page=%d", url, pageSize, resultPage-1)
	// The URL is assembled above from known-good parts, so the error from
	// NewRequest is intentionally ignored.
	req, _ := http.NewRequest("GET", tempURL, nil)
	err := utils.AddAuthDetails(req, tokenPath)
	if err != nil {
		return nil, nil, nil, ErrAttachAuthToken(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, nil, nil, ErrFailRequest(err)
	}
	// Fix: close the body on every path. Previously the two early returns
	// below happened before the defer was registered and leaked the body.
	defer resp.Body.Close()

	// failsafe for no authentication
	if utils.ContentTypeIsHTML(resp) {
		return nil, nil, nil, ErrUnauthenticated()
	}
	// failsafe for bad api call
	if resp.StatusCode != 200 {
		return nil, nil, nil, ErrFailReqStatus(resp.StatusCode)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, nil, errors.Wrap(err, utils.PerfError("failed to read response body"))
	}
	err = json.Unmarshal(body, &response)
	if err != nil {
		return nil, nil, nil, ErrFailUnmarshal(err)
	}

	var data [][]string
	var expendedData []resultStruct

	// append data for single profile
	for _, result := range response.Results {
		serviceMesh := "No Mesh"
		p50 := ""
		p99_9 := ""
		var P50, P90, P99 float64

		if result.Mesh != "" {
			serviceMesh = result.Mesh
		}
		qps := fmt.Sprintf("%d", int(result.RunnerResults.QPS))
		duration := result.RunnerResults.RequestedDuration

		// First percentile is P50, last is P99.9 (when present).
		if len(result.RunnerResults.DurationHistogram.Percentiles) > 0 {
			p50 = fmt.Sprintf("%.8f", result.RunnerResults.DurationHistogram.Percentiles[0].Value)
			p99_9 = fmt.Sprintf("%.8f", result.RunnerResults.DurationHistogram.Percentiles[len(result.RunnerResults.DurationHistogram.Percentiles)-1].Value)
		}

		startTime := result.TestStartTime.Format("2006-01-02 15:04:05")
		data = append(data, []string{result.Name, serviceMesh, qps, duration, p50, p99_9, startTime})

		if len(result.RunnerResults.DurationHistogram.Percentiles) > 3 {
			P50 = result.RunnerResults.DurationHistogram.Percentiles[0].Value
			P90 = result.RunnerResults.DurationHistogram.Percentiles[2].Value
			P99 = result.RunnerResults.DurationHistogram.Percentiles[3].Value
		}

		// append data for extended output
		a := resultStruct{
			Name:     result.Name,
			UserID:   result.UserID,
			URL:      result.RunnerResults.URL,
			QPS:      int(result.RunnerResults.QPS),
			Duration: result.RunnerResults.RequestedDuration,
			LatenciesMs: &models.LatenciesMs{
				Average: result.RunnerResults.DurationHistogram.Average,
				Max:     result.RunnerResults.DurationHistogram.Max,
				Min:     result.RunnerResults.DurationHistogram.Min,
				P50:     P50,
				P90:     P90,
				P99:     P99,
			},
			StartTime:     result.TestStartTime,
			MesheryID:     result.MesheryID,
			LoadGenerator: result.RunnerResults.LoadGenerator,
		}

		expendedData = append(expendedData, a)
	}

	return data, expendedData, body, nil
}

func init() {
	resultCmd.Flags().BoolVarP(&expand, "view", "", false, "(optional) View single performance results with more info")
	resultCmd.Flags().IntVarP(&resultPage, "page", "p", 1, "(optional) List next set of performance results with --page (default = 1)")
}
// Copyright 2018 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ddl import ( "bytes" "context" "encoding/hex" "fmt" "math" "strconv" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/config" sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/label" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/format" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/opcode" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" tidbutil "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/mock" decoder 
"github.com/pingcap/tidb/util/rowDecoder" "github.com/pingcap/tidb/util/slice" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/stringutil" "github.com/tikv/client-go/v2/tikv" kvutil "github.com/tikv/client-go/v2/util" "go.uber.org/zap" ) const ( partitionMaxValue = "MAXVALUE" ) func checkAddPartition(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.PartitionInfo, []model.PartitionDefinition, error) { schemaID := job.SchemaID tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, nil, errors.Trace(err) } partInfo := &model.PartitionInfo{} err = job.DecodeArgs(&partInfo) if err != nil { job.State = model.JobStateCancelled return nil, nil, nil, errors.Trace(err) } if len(tblInfo.Partition.AddingDefinitions) > 0 { return tblInfo, partInfo, tblInfo.Partition.AddingDefinitions, nil } return tblInfo, partInfo, []model.PartitionDefinition{}, nil } // TODO: Move this into reorganize partition! func (w *worker) onAddTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { // Handle the rolling back job if job.IsRollingback() { ver, err := w.onDropTablePartition(d, t, job) if err != nil { return ver, errors.Trace(err) } return ver, nil } // notice: addingDefinitions is empty when job is in state model.StateNone tblInfo, partInfo, addingDefinitions, err := checkAddPartition(t, job) if err != nil { return ver, err } // In order to skip maintaining the state check in partitionDefinition, TiDB use addingDefinition instead of state field. // So here using `job.SchemaState` to judge what the stage of this job is. switch job.SchemaState { case model.StateNone: // job.SchemaState == model.StateNone means the job is in the initial state of add partition. // Here should use partInfo from job directly and do some check action. 
err = checkAddPartitionTooManyPartitions(uint64(len(tblInfo.Partition.Definitions) + len(partInfo.Definitions))) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } err = checkAddPartitionValue(tblInfo, partInfo) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } err = checkAddPartitionNameUnique(tblInfo, partInfo) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } // move the adding definition into tableInfo. updateAddingPartitionInfo(partInfo, tblInfo) ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) } // modify placement settings for _, def := range tblInfo.Partition.AddingDefinitions { if _, err = checkPlacementPolicyRefValidAndCanNonValidJob(t, job, def.PlacementPolicyRef); err != nil { return ver, errors.Trace(err) } } if tblInfo.TiFlashReplica != nil { // Must set placement rule, and make sure it succeeds. if err := infosync.ConfigureTiFlashPDForPartitions(true, &tblInfo.Partition.AddingDefinitions, tblInfo.TiFlashReplica.Count, &tblInfo.TiFlashReplica.LocationLabels, tblInfo.ID); err != nil { logutil.BgLogger().Error("ConfigureTiFlashPDForPartitions fails", zap.Error(err)) return ver, errors.Trace(err) } } bundles, err := alterTablePartitionBundles(t, tblInfo, tblInfo.Partition.AddingDefinitions) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } if err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles); err != nil { job.State = model.JobStateCancelled return ver, errors.Wrapf(err, "failed to notify PD the placement rules") } ids := getIDs([]*model.TableInfo{tblInfo}) for _, p := range tblInfo.Partition.AddingDefinitions { ids = append(ids, p.ID) } if _, err := alterTableLabelRule(job.SchemaName, tblInfo, ids); err != nil { job.State = model.JobStateCancelled return ver, err } // none -> replica only job.SchemaState = model.StateReplicaOnly case 
model.StateReplicaOnly: // replica only -> public failpoint.Inject("sleepBeforeReplicaOnly", func(val failpoint.Value) { sleepSecond := val.(int) time.Sleep(time.Duration(sleepSecond) * time.Second) }) // Here need do some tiflash replica complement check. // TODO: If a table is with no TiFlashReplica or it is not available, the replica-only state can be eliminated. if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available { // For available state, the new added partition should wait it's replica to // be finished. Otherwise the query to this partition will be blocked. needRetry, err := checkPartitionReplica(tblInfo.TiFlashReplica.Count, addingDefinitions, d) if err != nil { return convertAddTablePartitionJob2RollbackJob(d, t, job, err, tblInfo) } if needRetry { // The new added partition hasn't been replicated. // Do nothing to the job this time, wait next worker round. time.Sleep(tiflashCheckTiDBHTTPAPIHalfInterval) // Set the error here which will lead this job exit when it's retry times beyond the limitation. return ver, errors.Errorf("[ddl] add partition wait for tiflash replica to complete") } } // When TiFlash Replica is ready, we must move them into `AvailablePartitionIDs`. if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available { for _, d := range partInfo.Definitions { tblInfo.TiFlashReplica.AvailablePartitionIDs = append(tblInfo.TiFlashReplica.AvailablePartitionIDs, d.ID) err = infosync.UpdateTiFlashProgressCache(d.ID, 1) if err != nil { // just print log, progress will be updated in `refreshTiFlashTicker` logutil.BgLogger().Error("update tiflash sync progress cache failed", zap.Error(err), zap.Int64("tableID", tblInfo.ID), zap.Int64("partitionID", d.ID), ) } } } // For normal and replica finished table, move the `addingDefinitions` into `Definitions`. 
		// Make the pending partitions visible: merge AddingDefinitions into
		// Definitions, pre-split regions for them, then persist the new table
		// info and finish the job.
		updatePartitionInfo(tblInfo)

		preSplitAndScatter(w.sess.Context, d.store, tblInfo, addingDefinitions)

		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
		asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddTablePartition, TableInfo: tblInfo, PartInfo: partInfo})
	default:
		err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("partition", job.SchemaState)
	}

	return ver, errors.Trace(err)
}

// alterTableLabelRule updates the table's Label Rule (if one exists) so that
// it covers the given physical IDs, and notifies PD.
// Returns true if a rule was found and updated, false if no rule exists.
func alterTableLabelRule(schemaName string, meta *model.TableInfo, ids []int64) (bool, error) {
	tableRuleID := fmt.Sprintf(label.TableIDFormat, label.IDPrefix, schemaName, meta.Name.L)
	oldRule, err := infosync.GetLabelRules(context.TODO(), []string{tableRuleID})
	if err != nil {
		return false, errors.Trace(err)
	}
	// No existing rule for this table: nothing to update.
	if len(oldRule) == 0 {
		return false, nil
	}

	r, ok := oldRule[tableRuleID]
	if ok {
		// Rebuild the rule so its key ranges cover the supplied IDs
		// (table ID plus all partition IDs) and push it to PD.
		rule := r.Reset(schemaName, meta.Name.L, "", ids...)
		err = infosync.PutLabelRule(context.TODO(), rule)
		if err != nil {
			return false, errors.Wrapf(err, "failed to notify PD label rule")
		}
		return true, nil
	}
	return false, nil
}

// alterTablePartitionBundles recomputes the placement bundles for a table and
// its pending (adding) partitions. It works on a clone of tblInfo so the
// caller's metadata is not modified.
func alterTablePartitionBundles(t *meta.Meta, tblInfo *model.TableInfo, addingDefinitions []model.PartitionDefinition) ([]*placement.Bundle, error) {
	var bundles []*placement.Bundle

	// tblInfo do not include added partitions, so we should add them first
	tblInfo = tblInfo.Clone()
	p := *tblInfo.Partition
	p.Definitions = append([]model.PartitionDefinition{}, p.Definitions...)
	// NOTE(review): the copied slice built on the previous line is immediately
	// discarded here — this append extends tblInfo.Partition.Definitions'
	// backing array directly instead of the fresh copy. Presumably safe only
	// because tblInfo was cloned above; confirm whether the intent was
	// `append(p.Definitions, addingDefinitions...)`.
	p.Definitions = append(tblInfo.Partition.Definitions, addingDefinitions...)
	tblInfo.Partition = &p

	// bundle for table should be recomputed because it includes some default configs for partitions
	tblBundle, err := placement.NewTableBundle(t, tblInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}

	if tblBundle != nil {
		bundles = append(bundles, tblBundle)
	}

	partitionBundles, err := placement.NewPartitionListBundles(t, addingDefinitions)
	if err != nil {
		return nil, errors.Trace(err)
	}

	bundles = append(bundles, partitionBundles...)
	return bundles, nil
}

// droppedPartitionBundles keeps placement settings for dropped/truncated partitions.
// When drop/truncate a partition, we should still keep the dropped partition's placement settings to avoid unnecessary region schedules.
// When a partition is not configured with a placement policy directly, its rule is in the table's placement group which will be deleted after
// partition truncated/dropped. So it is necessary to create a standalone placement group with partition id after it.
func droppedPartitionBundles(t *meta.Meta, tblInfo *model.TableInfo, dropPartitions []model.PartitionDefinition) ([]*placement.Bundle, error) {
	partitions := make([]model.PartitionDefinition, 0, len(dropPartitions))
	for _, def := range dropPartitions {
		// Clone so that inheriting the table-level policy below does not
		// mutate the caller's definitions.
		def = def.Clone()
		if def.PlacementPolicyRef == nil {
			def.PlacementPolicyRef = tblInfo.PlacementPolicyRef
		}

		// Only partitions that end up with an explicit policy need a
		// standalone bundle.
		if def.PlacementPolicyRef != nil {
			partitions = append(partitions, def)
		}
	}

	return placement.NewPartitionListBundles(t, partitions)
}

// updatePartitionInfo merge `addingDefinitions` into `Definitions` in the tableInfo.
// Existing definitions keep their order; the adding ones are appended after
// them and AddingDefinitions is cleared.
func updatePartitionInfo(tblInfo *model.TableInfo) {
	parInfo := &model.PartitionInfo{}
	oldDefs, newDefs := tblInfo.Partition.Definitions, tblInfo.Partition.AddingDefinitions
	parInfo.Definitions = make([]model.PartitionDefinition, 0, len(newDefs)+len(oldDefs))
	parInfo.Definitions = append(parInfo.Definitions, oldDefs...)
	parInfo.Definitions = append(parInfo.Definitions, newDefs...)
	tblInfo.Partition.Definitions = parInfo.Definitions
	tblInfo.Partition.AddingDefinitions = nil
}

// updateAddingPartitionInfo write adding partitions into `addingDefinitions` field in the tableInfo.
// A fresh slice is allocated so the tableInfo does not alias partitionInfo's slice.
func updateAddingPartitionInfo(partitionInfo *model.PartitionInfo, tblInfo *model.TableInfo) {
	newDefs := partitionInfo.Definitions
	tblInfo.Partition.AddingDefinitions = make([]model.PartitionDefinition, 0, len(newDefs))
	tblInfo.Partition.AddingDefinitions = append(tblInfo.Partition.AddingDefinitions, newDefs...)
}

// rollbackAddingPartitionInfo remove the `addingDefinitions` in the tableInfo.
// Returns the physical table IDs and partition names that were pending, plus
// empty placement bundles for any partition that had its own placement policy
// (used to reset those rules in PD).
func rollbackAddingPartitionInfo(tblInfo *model.TableInfo) ([]int64, []string, []*placement.Bundle) {
	physicalTableIDs := make([]int64, 0, len(tblInfo.Partition.AddingDefinitions))
	partNames := make([]string, 0, len(tblInfo.Partition.AddingDefinitions))
	rollbackBundles := make([]*placement.Bundle, 0, len(tblInfo.Partition.AddingDefinitions))
	for _, one := range tblInfo.Partition.AddingDefinitions {
		physicalTableIDs = append(physicalTableIDs, one.ID)
		partNames = append(partNames, one.Name.L)
		if one.PlacementPolicyRef != nil {
			rollbackBundles = append(rollbackBundles, placement.NewBundle(one.ID))
		}
	}
	tblInfo.Partition.AddingDefinitions = nil
	return physicalTableIDs, partNames, rollbackBundles
}

// checkAddListPartitions checks if current table already contains a DEFAULT
// list partition; if so, ADD PARTITION is rejected (REORGANIZE PARTITION must
// be used instead).
func checkAddListPartitions(tblInfo *model.TableInfo) error {
	for i := range tblInfo.Partition.Definitions {
		for j := range tblInfo.Partition.Definitions[i].InValues {
			for _, val := range tblInfo.Partition.Definitions[i].InValues[j] {
				if val == "DEFAULT" { // should already be normalized
					return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("ADD List partition, already contains DEFAULT partition. Please use REORGANIZE PARTITION instead")
				}
			}
		}
	}
	return nil
}

// checkAddPartitionValue check add Partition Values,
// For Range: values less than value must be strictly increasing for each partition.
// For List: if a Default partition exists,
//
//	no ADD partition can be allowed
//	(needs reorganize partition instead).
//
// meta is the existing table metadata; part holds the definitions being added.
func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) error {
	switch meta.Partition.Type {
	case model.PartitionTypeRange:
		// Only validate integer-valued RANGE (not RANGE COLUMNS).
		if len(meta.Partition.Columns) == 0 {
			newDefs, oldDefs := part.Definitions, meta.Partition.Definitions
			rangeValue := oldDefs[len(oldDefs)-1].LessThan[0]
			// Cannot add past an existing MAXVALUE partition.
			if strings.EqualFold(rangeValue, "MAXVALUE") {
				return errors.Trace(dbterror.ErrPartitionMaxvalue)
			}

			currentRangeValue, err := strconv.Atoi(rangeValue)
			if err != nil {
				return errors.Trace(err)
			}

			for i := 0; i < len(newDefs); i++ {
				ifMaxvalue := strings.EqualFold(newDefs[i].LessThan[0], "MAXVALUE")
				if ifMaxvalue && i == len(newDefs)-1 {
					// MAXVALUE is allowed only as the very last new partition.
					return nil
				} else if ifMaxvalue && i != len(newDefs)-1 {
					return errors.Trace(dbterror.ErrPartitionMaxvalue)
				}

				nextRangeValue, err := strconv.Atoi(newDefs[i].LessThan[0])
				if err != nil {
					return errors.Trace(err)
				}
				// LESS THAN bounds must be strictly increasing.
				if nextRangeValue <= currentRangeValue {
					return errors.Trace(dbterror.ErrRangeNotIncreasing)
				}
				currentRangeValue = nextRangeValue
			}
		}
	case model.PartitionTypeList:
		err := checkAddListPartitions(meta)
		if err != nil {
			return err
		}
	}
	return nil
}

// checkPartitionReplica checks whether the newly added partitions have at
// least one TiFlash peer per region. needWait reports that replication has
// not completed yet and the caller should retry later. It also fails fast if
// replicaCount exceeds the number of TiFlash stores in the cluster.
func checkPartitionReplica(replicaCount uint64, addingDefinitions []model.PartitionDefinition, d *ddlCtx) (needWait bool, err error) {
	// Test hooks: force the "still waiting" / "done" answers.
	failpoint.Inject("mockWaitTiFlashReplica", func(val failpoint.Value) {
		if val.(bool) {
			failpoint.Return(true, nil)
		}
	})
	failpoint.Inject("mockWaitTiFlashReplicaOK", func(val failpoint.Value) {
		if val.(bool) {
			failpoint.Return(false, nil)
		}
	})

	ctx := context.Background()
	pdCli := d.store.(tikv.Storage).GetRegionCache().PDClient()
	stores, err := pdCli.GetAllStores(ctx)
	if err != nil {
		return needWait, errors.Trace(err)
	}
	// Check whether stores have `count` tiflash engines.
	tiFlashStoreCount := uint64(0)
	for _, store := range stores {
		if storeHasEngineTiFlashLabel(store) {
			tiFlashStoreCount++
		}
	}
	if replicaCount > tiFlashStoreCount {
		return false, errors.Errorf("[ddl] the tiflash replica count: %d should be less than the total tiflash server count: %d", replicaCount, tiFlashStoreCount)
	}
	for _, pd := range addingDefinitions {
		// Scan every region covering the new partition's key range.
		startKey, endKey := tablecodec.GetTableHandleKeyRange(pd.ID)
		regions, err := pdCli.ScanRegions(ctx, startKey, endKey, -1)
		if err != nil {
			return needWait, errors.Trace(err)
		}
		// For every region in the partition, if it has some corresponding peers and
		// no pending peers, that means the replication has completed.
		for _, region := range regions {
			regionState, err := pdCli.GetRegionByID(ctx, region.Meta.Id)
			if err != nil {
				return needWait, errors.Trace(err)
			}
			tiflashPeerAtLeastOne := checkTiFlashPeerStoreAtLeastOne(stores, regionState.Meta.Peers)
			failpoint.Inject("ForceTiflashNotAvailable", func(v failpoint.Value) {
				tiflashPeerAtLeastOne = v.(bool)
			})
			// It's unnecessary to wait all tiflash peer to be replicated.
			// Here only make sure that tiflash peer count > 0 (at least one).
			if tiflashPeerAtLeastOne {
				continue
			}
			// First region without a TiFlash peer: report and stop scanning.
			needWait = true
			logutil.BgLogger().Info("partition replicas check failed in replica-only DDL state", zap.String("category", "ddl"), zap.Int64("pID", pd.ID), zap.Uint64("wait region ID", region.Meta.Id), zap.Bool("tiflash peer at least one", tiflashPeerAtLeastOne), zap.Time("check time", time.Now()))
			return needWait, nil
		}
	}
	logutil.BgLogger().Info("partition replicas check ok in replica-only DDL state", zap.String("category", "ddl"))
	return needWait, nil
}

// checkTiFlashPeerStoreAtLeastOne reports whether at least one of the peers
// lives on a store carrying the TiFlash engine label.
func checkTiFlashPeerStoreAtLeastOne(stores []*metapb.Store, peers []*metapb.Peer) bool {
	for _, peer := range peers {
		for _, store := range stores {
			if peer.StoreId == store.Id && storeHasEngineTiFlashLabel(store) {
				return true
			}
		}
	}
	return false
}

// storeHasEngineTiFlashLabel reports whether the store is labeled as a
// TiFlash engine store.
func storeHasEngineTiFlashLabel(store *metapb.Store) bool {
	for _, label := range store.Labels {
		if label.Key == placement.EngineLabelKey && label.Value == placement.EngineLabelTiFlash {
			return true
		}
	}
	return false
}

// checkListPartitions validates that every definition of a LIST-partitioned
// table uses a VALUES IN clause, producing a type-specific error otherwise.
func checkListPartitions(defs []*ast.PartitionDefinition) error {
	for _, def := range defs {
		_, ok := def.Clause.(*ast.PartitionDefinitionClauseIn)
		if !ok {
			switch def.Clause.(type) {
			case *ast.PartitionDefinitionClauseLessThan:
				return ast.ErrPartitionWrongValues.GenWithStackByArgs("RANGE", "LESS THAN")
			case *ast.PartitionDefinitionClauseNone:
				return ast.ErrPartitionRequiresValues.GenWithStackByArgs("LIST", "IN")
			default:
				return dbterror.ErrUnsupportedCreatePartition.GenWithStack("Only VALUES IN () is supported for LIST partitioning")
			}
		}
	}
	return nil
}

// buildTablePartitionInfo builds partition info and checks for some errors.
// buildTablePartitionInfo fills tbInfo.Partition from the parsed PARTITION
// options s, validating the partition type against session settings, the
// partitioning expression/columns, and the individual definitions.
// A nil s (no PARTITION BY clause) is a no-op. Unsupported configurations
// either return an error or downgrade to a normal table with a warning.
func buildTablePartitionInfo(ctx sessionctx.Context, s *ast.PartitionOptions, tbInfo *model.TableInfo) error {
	if s == nil {
		return nil
	}

	// tidb_enable_table_partition = 'OFF' silently creates a normal table.
	if strings.EqualFold(ctx.GetSessionVars().EnableTablePartition, "OFF") {
		ctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrTablePartitionDisabled)
		return nil
	}

	var enable bool
	switch s.Tp {
	case model.PartitionTypeRange:
		enable = true
	case model.PartitionTypeList:
		// Partition by list is enabled only when tidb_enable_list_partition is 'ON'.
		enable = ctx.GetSessionVars().EnableListTablePartition
		if enable {
			err := checkListPartitions(s.Definitions)
			if err != nil {
				return err
			}
		}
	case model.PartitionTypeHash, model.PartitionTypeKey:
		// Partition by hash and key is enabled by default.
		if s.Sub != nil {
			// Subpartitioning only allowed with Range or List
			return ast.ErrSubpartition
		}
		// Note that linear hash is simply ignored, and creates non-linear hash/key.
		if s.Linear {
			ctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrUnsupportedCreatePartition.GenWithStack(fmt.Sprintf("LINEAR %s is not supported, using non-linear %s instead", s.Tp.String(), s.Tp.String())))
		}
		if s.Tp == model.PartitionTypeHash || len(s.ColumnNames) != 0 {
			enable = true
		}
	}
	if !enable {
		// Unsupported partition type: fall back to a normal table with a warning.
		ctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrUnsupportedCreatePartition.GenWithStack(fmt.Sprintf("Unsupported partition type %v, treat as normal table", s.Tp)))
		return nil
	}
	if s.Sub != nil {
		ctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrUnsupportedCreatePartition.GenWithStack(fmt.Sprintf("Unsupported subpartitioning, only using %v partitioning", s.Tp)))
	}

	pi := &model.PartitionInfo{
		Type:   s.Tp,
		Enable: enable,
		Num:    s.Num,
	}
	tbInfo.Partition = pi
	if s.Expr != nil {
		// PARTITION BY <fn>(expr): validate and store its restored SQL text.
		if err := checkPartitionFuncValid(ctx, tbInfo, s.Expr); err != nil {
			return errors.Trace(err)
		}
		buf := new(bytes.Buffer)
		restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags|format.RestoreBracketAroundBinaryOperation, buf)
		if err := s.Expr.Restore(restoreCtx); err != nil {
			return err
		}
		pi.Expr = buf.String()
	} else if s.ColumnNames != nil {
		// PARTITION BY ... COLUMNS (c1, c2, ...).
		pi.Columns = make([]model.CIStr, 0, len(s.ColumnNames))
		for _, cn := range s.ColumnNames {
			pi.Columns = append(pi.Columns, cn.Name)
		}
		if err := checkColumnsPartitionType(tbInfo); err != nil {
			return err
		}
	}

	// Expand INTERVAL syntactic sugar into explicit definitions (no-op when
	// s.Interval is nil).
	err := generatePartitionDefinitionsFromInterval(ctx, s, tbInfo)
	if err != nil {
		return errors.Trace(err)
	}

	defs, err := buildPartitionDefinitionsInfo(ctx, s.Definitions, tbInfo, s.Num)
	if err != nil {
		return errors.Trace(err)
	}

	tbInfo.Partition.Definitions = defs

	if s.Interval != nil {
		// Syntactic sugar for INTERVAL partitioning
		// Generate the resulting CREATE TABLE as the query string
		query, ok := ctx.Value(sessionctx.QueryString).(string)
		if ok {
			sqlMode := ctx.GetSessionVars().SQLMode
			var buf bytes.Buffer
			AppendPartitionDefs(tbInfo.Partition, &buf, sqlMode)

			// Splice the generated definitions over the INTERVAL clause's
			// original text span inside the query string.
			syntacticSugar := s.Interval.OriginalText()
			syntacticStart := s.Interval.OriginTextPosition()
			newQuery := query[:syntacticStart] + "(" + buf.String() + ")" + query[syntacticStart+len(syntacticSugar):]
			ctx.SetValue(sessionctx.QueryString, newQuery)
		}
	}

	partCols, err := getPartitionColSlices(ctx, tbInfo, s)
	if err != nil {
		return errors.Trace(err)
	}

	// Unique indexes that do not include all partitioning columns can only be
	// supported via global indexes (when enabled in config).
	for _, index := range tbInfo.Indices {
		if index.Unique && !checkUniqueKeyIncludePartKey(partCols, index.Columns) {
			index.Global = config.GetGlobalConfig().EnableGlobalIndex
		}
	}
	return nil
}

// getPartitionColSlices extracts the set of partitioning columns, either from
// the partition expression (via the expression checker) or from the explicit
// COLUMNS list. Errors if neither is present.
func getPartitionColSlices(sctx sessionctx.Context, tblInfo *model.TableInfo, s *ast.PartitionOptions) (partCols stringSlice, err error) {
	if s.Expr != nil {
		extractCols := newPartitionExprChecker(sctx, tblInfo)
		s.Expr.Accept(extractCols)
		partColumns, err := extractCols.columns, extractCols.err
		if err != nil {
			return nil, err
		}
		partCols = columnInfoSlice(partColumns)
	} else if len(s.ColumnNames) > 0 {
		partCols = columnNameSlice(s.ColumnNames)
	} else {
		return nil, errors.Errorf("Table partition metadata not correct, neither partition expression or list of partition columns")
	}
	return partCols, nil
}

//
getPartitionIntervalFromTable checks if a partitioned table matches a generated INTERVAL partitioned scheme // will return nil if error occurs, i.e. not an INTERVAL partitioned table func getPartitionIntervalFromTable(ctx sessionctx.Context, tbInfo *model.TableInfo) *ast.PartitionInterval { if tbInfo.Partition == nil || tbInfo.Partition.Type != model.PartitionTypeRange { return nil } if len(tbInfo.Partition.Columns) > 1 { // Multi-column RANGE COLUMNS is not supported with INTERVAL return nil } if len(tbInfo.Partition.Definitions) < 2 { // Must have at least two partitions to calculate an INTERVAL return nil } var ( interval ast.PartitionInterval startIdx = 0 endIdx = len(tbInfo.Partition.Definitions) - 1 isIntType = true minVal = "0" ) if len(tbInfo.Partition.Columns) > 0 { partCol := findColumnByName(tbInfo.Partition.Columns[0].L, tbInfo) if partCol.FieldType.EvalType() == types.ETInt { min := getLowerBoundInt(partCol) minVal = strconv.FormatInt(min, 10) } else if partCol.FieldType.EvalType() == types.ETDatetime { isIntType = false minVal = "0000-01-01" } else { // Only INT and Datetime columns are supported for INTERVAL partitioning return nil } } else { if !isPartExprUnsigned(tbInfo) { minVal = "-9223372036854775808" } } // Check if possible null partition firstPartLessThan := driver.UnwrapFromSingleQuotes(tbInfo.Partition.Definitions[0].LessThan[0]) if strings.EqualFold(firstPartLessThan, minVal) { interval.NullPart = true startIdx++ firstPartLessThan = driver.UnwrapFromSingleQuotes(tbInfo.Partition.Definitions[startIdx].LessThan[0]) } // flag if MAXVALUE partition lastPartLessThan := driver.UnwrapFromSingleQuotes(tbInfo.Partition.Definitions[endIdx].LessThan[0]) if strings.EqualFold(lastPartLessThan, partitionMaxValue) { interval.MaxValPart = true endIdx-- lastPartLessThan = driver.UnwrapFromSingleQuotes(tbInfo.Partition.Definitions[endIdx].LessThan[0]) } // Guess the interval if startIdx >= endIdx { // Must have at least two partitions to calculate an 
INTERVAL return nil } var firstExpr, lastExpr ast.ExprNode if isIntType { exprStr := fmt.Sprintf("((%s) - (%s)) DIV %d", lastPartLessThan, firstPartLessThan, endIdx-startIdx) exprs, err := expression.ParseSimpleExprsWithNames(ctx, exprStr, nil, nil) if err != nil { return nil } val, isNull, err := exprs[0].EvalInt(ctx, chunk.Row{}) if isNull || err != nil || val < 1 { // If NULL, error or interval < 1 then cannot be an INTERVAL partitioned table return nil } interval.IntervalExpr.Expr = ast.NewValueExpr(val, "", "") interval.IntervalExpr.TimeUnit = ast.TimeUnitInvalid firstExpr, err = astIntValueExprFromStr(firstPartLessThan, minVal == "0") if err != nil { return nil } interval.FirstRangeEnd = &firstExpr lastExpr, err = astIntValueExprFromStr(lastPartLessThan, minVal == "0") if err != nil { return nil } interval.LastRangeEnd = &lastExpr } else { // types.ETDatetime exprStr := fmt.Sprintf("TIMESTAMPDIFF(SECOND, '%s', '%s')", firstPartLessThan, lastPartLessThan) exprs, err := expression.ParseSimpleExprsWithNames(ctx, exprStr, nil, nil) if err != nil { return nil } val, isNull, err := exprs[0].EvalInt(ctx, chunk.Row{}) if isNull || err != nil || val < 1 { // If NULL, error or interval < 1 then cannot be an INTERVAL partitioned table return nil } // This will not find all matches > 28 days, since INTERVAL 1 MONTH can generate // 2022-01-31, 2022-02-28, 2022-03-31 etc. so we just assume that if there is a // diff >= 28 days, we will try with Month and not retry with something else... i := val / int64(endIdx-startIdx) if i < (28 * 24 * 60 * 60) { // Since it is not stored or displayed, non need to try Minute..Week! interval.IntervalExpr.Expr = ast.NewValueExpr(i, "", "") interval.IntervalExpr.TimeUnit = ast.TimeUnitSecond } else { // Since it is not stored or displayed, non need to try to match Quarter or Year! 
if (endIdx - startIdx) <= 3 { // in case February is in the range i = i / (28 * 24 * 60 * 60) } else { // This should be good for intervals up to 5 years i = i / (30 * 24 * 60 * 60) } interval.IntervalExpr.Expr = ast.NewValueExpr(i, "", "") interval.IntervalExpr.TimeUnit = ast.TimeUnitMonth } firstExpr = ast.NewValueExpr(firstPartLessThan, "", "") lastExpr = ast.NewValueExpr(lastPartLessThan, "", "") interval.FirstRangeEnd = &firstExpr interval.LastRangeEnd = &lastExpr } partitionMethod := ast.PartitionMethod{ Tp: model.PartitionTypeRange, Interval: &interval, } partOption := &ast.PartitionOptions{PartitionMethod: partitionMethod} // Generate the definitions from interval, first and last err := generatePartitionDefinitionsFromInterval(ctx, partOption, tbInfo) if err != nil { return nil } return &interval } // comparePartitionAstAndModel compares a generated *ast.PartitionOptions and a *model.PartitionInfo func comparePartitionAstAndModel(ctx sessionctx.Context, pAst *ast.PartitionOptions, pModel *model.PartitionInfo) error { a := pAst.Definitions m := pModel.Definitions if len(pAst.Definitions) != len(pModel.Definitions) { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning: number of partitions generated != partition defined (%d != %d)", len(a), len(m)) } for i := range pAst.Definitions { // Allow options to differ! (like Placement Rules) // Allow names to differ! 
// Check MAXVALUE maxVD := false if strings.EqualFold(m[i].LessThan[0], partitionMaxValue) { maxVD = true } generatedExpr := a[i].Clause.(*ast.PartitionDefinitionClauseLessThan).Exprs[0] _, maxVG := generatedExpr.(*ast.MaxValueExpr) if maxVG || maxVD { if maxVG && maxVD { continue } return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("INTERVAL partitioning: MAXVALUE clause defined for partition %s differs between generated and defined", m[i].Name.O)) } lessThan := m[i].LessThan[0] if len(lessThan) > 1 && lessThan[:1] == "'" && lessThan[len(lessThan)-1:] == "'" { lessThan = driver.UnwrapFromSingleQuotes(lessThan) } cmpExpr := &ast.BinaryOperationExpr{ Op: opcode.EQ, L: ast.NewValueExpr(lessThan, "", ""), R: generatedExpr, } cmp, err := expression.EvalAstExpr(ctx, cmpExpr) if err != nil { return err } if cmp.GetInt64() != 1 { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("INTERVAL partitioning: LESS THAN for partition %s differs between generated and defined", m[i].Name.O)) } } return nil } // comparePartitionDefinitions check if generated definitions are the same as the given ones // Allow names to differ // returns error in case of error or non-accepted difference func comparePartitionDefinitions(ctx sessionctx.Context, a, b []*ast.PartitionDefinition) error { if len(a) != len(b) { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("number of partitions generated != partition defined (%d != %d)", len(a), len(b)) } for i := range a { if len(b[i].Sub) > 0 { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("partition %s does have unsupported subpartitions", b[i].Name.O)) } // TODO: We could extend the syntax to allow for table options too, like: // CREATE TABLE t ... INTERVAL ... 
LAST PARTITION LESS THAN ('2015-01-01') PLACEMENT POLICY = 'cheapStorage' // ALTER TABLE t LAST PARTITION LESS THAN ('2022-01-01') PLACEMENT POLICY 'defaultStorage' // ALTER TABLE t LAST PARTITION LESS THAN ('2023-01-01') PLACEMENT POLICY 'fastStorage' if len(b[i].Options) > 0 { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("partition %s does have unsupported options", b[i].Name.O)) } lessThan, ok := b[i].Clause.(*ast.PartitionDefinitionClauseLessThan) if !ok { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("partition %s does not have the right type for LESS THAN", b[i].Name.O)) } definedExpr := lessThan.Exprs[0] generatedExpr := a[i].Clause.(*ast.PartitionDefinitionClauseLessThan).Exprs[0] _, maxVD := definedExpr.(*ast.MaxValueExpr) _, maxVG := generatedExpr.(*ast.MaxValueExpr) if maxVG || maxVD { if maxVG && maxVD { continue } return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("partition %s differs between generated and defined for MAXVALUE", b[i].Name.O)) } cmpExpr := &ast.BinaryOperationExpr{ Op: opcode.EQ, L: definedExpr, R: generatedExpr, } cmp, err := expression.EvalAstExpr(ctx, cmpExpr) if err != nil { return err } if cmp.GetInt64() != 1 { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(fmt.Sprintf("partition %s differs between generated and defined for expression", b[i].Name.O)) } } return nil } func getLowerBoundInt(partCols ...*model.ColumnInfo) int64 { ret := int64(0) for _, col := range partCols { if mysql.HasUnsignedFlag(col.FieldType.GetFlag()) { return 0 } ret = mathutil.Min(ret, types.IntergerSignedLowerBound(col.GetType())) } return ret } // generatePartitionDefinitionsFromInterval generates partition Definitions according to INTERVAL options on partOptions func generatePartitionDefinitionsFromInterval(ctx sessionctx.Context, partOptions *ast.PartitionOptions, tbInfo *model.TableInfo) error { if partOptions.Interval == nil { return nil } if tbInfo.Partition.Type != 
model.PartitionTypeRange { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, only allowed on RANGE partitioning") } if len(partOptions.ColumnNames) > 1 || len(tbInfo.Partition.Columns) > 1 { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, does not allow RANGE COLUMNS with more than one column") } var partCol *model.ColumnInfo if len(tbInfo.Partition.Columns) > 0 { partCol = findColumnByName(tbInfo.Partition.Columns[0].L, tbInfo) if partCol == nil { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, could not find any RANGE COLUMNS") } // Only support Datetime, date and INT column types for RANGE INTERVAL! switch partCol.FieldType.EvalType() { case types.ETInt, types.ETDatetime: default: return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, only supports Date, Datetime and INT types") } } // Allow given partition definitions, but check it later! definedPartDefs := partOptions.Definitions partOptions.Definitions = make([]*ast.PartitionDefinition, 0, 1) if partOptions.Interval.FirstRangeEnd == nil || partOptions.Interval.LastRangeEnd == nil { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, currently requires FIRST and LAST partitions to be defined") } switch partOptions.Interval.IntervalExpr.TimeUnit { case ast.TimeUnitInvalid, ast.TimeUnitYear, ast.TimeUnitQuarter, ast.TimeUnitMonth, ast.TimeUnitWeek, ast.TimeUnitDay, ast.TimeUnitHour, ast.TimeUnitDayMinute, ast.TimeUnitSecond: default: return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, only supports YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE and SECOND as time unit") } first := ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{*partOptions.Interval.FirstRangeEnd}, } last := ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{*partOptions.Interval.LastRangeEnd}, } if len(tbInfo.Partition.Columns) 
> 0 { colTypes := collectColumnsType(tbInfo) if len(colTypes) != len(tbInfo.Partition.Columns) { return dbterror.ErrWrongPartitionName.GenWithStack("partition column name cannot be found") } if _, err := checkAndGetColumnsTypeAndValuesMatch(ctx, colTypes, first.Exprs); err != nil { return err } if _, err := checkAndGetColumnsTypeAndValuesMatch(ctx, colTypes, last.Exprs); err != nil { return err } } else { if err := checkPartitionValuesIsInt(ctx, "FIRST PARTITION", first.Exprs, tbInfo); err != nil { return err } if err := checkPartitionValuesIsInt(ctx, "LAST PARTITION", last.Exprs, tbInfo); err != nil { return err } } if partOptions.Interval.NullPart { var partExpr ast.ExprNode if len(tbInfo.Partition.Columns) == 1 && partOptions.Interval.IntervalExpr.TimeUnit != ast.TimeUnitInvalid { // Notice compatibility with MySQL, keyword here is 'supported range' but MySQL seems to work from 0000-01-01 too // https://dev.mysql.com/doc/refman/8.0/en/datetime.html says range 1000-01-01 - 9999-12-31 // https://docs.pingcap.com/tidb/dev/data-type-date-and-time says The supported range is '0000-01-01' to '9999-12-31' // set LESS THAN to ZeroTime partExpr = ast.NewValueExpr("0000-01-01", "", "") } else { var min int64 if partCol != nil { min = getLowerBoundInt(partCol) } else { if !isPartExprUnsigned(tbInfo) { min = math.MinInt64 } } partExpr = ast.NewValueExpr(min, "", "") } partOptions.Definitions = append(partOptions.Definitions, &ast.PartitionDefinition{ Name: model.NewCIStr("P_NULL"), Clause: &ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{partExpr}, }, }) } err := GeneratePartDefsFromInterval(ctx, ast.AlterTablePartition, tbInfo, partOptions) if err != nil { return err } if partOptions.Interval.MaxValPart { partOptions.Definitions = append(partOptions.Definitions, &ast.PartitionDefinition{ Name: model.NewCIStr("P_MAXVALUE"), Clause: &ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{&ast.MaxValueExpr{}}, }, }) } if len(definedPartDefs) > 0 { err := 
comparePartitionDefinitions(ctx, partOptions.Definitions, definedPartDefs) if err != nil { return err } // Seems valid, so keep the defined so that the user defined names are kept etc. partOptions.Definitions = definedPartDefs } else if len(tbInfo.Partition.Definitions) > 0 { err := comparePartitionAstAndModel(ctx, partOptions, tbInfo.Partition) if err != nil { return err } } return nil } func astIntValueExprFromStr(s string, unsigned bool) (ast.ExprNode, error) { if unsigned { u, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } return ast.NewValueExpr(u, "", ""), nil } i, err := strconv.ParseInt(s, 10, 64) if err != nil { return nil, err } return ast.NewValueExpr(i, "", ""), nil } // GeneratePartDefsFromInterval generates range partitions from INTERVAL partitioning. // Handles // - CREATE TABLE: all partitions are generated // - ALTER TABLE FIRST PARTITION (expr): Drops all partitions before the partition matching the expr (i.e. sets that partition as the new first partition) // i.e. 
will return the partitions from old FIRST partition to (and including) new FIRST partition // - ALTER TABLE LAST PARTITION (expr): Creates new partitions from (excluding) old LAST partition to (including) new LAST partition // // partition definitions will be set on partitionOptions func GeneratePartDefsFromInterval(ctx sessionctx.Context, tp ast.AlterTableType, tbInfo *model.TableInfo, partitionOptions *ast.PartitionOptions) error { if partitionOptions == nil { return nil } var sb strings.Builder err := partitionOptions.Interval.IntervalExpr.Expr.Restore(format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)) if err != nil { return err } intervalString := driver.UnwrapFromSingleQuotes(sb.String()) if len(intervalString) < 1 || intervalString[:1] < "1" || intervalString[:1] > "9" { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL, should be a positive number") } var currVal types.Datum var startExpr, lastExpr, currExpr ast.ExprNode var timeUnit ast.TimeUnitType var partCol *model.ColumnInfo if len(tbInfo.Partition.Columns) == 1 { partCol = findColumnByName(tbInfo.Partition.Columns[0].L, tbInfo) if partCol == nil { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL COLUMNS partitioning: could not find partitioning column") } } timeUnit = partitionOptions.Interval.IntervalExpr.TimeUnit switch tp { case ast.AlterTablePartition: // CREATE TABLE startExpr = *partitionOptions.Interval.FirstRangeEnd lastExpr = *partitionOptions.Interval.LastRangeEnd case ast.AlterTableDropFirstPartition: startExpr = *partitionOptions.Interval.FirstRangeEnd lastExpr = partitionOptions.Expr case ast.AlterTableAddLastPartition: startExpr = *partitionOptions.Interval.LastRangeEnd lastExpr = partitionOptions.Expr default: return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning: Internal error during generating altered INTERVAL partitions, no known alter type") } lastVal, err := expression.EvalAstExpr(ctx, lastExpr) if err != nil 
{ return err } var partDefs []*ast.PartitionDefinition if len(partitionOptions.Definitions) != 0 { partDefs = partitionOptions.Definitions } else { partDefs = make([]*ast.PartitionDefinition, 0, 1) } for i := 0; i < mysql.PartitionCountLimit; i++ { if i == 0 { currExpr = startExpr // TODO: adjust the startExpr and have an offset for interval to handle // Month/Quarters with start partition on day 28/29/30 if tp == ast.AlterTableAddLastPartition { // ALTER TABLE LAST PARTITION ... // Current LAST PARTITION/start already exists, skip to next partition continue } } else { currExpr = &ast.BinaryOperationExpr{ Op: opcode.Mul, L: ast.NewValueExpr(i, "", ""), R: partitionOptions.Interval.IntervalExpr.Expr, } if timeUnit == ast.TimeUnitInvalid { currExpr = &ast.BinaryOperationExpr{ Op: opcode.Plus, L: startExpr, R: currExpr, } } else { currExpr = &ast.FuncCallExpr{ FnName: model.NewCIStr("DATE_ADD"), Args: []ast.ExprNode{ startExpr, currExpr, &ast.TimeUnitExpr{Unit: timeUnit}, }, } } } currVal, err = expression.EvalAstExpr(ctx, currExpr) if err != nil { return err } cmp, err := currVal.Compare(ctx.GetSessionVars().StmtCtx, &lastVal, collate.GetBinaryCollator()) if err != nil { return err } if cmp > 0 { lastStr, err := lastVal.ToString() if err != nil { return err } sb.Reset() err = startExpr.Restore(format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)) if err != nil { return err } startStr := sb.String() errStr := fmt.Sprintf("INTERVAL: expr (%s) not matching FIRST + n INTERVALs (%s + n * %s", lastStr, startStr, intervalString) if timeUnit != ast.TimeUnitInvalid { errStr = errStr + " " + timeUnit.String() } return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs(errStr + ")") } valStr, err := currVal.ToString() if err != nil { return err } if len(valStr) == 0 || valStr[0:1] == "'" { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning: Error when generating partition values") } partName := "P_LT_" + valStr if timeUnit != 
ast.TimeUnitInvalid { currExpr = ast.NewValueExpr(valStr, "", "") } else { if valStr[:1] == "-" { currExpr = ast.NewValueExpr(currVal.GetInt64(), "", "") } else { currExpr = ast.NewValueExpr(currVal.GetUint64(), "", "") } } partDefs = append(partDefs, &ast.PartitionDefinition{ Name: model.NewCIStr(partName), Clause: &ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{currExpr}, }, }) if cmp == 0 { // Last partition! break } } if len(tbInfo.Partition.Definitions)+len(partDefs) > mysql.PartitionCountLimit { return errors.Trace(dbterror.ErrTooManyPartitions) } partitionOptions.Definitions = partDefs return nil } // buildPartitionDefinitionsInfo build partition definitions info without assign partition id. tbInfo will be constant func buildPartitionDefinitionsInfo(ctx sessionctx.Context, defs []*ast.PartitionDefinition, tbInfo *model.TableInfo, numParts uint64) (partitions []model.PartitionDefinition, err error) { switch tbInfo.Partition.Type { case model.PartitionTypeNone: if len(defs) != 1 { return nil, dbterror.ErrUnsupportedPartitionType } partitions = []model.PartitionDefinition{{Name: defs[0].Name}} if comment, set := defs[0].Comment(); set { partitions[0].Comment = comment } case model.PartitionTypeRange: partitions, err = buildRangePartitionDefinitions(ctx, defs, tbInfo) case model.PartitionTypeHash, model.PartitionTypeKey: partitions, err = buildHashPartitionDefinitions(ctx, defs, tbInfo, numParts) case model.PartitionTypeList: partitions, err = buildListPartitionDefinitions(ctx, defs, tbInfo) default: err = dbterror.ErrUnsupportedPartitionType } if err != nil { return nil, err } return partitions, nil } func setPartitionPlacementFromOptions(partition *model.PartitionDefinition, options []*ast.TableOption) error { // the partition inheritance of placement rules don't have to copy the placement elements to themselves. 
	// For example:
	// t placement policy x (p1 placement policy y, p2)
	// p2 will share the same rule as table t does, but it won't copy the meta to itself. we will
	// append p2 range to the coverage of table t's rules. This mechanism is good for cascading change
	// when policy x is altered.
	for _, opt := range options {
		if opt.Tp == ast.TableOptionPlacementPolicy {
			partition.PlacementPolicyRef = &model.PolicyRefInfo{
				Name: model.NewCIStr(opt.StrValue),
			}
		}
	}
	return nil
}

// isNonDefaultPartitionOptionsUsed reports whether any definition deviates
// from the defaults: an auto-generated name ("p0", "p1", ...), no comment,
// and no placement policy reference.
func isNonDefaultPartitionOptionsUsed(defs []model.PartitionDefinition) bool {
	for i := range defs {
		orgDef := defs[i]
		if orgDef.Name.O != fmt.Sprintf("p%d", i) {
			return true
		}
		if len(orgDef.Comment) > 0 {
			return true
		}
		if orgDef.PlacementPolicyRef != nil {
			return true
		}
	}
	return false
}

// buildHashPartitionDefinitions builds numParts HASH/KEY partition
// definitions: existing definitions are kept first, then any explicitly
// supplied defs, and the remainder get default names ("p<i>").
func buildHashPartitionDefinitions(_ sessionctx.Context, defs []*ast.PartitionDefinition, tbInfo *model.TableInfo, numParts uint64) ([]model.PartitionDefinition, error) {
	if err := checkAddPartitionTooManyPartitions(tbInfo.Partition.Num); err != nil {
		return nil, err
	}
	definitions := make([]model.PartitionDefinition, numParts)
	oldParts := uint64(len(tbInfo.Partition.Definitions))
	for i := uint64(0); i < numParts; i++ {
		if i < oldParts {
			// Use the existing definitions
			def := tbInfo.Partition.Definitions[i]
			definitions[i].Name = def.Name
			definitions[i].Comment = def.Comment
			definitions[i].PlacementPolicyRef = def.PlacementPolicyRef
		} else if i < oldParts+uint64(len(defs)) {
			// Use the new defs
			def := defs[i-oldParts]
			definitions[i].Name = def.Name
			definitions[i].Comment, _ = def.Comment()
			if err := setPartitionPlacementFromOptions(&definitions[i], def.Options); err != nil {
				return nil, err
			}
		} else {
			// Use the default
			definitions[i].Name = model.NewCIStr(fmt.Sprintf("p%d", i))
		}
	}
	return definitions, nil
}

// buildListPartitionDefinitions validates each LIST partition clause and
// converts it to a model.PartitionDefinition, recording the formatted IN
// values per partition.
func buildListPartitionDefinitions(ctx sessionctx.Context, defs []*ast.PartitionDefinition, tbInfo *model.TableInfo) ([]model.PartitionDefinition, error) {
	definitions := make([]model.PartitionDefinition, 0, len(defs))
	exprChecker := newPartitionExprChecker(ctx, nil, checkPartitionExprAllowed)
	colTypes := collectColumnsType(tbInfo)
	if len(colTypes) != len(tbInfo.Partition.Columns) {
		return nil, dbterror.ErrWrongPartitionName.GenWithStack("partition column name cannot be found")
	}
	for _, def := range defs {
		if err := def.Clause.Validate(model.PartitionTypeList, len(tbInfo.Partition.Columns)); err != nil {
			return nil, err
		}
		clause := def.Clause.(*ast.PartitionDefinitionClauseIn)
		if len(tbInfo.Partition.Columns) > 0 {
			// LIST COLUMNS: each value tuple must match the partition columns' types.
			for _, vs := range clause.Values {
				// TODO: use the generated strings / normalized partition values
				_, err := checkAndGetColumnsTypeAndValuesMatch(ctx, colTypes, vs)
				if err != nil {
					return nil, err
				}
			}
		} else {
			// Plain LIST: every value must evaluate to an integer.
			for _, vs := range clause.Values {
				if err := checkPartitionValuesIsInt(ctx, def.Name, vs, tbInfo); err != nil {
					return nil, err
				}
			}
		}
		comment, _ := def.Comment()
		err := checkTooLongTable(def.Name)
		if err != nil {
			return nil, err
		}
		piDef := model.PartitionDefinition{
			Name:    def.Name,
			Comment: comment,
		}
		if err = setPartitionPlacementFromOptions(&piDef, def.Options); err != nil {
			return nil, err
		}
		buf := new(bytes.Buffer)
		for _, vs := range clause.Values {
			inValue := make([]string, 0, len(vs))
			for i := range vs {
				vs[i].Accept(exprChecker)
				if exprChecker.err != nil {
					return nil, exprChecker.err
				}
				buf.Reset()
				vs[i].Format(buf)
				inValue = append(inValue, buf.String())
			}
			piDef.InValues = append(piDef.InValues, inValue)
			buf.Reset()
		}
		definitions = append(definitions, piDef)
	}
	return definitions, nil
}

// collectColumnsType resolves the partition columns of tbInfo to their field
// types. Returns nil when the table has no partition columns or when any
// column name cannot be resolved (callers detect the latter via a length
// mismatch).
func collectColumnsType(tbInfo *model.TableInfo) []types.FieldType {
	if len(tbInfo.Partition.Columns) > 0 {
		colTypes := make([]types.FieldType, 0, len(tbInfo.Partition.Columns))
		for _, col := range tbInfo.Partition.Columns {
			c := findColumnByName(col.L, tbInfo)
			if c == nil {
				return nil
			}
			colTypes = append(colTypes, c.FieldType)
		}
		return colTypes
	}
	return nil
}

// buildRangePartitionDefinitions validates each RANGE partition clause and
// converts it to a model.PartitionDefinition, recording the LESS THAN bounds
// per partition (normalized values for RANGE COLUMNS, formatted expressions
// otherwise).
func buildRangePartitionDefinitions(ctx sessionctx.Context, defs []*ast.PartitionDefinition, tbInfo *model.TableInfo) ([]model.PartitionDefinition, error) {
	definitions := make([]model.PartitionDefinition, 0, len(defs))
	exprChecker := newPartitionExprChecker(ctx, nil, checkPartitionExprAllowed)
	colTypes := collectColumnsType(tbInfo)
	if len(colTypes) != len(tbInfo.Partition.Columns) {
		return nil, dbterror.ErrWrongPartitionName.GenWithStack("partition column name cannot be found")
	}
	for _, def := range defs {
		if err := def.Clause.Validate(model.PartitionTypeRange, len(tbInfo.Partition.Columns)); err != nil {
			return nil, err
		}
		clause := def.Clause.(*ast.PartitionDefinitionClauseLessThan)
		var partValStrings []string
		if len(tbInfo.Partition.Columns) > 0 {
			// RANGE COLUMNS: values must match the column types; keep the
			// normalized string forms for use below.
			var err error
			if partValStrings, err = checkAndGetColumnsTypeAndValuesMatch(ctx, colTypes, clause.Exprs); err != nil {
				return nil, err
			}
		} else {
			if err := checkPartitionValuesIsInt(ctx, def.Name, clause.Exprs, tbInfo); err != nil {
				return nil, err
			}
		}
		comment, _ := def.Comment()
		comment, err := validateCommentLength(ctx.GetSessionVars(), def.Name.L, &comment, dbterror.ErrTooLongTablePartitionComment)
		if err != nil {
			return nil, err
		}
		err = checkTooLongTable(def.Name)
		if err != nil {
			return nil, err
		}
		piDef := model.PartitionDefinition{
			Name:    def.Name,
			Comment: comment,
		}
		if err = setPartitionPlacementFromOptions(&piDef, def.Options); err != nil {
			return nil, err
		}
		buf := new(bytes.Buffer)
		// Range columns partitions support multi-column partitions.
for i, expr := range clause.Exprs {
			expr.Accept(exprChecker)
			if exprChecker.err != nil {
				return nil, exprChecker.err
			}
			// If multi-column use new evaluated+normalized output, instead of just formatted expression
			if len(partValStrings) > i && len(colTypes) > 1 {
				partVal := partValStrings[i]
				switch colTypes[i].EvalType() {
				case types.ETInt:
					// no wrapping
				case types.ETDatetime, types.ETString, types.ETDuration:
					if _, ok := clause.Exprs[i].(*ast.MaxValueExpr); !ok {
						// Don't wrap MAXVALUE
						partVal = driver.WrapInSingleQuotes(partVal)
					}
				default:
					return nil, dbterror.ErrWrongTypeColumnValue.GenWithStackByArgs()
				}
				piDef.LessThan = append(piDef.LessThan, partVal)
			} else {
				// Single-column (or plain RANGE): store the formatted expression text.
				expr.Format(buf)
				piDef.LessThan = append(piDef.LessThan, buf.String())
				buf.Reset()
			}
		}
		definitions = append(definitions, piDef)
	}
	return definitions, nil
}

// checkPartitionValuesIsInt verifies that every partition boundary expression
// evaluates to an integer compatible with the partition expression's
// signedness. MAXVALUE and bare DEFAULT are allowed; a qualified DEFAULT
// (DEFAULT(col)) is rejected.
func checkPartitionValuesIsInt(ctx sessionctx.Context, defName interface{}, exprs []ast.ExprNode, tbInfo *model.TableInfo) error {
	tp := types.NewFieldType(mysql.TypeLonglong)
	if isPartExprUnsigned(tbInfo) {
		tp.AddFlag(mysql.UnsignedFlag)
	}
	for _, exp := range exprs {
		if _, ok := exp.(*ast.MaxValueExpr); ok {
			continue
		}
		if d, ok := exp.(*ast.DefaultExpr); ok {
			if d.Name != nil {
				return dbterror.ErrPartitionConstDomain.GenWithStackByArgs()
			}
			continue
		}
		val, err := expression.EvalAstExpr(ctx, exp)
		if err != nil {
			return err
		}
		switch val.Kind() {
		case types.KindUint64, types.KindNull:
		case types.KindInt64:
			// A negative value is out of domain for an unsigned partition expression.
			if mysql.HasUnsignedFlag(tp.GetFlag()) && val.GetInt64() < 0 {
				return dbterror.ErrPartitionConstDomain.GenWithStackByArgs()
			}
		default:
			return dbterror.ErrValuesIsNotIntType.GenWithStackByArgs(defName)
		}
		// Overflow on conversion is tolerated here; other conversion errors are not.
		_, err = val.ConvertTo(ctx.GetSessionVars().StmtCtx, tp)
		if err != nil && !types.ErrOverflow.Equal(err) {
			return dbterror.ErrWrongTypeColumnValue.GenWithStackByArgs()
		}
	}
	return nil
}

// checkPartitionNameUnique returns an error if two definitions in pi share
// the same (case-insensitive) partition name.
func checkPartitionNameUnique(pi *model.PartitionInfo) error {
	newPars := pi.Definitions
	partNames := make(map[string]struct{}, len(newPars))
	for _, newPar := range newPars {
		if _, ok := partNames[newPar.Name.L]; ok {
			return dbterror.ErrSameNamePartition.GenWithStackByArgs(newPar.Name)
		}
		partNames[newPar.Name.L] = struct{}{}
	}
	return nil
}

// checkAddPartitionNameUnique returns an error if any partition being added
// in pi collides (case-insensitively) with an existing partition of tbInfo
// or with another added partition.
func checkAddPartitionNameUnique(tbInfo *model.TableInfo, pi *model.PartitionInfo) error {
	partNames := make(map[string]struct{})
	if tbInfo.Partition != nil {
		oldPars := tbInfo.Partition.Definitions
		for _, oldPar := range oldPars {
			partNames[oldPar.Name.L] = struct{}{}
		}
	}
	newPars := pi.Definitions
	for _, newPar := range newPars {
		if _, ok := partNames[newPar.Name.L]; ok {
			return dbterror.ErrSameNamePartition.GenWithStackByArgs(newPar.Name)
		}
		partNames[newPar.Name.L] = struct{}{}
	}
	return nil
}

// checkReorgPartitionNames checks name validity for REORGANIZE PARTITION:
// every dropped name must exist in p, and after removing the dropped names
// no new definition in pi may collide with a remaining one.
// NOTE(review): a missing dropped name is reported with ErrSameNamePartition
// here — presumably for error-code compatibility; confirm against callers.
func checkReorgPartitionNames(p *model.PartitionInfo, droppedNames []string, pi *model.PartitionInfo) error {
	partNames := make(map[string]struct{})
	oldDefs := p.Definitions
	for _, oldDef := range oldDefs {
		partNames[oldDef.Name.L] = struct{}{}
	}
	for _, delName := range droppedNames {
		droppedName := strings.ToLower(delName)
		if _, ok := partNames[droppedName]; !ok {
			return dbterror.ErrSameNamePartition.GenWithStackByArgs(delName)
		}
		delete(partNames, droppedName)
	}
	newDefs := pi.Definitions
	for _, newDef := range newDefs {
		if _, ok := partNames[newDef.Name.L]; ok {
			return dbterror.ErrSameNamePartition.GenWithStackByArgs(newDef.Name)
		}
		partNames[newDef.Name.L] = struct{}{}
	}
	return nil
}

// checkAndOverridePartitionID copies each old partition's ID onto the
// matching (same-named) new partition during REPAIR TABLE, so data keyed by
// partition ID is preserved. Fails if partition metadata cannot be matched up.
func checkAndOverridePartitionID(newTableInfo, oldTableInfo *model.TableInfo) error {
	// If any old partitionInfo has lost, that means the partition ID lost too, so did the data, repair failed.
	if newTableInfo.Partition == nil {
		return nil
	}
	if oldTableInfo.Partition == nil {
		return dbterror.ErrRepairTableFail.GenWithStackByArgs("Old table doesn't have partitions")
	}
	if newTableInfo.Partition.Type != oldTableInfo.Partition.Type {
		return dbterror.ErrRepairTableFail.GenWithStackByArgs("Partition type should be the same")
	}
	// Check whether partitionType is hash partition.
if newTableInfo.Partition.Type == model.PartitionTypeHash { if newTableInfo.Partition.Num != oldTableInfo.Partition.Num { return dbterror.ErrRepairTableFail.GenWithStackByArgs("Hash partition num should be the same") } } for i, newOne := range newTableInfo.Partition.Definitions { found := false for _, oldOne := range oldTableInfo.Partition.Definitions { // Fix issue 17952 which wanna substitute partition range expr. // So eliminate stringSliceEqual(newOne.LessThan, oldOne.LessThan) here. if newOne.Name.L == oldOne.Name.L { newTableInfo.Partition.Definitions[i].ID = oldOne.ID found = true break } } if !found { return dbterror.ErrRepairTableFail.GenWithStackByArgs("Partition " + newOne.Name.L + " has lost") } } return nil } // checkPartitionFuncValid checks partition function validly. func checkPartitionFuncValid(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) error { if expr == nil { return nil } exprChecker := newPartitionExprChecker(ctx, tblInfo, checkPartitionExprArgs, checkPartitionExprAllowed) expr.Accept(exprChecker) if exprChecker.err != nil { return errors.Trace(exprChecker.err) } if len(exprChecker.columns) == 0 { return errors.Trace(dbterror.ErrWrongExprInPartitionFunc) } return nil } // checkResultOK derives from https://github.com/mysql/mysql-server/blob/5.7/sql/item_timefunc // For partition tables, mysql do not support Constant, random or timezone-dependent expressions // Based on mysql code to check whether field is valid, every time related type has check_valid_arguments_processor function. func checkResultOK(ok bool) error { if !ok { return errors.Trace(dbterror.ErrWrongExprInPartitionFunc) } return nil } // checkPartitionFuncType checks partition function return type. 
func checkPartitionFuncType(ctx sessionctx.Context, expr ast.ExprNode, tblInfo *model.TableInfo) error { if expr == nil { return nil } e, err := expression.RewriteSimpleExprWithTableInfo(ctx, tblInfo, expr, false) if err != nil { return errors.Trace(err) } if e.GetType().EvalType() == types.ETInt { return nil } if col, ok := expr.(*ast.ColumnNameExpr); ok { return errors.Trace(dbterror.ErrNotAllowedTypeInPartition.GenWithStackByArgs(col.Name.Name.L)) } return errors.Trace(dbterror.ErrPartitionFuncNotAllowed.GenWithStackByArgs("PARTITION")) } // checkRangePartitionValue checks whether `less than value` is strictly increasing for each partition. // Side effect: it may simplify the partition range definition from a constant expression to an integer. func checkRangePartitionValue(ctx sessionctx.Context, tblInfo *model.TableInfo) error { pi := tblInfo.Partition defs := pi.Definitions if len(defs) == 0 { return nil } if strings.EqualFold(defs[len(defs)-1].LessThan[0], partitionMaxValue) { defs = defs[:len(defs)-1] } isUnsigned := isPartExprUnsigned(tblInfo) var prevRangeValue interface{} for i := 0; i < len(defs); i++ { if strings.EqualFold(defs[i].LessThan[0], partitionMaxValue) { return errors.Trace(dbterror.ErrPartitionMaxvalue) } currentRangeValue, fromExpr, err := getRangeValue(ctx, defs[i].LessThan[0], isUnsigned) if err != nil { return errors.Trace(err) } if fromExpr { // Constant fold the expression. 
defs[i].LessThan[0] = fmt.Sprintf("%d", currentRangeValue)
		}
		if i == 0 {
			prevRangeValue = currentRangeValue
			continue
		}
		// Bounds must be strictly increasing; compare with the signedness of
		// the partition expression.
		if isUnsigned {
			if currentRangeValue.(uint64) <= prevRangeValue.(uint64) {
				return errors.Trace(dbterror.ErrRangeNotIncreasing)
			}
		} else {
			if currentRangeValue.(int64) <= prevRangeValue.(int64) {
				return errors.Trace(dbterror.ErrRangeNotIncreasing)
			}
		}
		prevRangeValue = currentRangeValue
	}
	return nil
}

// checkListPartitionValue verifies LIST partition values: at least one
// partition must be defined, and no constant value may appear in more than
// one partition (compared via the normalized strings from
// formatListPartitionValue).
func checkListPartitionValue(ctx sessionctx.Context, tblInfo *model.TableInfo) error {
	pi := tblInfo.Partition
	if len(pi.Definitions) == 0 {
		return ast.ErrPartitionsMustBeDefined.GenWithStackByArgs("LIST")
	}
	expStr, err := formatListPartitionValue(ctx, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	partitionsValuesMap := make(map[string]struct{})
	for _, s := range expStr {
		if _, ok := partitionsValuesMap[s]; ok {
			return errors.Trace(dbterror.ErrMultipleDefConstInListPart)
		}
		partitionsValuesMap[s] = struct{}{}
	}
	return nil
}

// formatListPartitionValue evaluates every LIST partition value and returns
// one normalized comparable string per value tuple (columns joined by ",").
// Side effect: integer values in defs[i].InValues are rewritten to their
// evaluated string form. String values are normalized through the column
// collator's sort key so duplicate detection is collation-aware.
func formatListPartitionValue(ctx sessionctx.Context, tblInfo *model.TableInfo) ([]string, error) {
	defs := tblInfo.Partition.Definitions
	pi := tblInfo.Partition
	var colTps []*types.FieldType
	cols := make([]*model.ColumnInfo, 0, len(pi.Columns))
	if len(pi.Columns) == 0 {
		// Plain LIST: a single implicit BIGINT (possibly unsigned) value.
		tp := types.NewFieldType(mysql.TypeLonglong)
		if isPartExprUnsigned(tblInfo) {
			tp.AddFlag(mysql.UnsignedFlag)
		}
		colTps = []*types.FieldType{tp}
	} else {
		// LIST COLUMNS: one type per partition column.
		colTps = make([]*types.FieldType, 0, len(pi.Columns))
		for _, colName := range pi.Columns {
			colInfo := findColumnByName(colName.L, tblInfo)
			if colInfo == nil {
				return nil, errors.Trace(dbterror.ErrFieldNotFoundPart)
			}
			colTps = append(colTps, colInfo.FieldType.Clone())
			cols = append(cols, colInfo)
		}
	}
	haveDefault := false
	exprStrs := make([]string, 0)
	inValueStrs := make([]string, 0, mathutil.Max(len(pi.Columns), 1))
	for i := range defs {
	inValuesLoop:
		for j, vs := range defs[i].InValues {
			inValueStrs = inValueStrs[:0]
			for k, v := range vs {
				// if DEFAULT would be given as string, like "DEFAULT",
				// it would be stored as "'DEFAULT'",
				if strings.EqualFold(v, "DEFAULT") && k == 0 && len(vs) == 1 {
					// Only one DEFAULT partition value is allowed per table.
					if haveDefault {
						return nil, dbterror.ErrMultipleDefConstInListPart
					}
					haveDefault = true
					continue inValuesLoop
				}
				if strings.EqualFold(v, "MAXVALUE") {
					return nil, errors.Trace(dbterror.ErrMaxvalueInValuesIn)
				}
				expr, err := expression.ParseSimpleExprCastWithTableInfo(ctx, v, &model.TableInfo{}, colTps[k])
				if err != nil {
					return nil, errors.Trace(err)
				}
				eval, err := expr.Eval(chunk.Row{})
				if err != nil {
					return nil, errors.Trace(err)
				}
				s, err := eval.ToString()
				if err != nil {
					return nil, errors.Trace(err)
				}
				if eval.IsNull() {
					s = "NULL"
				} else {
					if colTps[k].EvalType() == types.ETInt {
						// Normalize the stored value to the evaluated integer string.
						defs[i].InValues[j][k] = s
					}
					if colTps[k].EvalType() == types.ETString {
						// Use the collation sort key so comparisons respect the column collation.
						s = string(hack.String(collate.GetCollator(cols[k].GetCollate()).Key(s)))
						s = driver.WrapInSingleQuotes(s)
					}
				}
				inValueStrs = append(inValueStrs, s)
			}
			exprStrs = append(exprStrs, strings.Join(inValueStrs, ","))
		}
	}
	return exprStrs, nil
}

// getRangeValue gets an integer from the range value string.
// The returned boolean value indicates whether the input string is a constant expression.
func getRangeValue(ctx sessionctx.Context, str string, unsigned bool) (interface{}, bool, error) {
	// Unsigned bigint was converted to uint64 handle.
	if unsigned {
		// Fast path: a plain unsigned integer literal.
		if value, err := strconv.ParseUint(str, 10, 64); err == nil {
			return value, false, nil
		}
		e, err1 := expression.ParseSimpleExprWithTableInfo(ctx, str, &model.TableInfo{})
		if err1 != nil {
			return 0, false, err1
		}
		res, isNull, err2 := e.EvalInt(ctx, chunk.Row{})
		if err2 == nil && !isNull {
			return uint64(res), true, nil
		}
	} else {
		// Fast path: a plain signed integer literal.
		if value, err := strconv.ParseInt(str, 10, 64); err == nil {
			return value, false, nil
		}
		// The range value maybe not an integer, it could be a constant expression.
		// For example, the following two cases are the same:
		// PARTITION p0 VALUES LESS THAN (TO_SECONDS('2004-01-01'))
		// PARTITION p0 VALUES LESS THAN (63340531200)
		e, err1 := expression.ParseSimpleExprWithTableInfo(ctx, str, &model.TableInfo{})
		if err1 != nil {
			return 0, false, err1
		}
		res, isNull, err2 := e.EvalInt(ctx, chunk.Row{})
		if err2 == nil && !isNull {
			return res, true, nil
		}
	}
	return 0, false, dbterror.ErrNotAllowedTypeInPartition.GenWithStackByArgs(str)
}

// CheckDropTablePartition checks if the partition exists and does not allow deleting the last existing partition in the table.
// partLowerNames must be lowercase partition names; duplicates and unknown
// names are both reported as ErrDropPartitionNonExistent.
func CheckDropTablePartition(meta *model.TableInfo, partLowerNames []string) error {
	pi := meta.Partition
	if pi.Type != model.PartitionTypeRange && pi.Type != model.PartitionTypeList {
		return dbterror.ErrOnlyOnRangeListPartition.GenWithStackByArgs("DROP")
	}

	// To be error compatible with MySQL, we need to do this first!
	// see https://github.com/pingcap/tidb/issues/31681#issuecomment-1015536214
	oldDefs := pi.Definitions
	if len(oldDefs) <= len(partLowerNames) {
		return errors.Trace(dbterror.ErrDropLastPartition)
	}

	dupCheck := make(map[string]bool)
	for _, pn := range partLowerNames {
		found := false
		for _, def := range oldDefs {
			if def.Name.L == pn {
				if _, ok := dupCheck[pn]; ok {
					return errors.Trace(dbterror.ErrDropPartitionNonExistent.GenWithStackByArgs("DROP"))
				}
				dupCheck[pn] = true
				found = true
				break
			}
		}
		if !found {
			return errors.Trace(dbterror.ErrDropPartitionNonExistent.GenWithStackByArgs("DROP"))
		}
	}
	return nil
}

// updateDroppingPartitionInfo move dropping partitions to DroppingDefinitions, and return partitionIDs
func updateDroppingPartitionInfo(tblInfo *model.TableInfo, partLowerNames []string) []int64 {
	oldDefs := tblInfo.Partition.Definitions
	newDefs := make([]model.PartitionDefinition, 0, len(oldDefs)-len(partLowerNames))
	droppingDefs := make([]model.PartitionDefinition, 0, len(partLowerNames))
	pids := make([]int64, 0, len(partLowerNames))

	// consider using a map to probe partLowerNames if too many partLowerNames
	for i := range oldDefs {
		found := false
		for _, partName := range partLowerNames {
			if oldDefs[i].Name.L == partName {
				found = true
				break
			}
		}
		if found {
			pids = append(pids, oldDefs[i].ID)
			droppingDefs = append(droppingDefs, oldDefs[i])
		} else {
			newDefs = append(newDefs, oldDefs[i])
		}
	}

	tblInfo.Partition.Definitions = newDefs
	tblInfo.Partition.DroppingDefinitions = droppingDefs
	return pids
}

// getPartitionDef returns the index and definition of the partition named
// partName (case-insensitive), or table.ErrUnknownPartition.
// NOTE(review): Name.L appears to already be lowercase elsewhere in this
// file, which would make the ToLower+EqualFold combination redundant but
// harmless — confirm against model.CIStr semantics.
func getPartitionDef(tblInfo *model.TableInfo, partName string) (index int, def *model.PartitionDefinition, _ error) {
	defs := tblInfo.Partition.Definitions
	for i := 0; i < len(defs); i++ {
		if strings.EqualFold(defs[i].Name.L, strings.ToLower(partName)) {
			return i, &(defs[i]), nil
		}
	}
	return index, nil, table.ErrUnknownPartition.GenWithStackByArgs(partName, tblInfo.Name.O)
}

// getPartitionIDsFromDefinitions extracts the partition IDs from defs,
// preserving order.
func getPartitionIDsFromDefinitions(defs []model.PartitionDefinition) []int64 {
	pids := make([]int64, 0, len(defs))
	for _, def := range defs {
		pids = append(pids, def.ID)
	}
	return pids
}

// hasGlobalIndex reports whether the table has at least one global index.
func hasGlobalIndex(tblInfo *model.TableInfo) bool {
	for _, idxInfo := range tblInfo.Indices {
		if idxInfo.Global {
			return true
		}
	}
	return false
}

// getTableInfoWithDroppingPartitions builds oldTableInfo including dropping partitions, only used by onDropTablePartition.
// It clones the table info and folds DroppingDefinitions back into
// Definitions so the result describes the pre-drop layout.
func getTableInfoWithDroppingPartitions(t *model.TableInfo) *model.TableInfo {
	p := t.Partition
	nt := t.Clone()
	np := *p
	npd := make([]model.PartitionDefinition, 0, len(p.Definitions)+len(p.DroppingDefinitions))
	npd = append(npd, p.Definitions...)
	npd = append(npd, p.DroppingDefinitions...)
	np.Definitions = npd
	np.DroppingDefinitions = nil
	nt.Partition = &np
	return nt
}

// getTableInfoWithOriginalPartitions builds oldTableInfo including truncating partitions, only used by onTruncateTablePartition.
func getTableInfoWithOriginalPartitions(t *model.TableInfo, oldIDs []int64, newIDs []int64) *model.TableInfo {
	nt := t.Clone()
	np := nt.Partition

	// reconstruct original definitions
	// Map each dropping (original) definition's ID back onto the definition
	// that currently carries the corresponding new ID.
	for _, oldDef := range np.DroppingDefinitions {
		var newID int64
		for i := range newIDs {
			if oldDef.ID == oldIDs[i] {
				newID = newIDs[i]
				break
			}
		}
		for i := range np.Definitions {
			newDef := &np.Definitions[i]
			if newDef.ID == newID {
				newDef.ID = oldDef.ID
				break
			}
		}
	}

	np.DroppingDefinitions = nil
	np.NewPartitionIDs = nil
	return nt
}

// dropLabelRules deletes the PD label rules associated with the given
// partitions of schemaName.tableName in one batched patch.
func dropLabelRules(_ *ddlCtx, schemaName, tableName string, partNames []string) error {
	deleteRules := make([]string, 0, len(partNames))
	for _, partName := range partNames {
		deleteRules = append(deleteRules, fmt.Sprintf(label.PartitionIDFormat, label.IDPrefix, schemaName, tableName, partName))
	}
	// delete batch rules
	patch := label.NewRulePatch([]*label.Rule{}, deleteRules)
	return infosync.UpdateLabelRules(context.TODO(), patch)
}

// onDropTablePartition deletes old partition meta.
// It serves two roles: rolling back a failed ADD/REORGANIZE/ALTER
// PARTITIONING job (first branch), and driving the multi-stage schema-state
// machine for a real DROP PARTITION (public -> delete-only ->
// delete-reorganization -> done).
func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	var partNames []string
	partInfo := model.PartitionInfo{}
	if err := job.DecodeArgs(&partNames, &partInfo); err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID)
	if err != nil {
		return ver, errors.Trace(err)
	}
	if job.Type == model.ActionAddTablePartition || job.Type == model.ActionReorganizePartition ||
		job.Type == model.ActionRemovePartitioning || job.Type == model.ActionAlterTablePartitioning {
		// It is rollback from reorganize partition, just remove DroppingDefinitions from tableInfo
		tblInfo.Partition.DroppingDefinitions = nil
		// It is rollbacked from adding table partition, just remove addingDefinitions from tableInfo.
		physicalTableIDs, pNames, rollbackBundles := rollbackAddingPartitionInfo(tblInfo)
		err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), rollbackBundles)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Wrapf(err, "failed to notify PD the placement rules")
		}
		// TODO: Will this drop LabelRules for existing partitions, if the new partitions have the same name?
		err = dropLabelRules(d, job.SchemaName, tblInfo.Name.L, pNames)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Wrapf(err, "failed to notify PD the label rules")
		}
		if _, err := alterTableLabelRule(job.SchemaName, tblInfo, getIDs([]*model.TableInfo{tblInfo})); err != nil {
			job.State = model.JobStateCancelled
			return ver, err
		}
		if partInfo.DDLType != model.PartitionTypeNone {
			// ALTER TABLE ... PARTITION BY rollback: undo the partitioning change itself.
			// Also remove anything with the new table id
			physicalTableIDs = append(physicalTableIDs, tblInfo.Partition.NewTableID)
			// Reset if it was normal table before
			if tblInfo.Partition.Type == model.PartitionTypeNone {
				tblInfo.Partition = nil
			} else {
				tblInfo.Partition.NewTableID = 0
				tblInfo.Partition.DDLExpr = ""
				tblInfo.Partition.DDLColumns = nil
				tblInfo.Partition.DDLType = model.PartitionTypeNone
			}
		}
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true)
		if err != nil {
			return ver, errors.Trace(err)
		}
		job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
		// The physical IDs are handed to the GC/cleanup machinery via job args.
		job.Args = []interface{}{physicalTableIDs}
		return ver, nil
	}

	var physicalTableIDs []int64
	// In order to skip maintaining the state check in partitionDefinition, TiDB use droppingDefinition instead of state field.
	// So here using `job.SchemaState` to judge what the stage of this job is.
	originalState := job.SchemaState
	switch job.SchemaState {
	case model.StatePublic:
		// If an error occurs, it returns that it cannot delete all partitions or that the partition doesn't exist.
err = CheckDropTablePartition(tblInfo, partNames)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(err)
		}
		// Move the dropped partitions into DroppingDefinitions and collect their IDs.
		physicalTableIDs = updateDroppingPartitionInfo(tblInfo, partNames)
		err = dropLabelRules(d, job.SchemaName, tblInfo.Name.L, partNames)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Wrapf(err, "failed to notify PD the label rules")
		}
		if _, err := alterTableLabelRule(job.SchemaName, tblInfo, getIDs([]*model.TableInfo{tblInfo})); err != nil {
			job.State = model.JobStateCancelled
			return ver, err
		}

		var bundles []*placement.Bundle
		// create placement groups for each dropped partition to keep the data's placement before GC
		// These placements groups will be deleted after GC
		bundles, err = droppedPartitionBundles(t, tblInfo, tblInfo.Partition.DroppingDefinitions)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, err
		}

		var tableBundle *placement.Bundle
		// Recompute table bundle to remove dropped partitions rules from its group
		tableBundle, err = placement.NewTableBundle(t, tblInfo)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(err)
		}
		if tableBundle != nil {
			bundles = append(bundles, tableBundle)
		}

		if err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles); err != nil {
			job.State = model.JobStateCancelled
			return ver, err
		}
		job.SchemaState = model.StateDeleteOnly
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != job.SchemaState)
	case model.StateDeleteOnly:
		// This state is not a real 'DeleteOnly' state, because tidb does not maintaining the state check in partitionDefinition.
		// Insert this state to confirm all servers can not see the old partitions when reorg is running,
		// so that no new data will be inserted into old partitions when reorganizing.
		job.SchemaState = model.StateDeleteReorganization
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != job.SchemaState)
	case model.StateDeleteReorganization:
		oldTblInfo := getTableInfoWithDroppingPartitions(tblInfo)
		physicalTableIDs = getPartitionIDsFromDefinitions(tblInfo.Partition.DroppingDefinitions)
		tbl, err := getTable(d.store, job.SchemaID, oldTblInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
		dbInfo, err := t.GetDatabase(job.SchemaID)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// If table has global indexes, we need reorg to clean up them.
		if pt, ok := tbl.(table.PartitionedTable); ok && hasGlobalIndex(tblInfo) {
			// Build elements for compatible with modify column type. elements will not be used when reorganizing.
			elements := make([]*meta.Element, 0, len(tblInfo.Indices))
			for _, idxInfo := range tblInfo.Indices {
				if idxInfo.Global {
					elements = append(elements, &meta.Element{ID: idxInfo.ID, TypeKey: meta.IndexElementKey})
				}
			}
			sctx, err1 := w.sessPool.Get()
			if err1 != nil {
				return ver, err1
			}
			defer w.sessPool.Put(sctx)
			rh := newReorgHandler(sess.NewSession(sctx))
			reorgInfo, err := getReorgInfoFromPartitions(d.jobContext(job.ID), d, rh, job, dbInfo, pt, physicalTableIDs, elements)
			if err != nil || reorgInfo.first {
				// If we run reorg firstly, we should update the job snapshot version
				// and then run the reorg next time.
				return ver, errors.Trace(err)
			}
			err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (dropIndexErr error) {
				defer tidbutil.Recover(metrics.LabelDDL, "onDropTablePartition",
					func() {
						dropIndexErr = dbterror.ErrCancelledDDLJob.GenWithStack("drop partition panic")
					}, false)
				return w.cleanupGlobalIndexes(pt, physicalTableIDs, reorgInfo)
			})
			if err != nil {
				if dbterror.ErrWaitReorgTimeout.Equal(err) {
					// if timeout, we should return, check for the owner and re-wait job done.
					return ver, nil
				}
				if dbterror.ErrPausedDDLJob.Equal(err) {
					// if ErrPausedDDLJob, we should return, check for the owner and re-wait job done.
					return ver, nil
				}
				return ver, errors.Trace(err)
			}
		}
		if tblInfo.TiFlashReplica != nil {
			removeTiFlashAvailablePartitionIDs(tblInfo, physicalTableIDs)
		}
		tblInfo.Partition.DroppingDefinitions = nil
		// used by ApplyDiff in updateSchemaVersion
		job.CtxVars = []interface{}{physicalTableIDs}
		ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true)
		if err != nil {
			return ver, errors.Trace(err)
		}
		job.SchemaState = model.StateNone
		job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
		asyncNotifyEvent(d, &util.Event{Tp: model.ActionDropTablePartition, TableInfo: tblInfo, PartInfo: &model.PartitionInfo{Definitions: tblInfo.Partition.Definitions}})
		// A background job will be created to delete old partition data.
		job.Args = []interface{}{physicalTableIDs}
	default:
		err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("partition", job.SchemaState)
	}
	return ver, errors.Trace(err)
}

// removeTiFlashAvailablePartitionIDs removes pids from the table's TiFlash
// replica AvailablePartitionIDs list (in place, shifting remaining IDs left).
func removeTiFlashAvailablePartitionIDs(tblInfo *model.TableInfo, pids []int64) {
	// Remove the partitions
	ids := tblInfo.TiFlashReplica.AvailablePartitionIDs
	// Rarely called, so OK to take some time, to make it easy
	for _, id := range pids {
		for i, avail := range ids {
			if id == avail {
				tmp := ids[:i]
				tmp = append(tmp, ids[i+1:]...)
				ids = tmp
				break
			}
		}
	}
	tblInfo.TiFlashReplica.AvailablePartitionIDs = ids
}

// onTruncateTablePartition truncates old partition meta.
func (w *worker) onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, error) { var ver int64 var oldIDs, newIDs []int64 if err := job.DecodeArgs(&oldIDs, &newIDs); err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } if len(oldIDs) != len(newIDs) { job.State = model.JobStateCancelled return ver, errors.Trace(errors.New("len(oldIDs) must be the same as len(newIDs)")) } tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } pi := tblInfo.GetPartitionInfo() if pi == nil { return ver, errors.Trace(dbterror.ErrPartitionMgmtOnNonpartitioned) } if !hasGlobalIndex(tblInfo) { oldPartitions := make([]model.PartitionDefinition, 0, len(oldIDs)) newPartitions := make([]model.PartitionDefinition, 0, len(oldIDs)) for k, oldID := range oldIDs { for i := 0; i < len(pi.Definitions); i++ { def := &pi.Definitions[i] if def.ID == oldID { oldPartitions = append(oldPartitions, def.Clone()) def.ID = newIDs[k] // Shallow copy only use the def.ID in event handle. newPartitions = append(newPartitions, *def) break } } } if len(newPartitions) == 0 { job.State = model.JobStateCancelled return ver, table.ErrUnknownPartition.GenWithStackByArgs(fmt.Sprintf("pid:%v", oldIDs), tblInfo.Name.O) } if err = clearTruncatePartitionTiflashStatus(tblInfo, newPartitions, oldIDs); err != nil { job.State = model.JobStateCancelled return ver, err } if err = updateTruncatePartitionLabelRules(job, t, oldPartitions, newPartitions, tblInfo, oldIDs); err != nil { job.State = model.JobStateCancelled return ver, err } preSplitAndScatter(w.sess.Context, d.store, tblInfo, newPartitions) job.CtxVars = []interface{}{oldIDs, newIDs} ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) } // Finish this job. 
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) asyncNotifyEvent(d, &util.Event{Tp: model.ActionTruncateTablePartition, TableInfo: tblInfo, PartInfo: &model.PartitionInfo{Definitions: newPartitions}}) // A background job will be created to delete old partition data. job.Args = []interface{}{oldIDs} return ver, err } // When table has global index, public->deleteOnly->deleteReorg->none schema changes should be handled. switch job.SchemaState { case model.StatePublic: // Step1: generate new partition ids truncatingDefinitions := make([]model.PartitionDefinition, 0, len(oldIDs)) for i, oldID := range oldIDs { for j := 0; j < len(pi.Definitions); j++ { def := &pi.Definitions[j] if def.ID == oldID { truncatingDefinitions = append(truncatingDefinitions, def.Clone()) def.ID = newIDs[i] break } } } pi.DroppingDefinitions = truncatingDefinitions pi.NewPartitionIDs = newIDs[:] job.SchemaState = model.StateDeleteOnly ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) case model.StateDeleteOnly: // This state is not a real 'DeleteOnly' state, because tidb does not maintaining the state check in partitionDefinition. // Insert this state to confirm all servers can not see the old partitions when reorg is running, // so that no new data will be inserted into old partitions when reorganizing. job.SchemaState = model.StateDeleteReorganization ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) case model.StateDeleteReorganization: // Step2: clear global index rows. physicalTableIDs := oldIDs oldTblInfo := getTableInfoWithOriginalPartitions(tblInfo, oldIDs, newIDs) tbl, err := getTable(d.store, job.SchemaID, oldTblInfo) if err != nil { return ver, errors.Trace(err) } dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { return ver, errors.Trace(err) } // If table has global indexes, we need reorg to clean up them. 
if pt, ok := tbl.(table.PartitionedTable); ok && hasGlobalIndex(tblInfo) { // Build elements for compatible with modify column type. elements will not be used when reorganizing. elements := make([]*meta.Element, 0, len(tblInfo.Indices)) for _, idxInfo := range tblInfo.Indices { if idxInfo.Global { elements = append(elements, &meta.Element{ID: idxInfo.ID, TypeKey: meta.IndexElementKey}) } } sctx, err1 := w.sessPool.Get() if err1 != nil { return ver, err1 } defer w.sessPool.Put(sctx) rh := newReorgHandler(sess.NewSession(sctx)) reorgInfo, err := getReorgInfoFromPartitions(d.jobContext(job.ID), d, rh, job, dbInfo, pt, physicalTableIDs, elements) if err != nil || reorgInfo.first { // If we run reorg firstly, we should update the job snapshot version // and then run the reorg next time. return ver, errors.Trace(err) } err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (dropIndexErr error) { defer tidbutil.Recover(metrics.LabelDDL, "onDropTablePartition", func() { dropIndexErr = dbterror.ErrCancelledDDLJob.GenWithStack("drop partition panic") }, false) return w.cleanupGlobalIndexes(pt, physicalTableIDs, reorgInfo) }) if err != nil { if dbterror.ErrWaitReorgTimeout.Equal(err) { // if timeout, we should return, check for the owner and re-wait job done. 
return ver, nil } return ver, errors.Trace(err) } } // Step3: generate new partition ids and finish rest works oldPartitions := make([]model.PartitionDefinition, 0, len(oldIDs)) newPartitions := make([]model.PartitionDefinition, 0, len(oldIDs)) for _, oldDef := range pi.DroppingDefinitions { var newID int64 for i := range oldIDs { if oldDef.ID == oldIDs[i] { newID = newIDs[i] break } } for i := 0; i < len(pi.Definitions); i++ { def := &pi.Definitions[i] if newID == def.ID { oldPartitions = append(oldPartitions, oldDef.Clone()) newPartitions = append(newPartitions, def.Clone()) break } } } if len(newPartitions) == 0 { job.State = model.JobStateCancelled return ver, table.ErrUnknownPartition.GenWithStackByArgs(fmt.Sprintf("pid:%v", oldIDs), tblInfo.Name.O) } if err = clearTruncatePartitionTiflashStatus(tblInfo, newPartitions, oldIDs); err != nil { job.State = model.JobStateCancelled return ver, err } if err = updateTruncatePartitionLabelRules(job, t, oldPartitions, newPartitions, tblInfo, oldIDs); err != nil { job.State = model.JobStateCancelled return ver, err } // Step4: clear DroppingDefinitions and finish job. tblInfo.Partition.DroppingDefinitions = nil tblInfo.Partition.NewPartitionIDs = nil preSplitAndScatter(w.sess.Context, d.store, tblInfo, newPartitions) // used by ApplyDiff in updateSchemaVersion job.CtxVars = []interface{}{oldIDs, newIDs} ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) } // Finish this job. job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) asyncNotifyEvent(d, &util.Event{Tp: model.ActionTruncateTablePartition, TableInfo: tblInfo, PartInfo: &model.PartitionInfo{Definitions: newPartitions}}) // A background job will be created to delete old partition data. 
job.Args = []interface{}{oldIDs} default: err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("partition", job.SchemaState) } return ver, errors.Trace(err) } func clearTruncatePartitionTiflashStatus(tblInfo *model.TableInfo, newPartitions []model.PartitionDefinition, oldIDs []int64) error { // Clear the tiflash replica available status. if tblInfo.TiFlashReplica != nil { e := infosync.ConfigureTiFlashPDForPartitions(true, &newPartitions, tblInfo.TiFlashReplica.Count, &tblInfo.TiFlashReplica.LocationLabels, tblInfo.ID) failpoint.Inject("FailTiFlashTruncatePartition", func() { e = errors.New("enforced error") }) if e != nil { logutil.BgLogger().Error("ConfigureTiFlashPDForPartitions fails", zap.Error(e)) return e } tblInfo.TiFlashReplica.Available = false // Set partition replica become unavailable. removeTiFlashAvailablePartitionIDs(tblInfo, oldIDs) } return nil } func updateTruncatePartitionLabelRules(job *model.Job, t *meta.Meta, oldPartitions, newPartitions []model.PartitionDefinition, tblInfo *model.TableInfo, oldIDs []int64) error { bundles, err := placement.NewPartitionListBundles(t, newPartitions) if err != nil { return errors.Trace(err) } tableBundle, err := placement.NewTableBundle(t, tblInfo) if err != nil { job.State = model.JobStateCancelled return errors.Trace(err) } if tableBundle != nil { bundles = append(bundles, tableBundle) } // create placement groups for each dropped partition to keep the data's placement before GC // These placements groups will be deleted after GC keepDroppedBundles, err := droppedPartitionBundles(t, tblInfo, oldPartitions) if err != nil { job.State = model.JobStateCancelled return errors.Trace(err) } bundles = append(bundles, keepDroppedBundles...) 
err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles) if err != nil { return errors.Wrapf(err, "failed to notify PD the placement rules") } tableID := fmt.Sprintf(label.TableIDFormat, label.IDPrefix, job.SchemaName, tblInfo.Name.L) oldPartRules := make([]string, 0, len(oldIDs)) for _, newPartition := range newPartitions { oldPartRuleID := fmt.Sprintf(label.PartitionIDFormat, label.IDPrefix, job.SchemaName, tblInfo.Name.L, newPartition.Name.L) oldPartRules = append(oldPartRules, oldPartRuleID) } rules, err := infosync.GetLabelRules(context.TODO(), append(oldPartRules, tableID)) if err != nil { return errors.Wrapf(err, "failed to get label rules from PD") } newPartIDs := getPartitionIDs(tblInfo) newRules := make([]*label.Rule, 0, len(oldIDs)+1) if tr, ok := rules[tableID]; ok { newRules = append(newRules, tr.Clone().Reset(job.SchemaName, tblInfo.Name.L, "", append(newPartIDs, tblInfo.ID)...)) } for idx, newPartition := range newPartitions { if pr, ok := rules[oldPartRules[idx]]; ok { newRules = append(newRules, pr.Clone().Reset(job.SchemaName, tblInfo.Name.L, newPartition.Name.L, newPartition.ID)) } } patch := label.NewRulePatch(newRules, []string{}) err = infosync.UpdateLabelRules(context.TODO(), patch) if err != nil { return errors.Wrapf(err, "failed to notify PD the label rules") } return nil } // onExchangeTablePartition exchange partition data func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { var ( // defID only for updateSchemaVersion defID int64 ptSchemaID int64 ptID int64 partName string withValidation bool ) if err := job.DecodeArgs(&defID, &ptSchemaID, &ptID, &partName, &withValidation); err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } ntDbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } nt, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil 
{ return ver, errors.Trace(err) } if job.IsRollingback() { return rollbackExchangeTablePartition(d, t, job, nt) } pt, err := getTableInfo(t, ptID, ptSchemaID) if err != nil { if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err) { job.State = model.JobStateCancelled } return ver, errors.Trace(err) } index, partDef, err := getPartitionDef(pt, partName) if err != nil { return ver, errors.Trace(err) } if job.SchemaState == model.StateNone { if pt.State != model.StatePublic { job.State = model.JobStateCancelled return ver, dbterror.ErrInvalidDDLState.GenWithStack("table %s is not in public, but %s", pt.Name, pt.State) } err = checkExchangePartition(pt, nt) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } err = checkTableDefCompatible(pt, nt) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } err = checkExchangePartitionPlacementPolicy(t, nt.PlacementPolicyRef, pt.PlacementPolicyRef, partDef.PlacementPolicyRef) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } if defID != partDef.ID { logutil.BgLogger().Info("Exchange partition id changed, updating to actual id", zap.String("category", "ddl"), zap.String("job", job.String()), zap.Int64("defID", defID), zap.Int64("partDef.ID", partDef.ID)) job.Args[0] = partDef.ID defID = partDef.ID err = updateDDLJob2Table(w.sess, job, true) if err != nil { return ver, errors.Trace(err) } } nt.ExchangePartitionInfo = &model.ExchangePartitionInfo{ ExchangePartitionID: ptID, ExchangePartitionDefID: defID, } // We need an interim schema version, // so there are no non-matching rows inserted // into the table using the schema version // before the exchange is made. 
job.SchemaState = model.StateWriteOnly return updateVersionAndTableInfoWithCheck(d, t, job, nt, true) } // From now on, nt (the non-partitioned table) has // ExchangePartitionInfo set, meaning it is restricted // to only allow writes that would match the // partition to be exchange with. // So we need to rollback that change, instead of just cancelling. if d.lease > 0 { delayForAsyncCommit() } if defID != partDef.ID { // Should never happen, should have been updated above, in previous state! logutil.BgLogger().Error("Exchange partition id changed, updating to actual id", zap.String("category", "ddl"), zap.String("job", job.String()), zap.Int64("defID", defID), zap.Int64("partDef.ID", partDef.ID)) job.Args[0] = partDef.ID defID = partDef.ID err = updateDDLJob2Table(w.sess, job, true) if err != nil { return ver, errors.Trace(err) } } if withValidation { err = checkExchangePartitionRecordValidation(w, pt, index, ntDbInfo.Name, nt.Name) if err != nil { job.State = model.JobStateRollingback return ver, errors.Trace(err) } } // partition table auto IDs. ptAutoIDs, err := t.GetAutoIDAccessors(ptSchemaID, ptID).Get() if err != nil { return ver, errors.Trace(err) } // non-partition table auto IDs. 
ntAutoIDs, err := t.GetAutoIDAccessors(job.SchemaID, nt.ID).Get() if err != nil { return ver, errors.Trace(err) } if pt.TiFlashReplica != nil { for i, id := range pt.TiFlashReplica.AvailablePartitionIDs { if id == partDef.ID { pt.TiFlashReplica.AvailablePartitionIDs[i] = nt.ID break } } } // Recreate non-partition table meta info, // by first delete it with the old table id err = t.DropTableOrView(job.SchemaID, nt.ID) if err != nil { return ver, errors.Trace(err) } // exchange table meta id partDef.ID, nt.ID = nt.ID, partDef.ID err = t.UpdateTable(ptSchemaID, pt) if err != nil { return ver, errors.Trace(err) } err = t.CreateTableOrView(job.SchemaID, nt) if err != nil { return ver, errors.Trace(err) } failpoint.Inject("exchangePartitionErr", func(val failpoint.Value) { if val.(bool) { failpoint.Return(ver, errors.New("occur an error after updating partition id")) } }) // Set both tables to the maximum auto IDs between normal table and partitioned table. newAutoIDs := meta.AutoIDGroup{ RowID: mathutil.Max(ptAutoIDs.RowID, ntAutoIDs.RowID), IncrementID: mathutil.Max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID), RandomID: mathutil.Max(ptAutoIDs.RandomID, ntAutoIDs.RandomID), } err = t.GetAutoIDAccessors(ptSchemaID, pt.ID).Put(newAutoIDs) if err != nil { return ver, errors.Trace(err) } err = t.GetAutoIDAccessors(job.SchemaID, nt.ID).Put(newAutoIDs) if err != nil { return ver, errors.Trace(err) } failpoint.Inject("exchangePartitionAutoID", func(val failpoint.Value) { if val.(bool) { seCtx, err := w.sessPool.Get() defer w.sessPool.Put(seCtx) if err != nil { failpoint.Return(ver, err) } se := sess.NewSession(seCtx) _, err = se.Execute(context.Background(), "insert ignore into test.pt values (40000000)", "exchange_partition_test") if err != nil { failpoint.Return(ver, err) } } }) // the follow code is a swap function for rules of two partitions // though partitions has exchanged their ID, swap still take effect bundles, err := bundlesForExchangeTablePartition(t, pt, 
partDef, nt) if err != nil { return ver, errors.Trace(err) } if err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles); err != nil { return ver, errors.Wrapf(err, "failed to notify PD the placement rules") } ntrID := fmt.Sprintf(label.TableIDFormat, label.IDPrefix, job.SchemaName, nt.Name.L) ptrID := fmt.Sprintf(label.PartitionIDFormat, label.IDPrefix, job.SchemaName, pt.Name.L, partDef.Name.L) rules, err := infosync.GetLabelRules(context.TODO(), []string{ntrID, ptrID}) if err != nil { return 0, errors.Wrapf(err, "failed to get PD the label rules") } ntr := rules[ntrID] ptr := rules[ptrID] // This must be a bug, nt cannot be partitioned! partIDs := getPartitionIDs(nt) var setRules []*label.Rule var deleteRules []string if ntr != nil && ptr != nil { setRules = append(setRules, ntr.Clone().Reset(job.SchemaName, pt.Name.L, partDef.Name.L, partDef.ID)) setRules = append(setRules, ptr.Clone().Reset(job.SchemaName, nt.Name.L, "", append(partIDs, nt.ID)...)) } else if ptr != nil { setRules = append(setRules, ptr.Clone().Reset(job.SchemaName, nt.Name.L, "", append(partIDs, nt.ID)...)) // delete ptr deleteRules = append(deleteRules, ptrID) } else if ntr != nil { setRules = append(setRules, ntr.Clone().Reset(job.SchemaName, pt.Name.L, partDef.Name.L, partDef.ID)) // delete ntr deleteRules = append(deleteRules, ntrID) } patch := label.NewRulePatch(setRules, deleteRules) err = infosync.UpdateLabelRules(context.TODO(), patch) if err != nil { return ver, errors.Wrapf(err, "failed to notify PD the label rules") } job.SchemaState = model.StatePublic nt.ExchangePartitionInfo = nil ver, err = updateVersionAndTableInfoWithCheck(d, t, job, nt, true) if err != nil { return ver, errors.Trace(err) } job.FinishTableJob(model.JobStateDone, model.StateNone, ver, pt) return ver, nil } func getReorgPartitionInfo(t *meta.Meta, job *model.Job) (*model.TableInfo, []string, *model.PartitionInfo, []model.PartitionDefinition, []model.PartitionDefinition, error) { schemaID := 
job.SchemaID tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, nil, nil, nil, errors.Trace(err) } partInfo := &model.PartitionInfo{} var partNames []string err = job.DecodeArgs(&partNames, &partInfo) if err != nil { job.State = model.JobStateCancelled return nil, nil, nil, nil, nil, errors.Trace(err) } var addingDefs, droppingDefs []model.PartitionDefinition if tblInfo.Partition != nil { addingDefs = tblInfo.Partition.AddingDefinitions droppingDefs = tblInfo.Partition.DroppingDefinitions tblInfo.Partition.NewTableID = partInfo.NewTableID tblInfo.Partition.DDLType = partInfo.Type tblInfo.Partition.DDLExpr = partInfo.Expr tblInfo.Partition.DDLColumns = partInfo.Columns } else { tblInfo.Partition = getPartitionInfoTypeNone() tblInfo.Partition.NewTableID = partInfo.NewTableID tblInfo.Partition.Definitions[0].ID = tblInfo.ID tblInfo.Partition.DDLType = partInfo.Type tblInfo.Partition.DDLExpr = partInfo.Expr tblInfo.Partition.DDLColumns = partInfo.Columns } if len(addingDefs) == 0 { addingDefs = []model.PartitionDefinition{} } if len(droppingDefs) == 0 { droppingDefs = []model.PartitionDefinition{} } return tblInfo, partNames, partInfo, droppingDefs, addingDefs, nil } func (w *worker) onReorganizePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { // Handle the rolling back job if job.IsRollingback() { ver, err := w.onDropTablePartition(d, t, job) if err != nil { return ver, errors.Trace(err) } return ver, nil } tblInfo, partNames, partInfo, _, addingDefinitions, err := getReorgPartitionInfo(t, job) if err != nil { return ver, err } switch job.SchemaState { case model.StateNone: // job.SchemaState == model.StateNone means the job is in the initial state of reorg partition. // Here should use partInfo from job directly and do some check action. // In case there was a race for queueing different schema changes on the same // table and the checks was not done on the current schema version. 
// The partInfo may have been checked against an older schema version for example. // If the check is done here, it does not need to be repeated, since no other // DDL on the same table can be run concurrently. num := len(partInfo.Definitions) - len(partNames) + len(tblInfo.Partition.Definitions) err = checkAddPartitionTooManyPartitions(uint64(num)) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } err = checkReorgPartitionNames(tblInfo.Partition, partNames, partInfo) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } // Re-check that the dropped/added partitions are compatible with current definition firstPartIdx, lastPartIdx, idMap, err := getReplacedPartitionIDs(partNames, tblInfo.Partition) if err != nil { job.State = model.JobStateCancelled return ver, err } sctx := w.sess.Context if err = checkReorgPartitionDefs(sctx, job.Type, tblInfo, partInfo, firstPartIdx, lastPartIdx, idMap); err != nil { job.State = model.JobStateCancelled return ver, err } // move the adding definition into tableInfo. updateAddingPartitionInfo(partInfo, tblInfo) orgDefs := tblInfo.Partition.Definitions _ = updateDroppingPartitionInfo(tblInfo, partNames) // Reset original partitions, and keep DroppedDefinitions tblInfo.Partition.Definitions = orgDefs // modify placement settings for _, def := range tblInfo.Partition.AddingDefinitions { if _, err = checkPlacementPolicyRefValidAndCanNonValidJob(t, job, def.PlacementPolicyRef); err != nil { // job.State = model.JobStateCancelled may be set depending on error in function above. return ver, errors.Trace(err) } } // From now on we cannot just cancel the DDL, we must roll back if changesMade! changesMade := false if tblInfo.TiFlashReplica != nil { // Must set placement rule, and make sure it succeeds. 
if err := infosync.ConfigureTiFlashPDForPartitions(true, &tblInfo.Partition.AddingDefinitions, tblInfo.TiFlashReplica.Count, &tblInfo.TiFlashReplica.LocationLabels, tblInfo.ID); err != nil { logutil.BgLogger().Error("ConfigureTiFlashPDForPartitions fails", zap.Error(err)) job.State = model.JobStateCancelled return ver, errors.Trace(err) } changesMade = true // In the next step, StateDeleteOnly, wait to verify the TiFlash replicas are OK } bundles, err := alterTablePartitionBundles(t, tblInfo, tblInfo.Partition.AddingDefinitions) if err != nil { if !changesMade { job.State = model.JobStateCancelled return ver, errors.Trace(err) } return convertAddTablePartitionJob2RollbackJob(d, t, job, err, tblInfo) } if len(bundles) > 0 { if err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles); err != nil { if !changesMade { job.State = model.JobStateCancelled return ver, errors.Wrapf(err, "failed to notify PD the placement rules") } return convertAddTablePartitionJob2RollbackJob(d, t, job, err, tblInfo) } changesMade = true } ids := getIDs([]*model.TableInfo{tblInfo}) for _, p := range tblInfo.Partition.AddingDefinitions { ids = append(ids, p.ID) } changed, err := alterTableLabelRule(job.SchemaName, tblInfo, ids) changesMade = changesMade || changed if err != nil { if !changesMade { job.State = model.JobStateCancelled return ver, err } return convertAddTablePartitionJob2RollbackJob(d, t, job, err, tblInfo) } // Doing the preSplitAndScatter here, since all checks are completed, // and we will soon start writing to the new partitions. if s, ok := d.store.(kv.SplittableStore); ok && s != nil { // partInfo only contains the AddingPartitions splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, true) } // Assume we cannot have more than MaxUint64 rows, set the progress to 1/10 of that. 
metrics.GetBackfillProgressByLabel(metrics.LblReorgPartition, job.SchemaName, tblInfo.Name.String()).Set(0.1 / float64(math.MaxUint64)) job.SchemaState = model.StateDeleteOnly tblInfo.Partition.DDLState = model.StateDeleteOnly ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) } // Is really both StateDeleteOnly AND StateWriteOnly needed? // If transaction A in WriteOnly inserts row 1 (into both new and old partition set) // and then transaction B in DeleteOnly deletes that row (in both new and old) // does really transaction B need to do the delete in the new partition? // Yes, otherwise it would still be there when the WriteReorg happens, // and WriteReorg would only copy existing rows to the new table, so unless it is // deleted it would result in a ghost row! // What about update then? // Updates also need to be handled for new partitions in DeleteOnly, // since it would not be overwritten during Reorganize phase. // BUT if the update results in adding in one partition and deleting in another, // THEN only the delete must happen in the new partition set, not the insert! case model.StateDeleteOnly: // This state is to confirm all servers can not see the new partitions when reorg is running, // so that all deletes will be done in both old and new partitions when in either DeleteOnly // or WriteOnly state. // Also using the state for checking that the optional TiFlash replica is available, making it // in a state without (much) data and easy to retry without side effects. // Reason for having it here, is to make it easy for retry, and better to make sure it is in-sync // as early as possible, to avoid a long wait after the data copying. if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available { // For available state, the new added partition should wait its replica to // be finished, otherwise the query to this partition will be blocked. 
count := tblInfo.TiFlashReplica.Count needRetry, err := checkPartitionReplica(count, addingDefinitions, d) if err != nil { // need to rollback, since we tried to register the new // partitions before! return convertAddTablePartitionJob2RollbackJob(d, t, job, err, tblInfo) } if needRetry { // The new added partition hasn't been replicated. // Do nothing to the job this time, wait next worker round. time.Sleep(tiflashCheckTiDBHTTPAPIHalfInterval) // Set the error here which will lead this job exit when it's retry times beyond the limitation. return ver, errors.Errorf("[ddl] add partition wait for tiflash replica to complete") } // When TiFlash Replica is ready, we must move them into `AvailablePartitionIDs`. // Since onUpdateFlashReplicaStatus cannot see the partitions yet (not public) for _, d := range addingDefinitions { tblInfo.TiFlashReplica.AvailablePartitionIDs = append(tblInfo.TiFlashReplica.AvailablePartitionIDs, d.ID) } } tblInfo.Partition.DDLState = model.StateWriteOnly metrics.GetBackfillProgressByLabel(metrics.LblReorgPartition, job.SchemaName, tblInfo.Name.String()).Set(0.2 / float64(math.MaxUint64)) ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) job.SchemaState = model.StateWriteOnly case model.StateWriteOnly: // Insert this state to confirm all servers can see the new partitions when reorg is running, // so that new data will be updated in both old and new partitions when reorganizing. 
job.SnapshotVer = 0 tblInfo.Partition.DDLState = model.StateWriteReorganization metrics.GetBackfillProgressByLabel(metrics.LblReorgPartition, job.SchemaName, tblInfo.Name.String()).Set(0.3 / float64(math.MaxUint64)) ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) job.SchemaState = model.StateWriteReorganization case model.StateWriteReorganization: physicalTableIDs := getPartitionIDsFromDefinitions(tblInfo.Partition.DroppingDefinitions) tbl, err2 := getTable(d.store, job.SchemaID, tblInfo) if err2 != nil { return ver, errors.Trace(err2) } // TODO: If table has global indexes, we need reorg to clean up them. // and then add the new partition ids back... if _, ok := tbl.(table.PartitionedTable); ok && hasGlobalIndex(tblInfo) { err = errors.Trace(dbterror.ErrCancelledDDLJob.GenWithStack("global indexes is not supported yet for reorganize partition")) return convertAddTablePartitionJob2RollbackJob(d, t, job, err, tblInfo) } var done bool done, ver, err = doPartitionReorgWork(w, d, t, job, tbl, physicalTableIDs) if !done { return ver, err } firstPartIdx, lastPartIdx, idMap, err2 := getReplacedPartitionIDs(partNames, tblInfo.Partition) failpoint.Inject("reorgPartWriteReorgReplacedPartIDsFail", func(val failpoint.Value) { if val.(bool) { err2 = errors.New("Injected error by reorgPartWriteReorgReplacedPartIDsFail") } }) if err2 != nil { return ver, err2 } newDefs := getReorganizedDefinitions(tblInfo.Partition, firstPartIdx, lastPartIdx, idMap) // From now on, use the new partitioning, but keep the Adding and Dropping for double write tblInfo.Partition.Definitions = newDefs tblInfo.Partition.Num = uint64(len(newDefs)) if job.Type == model.ActionAlterTablePartitioning || job.Type == model.ActionRemovePartitioning { tblInfo.Partition.Type, tblInfo.Partition.DDLType = tblInfo.Partition.DDLType, tblInfo.Partition.Type tblInfo.Partition.Expr, tblInfo.Partition.DDLExpr = tblInfo.Partition.DDLExpr, tblInfo.Partition.Expr tblInfo.Partition.Columns, 
tblInfo.Partition.DDLColumns = tblInfo.Partition.DDLColumns, tblInfo.Partition.Columns } // Now all the data copying is done, but we cannot simply remove the droppingDefinitions // since they are a part of the normal Definitions that other nodes with // the current schema version. So we need to double write for one more schema version tblInfo.Partition.DDLState = model.StateDeleteReorganization ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) job.SchemaState = model.StateDeleteReorganization case model.StateDeleteReorganization: // Drop the droppingDefinitions and finish the DDL // This state is needed for the case where client A sees the schema // with version of StateWriteReorg and would not see updates of // client B that writes to the new partitions, previously // addingDefinitions, since it would not double write to // the droppingDefinitions during this time // By adding StateDeleteReorg state, client B will write to both // the new (previously addingDefinitions) AND droppingDefinitions // Register the droppingDefinitions ids for rangeDelete // and the addingDefinitions for handling in the updateSchemaVersion physicalTableIDs := getPartitionIDsFromDefinitions(tblInfo.Partition.DroppingDefinitions) newIDs := getPartitionIDsFromDefinitions(partInfo.Definitions) statisticsPartInfo := &model.PartitionInfo{Definitions: tblInfo.Partition.AddingDefinitions} tblInfo.Partition.DroppingDefinitions = nil tblInfo.Partition.AddingDefinitions = nil tblInfo.Partition.DDLState = model.StateNone if job.Type != model.ActionReorganizePartition { // ALTER TABLE ... PARTITION BY // REMOVE PARTITIONING // New Table ID, so needs to recreate the table by drop+create. 
oldTblID := tblInfo.ID // Overloading the NewTableID here with the oldTblID instead, // for keeping the old global statistics statisticsPartInfo.NewTableID = oldTblID err = t.DropTableOrView(job.SchemaID, tblInfo.ID) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } // TODO: Handle bundles? // TODO: How to carrie over AUTO_INCREMENT etc.? // Check if they are carried over in ApplyDiff?!? err = t.GetAutoIDAccessors(job.SchemaID, tblInfo.ID).Del() if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } tblInfo.ID = partInfo.NewTableID if partInfo.DDLType != model.PartitionTypeNone { // if partitioned before, then also add the old table ID, // otherwise it will be the already included first partition physicalTableIDs = append(physicalTableIDs, oldTblID) } if job.Type == model.ActionRemovePartitioning { tblInfo.Partition = nil } else { // ALTER TABLE ... PARTITION BY //tblInfo.Partition.Type = tblInfo.Partition.DDLType //tblInfo.Partition.Expr = tblInfo.Partition.DDLExpr //tblInfo.Partition.Columns = tblInfo.Partition.DDLColumns tblInfo.Partition.DDLType = model.PartitionTypeNone tblInfo.Partition.DDLExpr = "" tblInfo.Partition.DDLColumns = nil tblInfo.Partition.NewTableID = 0 } // TODO: Add failpoint here? err = t.CreateTableOrView(job.SchemaID, tblInfo) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } } job.CtxVars = []interface{}{physicalTableIDs, newIDs} ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) failpoint.Inject("reorgPartWriteReorgSchemaVersionUpdateFail", func(val failpoint.Value) { if val.(bool) { err = errors.New("Injected error by reorgPartWriteReorgSchemaVersionUpdateFail") } }) if err != nil { return ver, errors.Trace(err) } job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) // How to handle this? // Seems to only trigger asynchronous update of statistics. // Should it actually be synchronous? 
// Include the old table ID, if changed, which may contain global statistics, // so it can be reused for the new (non)partitioned table. asyncNotifyEvent(d, &util.Event{Tp: job.Type, TableInfo: tblInfo, PartInfo: statisticsPartInfo}) // A background job will be created to delete old partition data. job.Args = []interface{}{physicalTableIDs} default: err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("partition", job.SchemaState) } return ver, errors.Trace(err) } func doPartitionReorgWork(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, physTblIDs []int64) (done bool, ver int64, err error) { job.ReorgMeta.ReorgTp = model.ReorgTypeTxn sctx, err1 := w.sessPool.Get() if err1 != nil { return done, ver, err1 } defer w.sessPool.Put(sctx) rh := newReorgHandler(sess.NewSession(sctx)) elements := BuildElements(tbl.Meta().Columns[0], tbl.Meta().Indices) partTbl, ok := tbl.(table.PartitionedTable) if !ok { return false, ver, dbterror.ErrUnsupportedReorganizePartition.GenWithStackByArgs() } dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { return false, ver, errors.Trace(err) } reorgInfo, err := getReorgInfoFromPartitions(d.jobContext(job.ID), d, rh, job, dbInfo, partTbl, physTblIDs, elements) err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (reorgErr error) { defer tidbutil.Recover(metrics.LabelDDL, "doPartitionReorgWork", func() { reorgErr = dbterror.ErrCancelledDDLJob.GenWithStack("reorganize partition for table `%v` panic", tbl.Meta().Name) }, false) return w.reorgPartitionDataAndIndex(tbl, reorgInfo) }) if err != nil { if dbterror.ErrPausedDDLJob.Equal(err) { return false, ver, nil } if dbterror.ErrWaitReorgTimeout.Equal(err) { // If timeout, we should return, check for the owner and re-wait job done. 
return false, ver, nil } if kv.IsTxnRetryableError(err) { return false, ver, errors.Trace(err) } if err1 := rh.RemoveDDLReorgHandle(job, reorgInfo.elements); err1 != nil { logutil.BgLogger().Warn("reorg partition job failed, RemoveDDLReorgHandle failed, can't convert job to rollback", zap.String("category", "ddl"), zap.String("job", job.String()), zap.Error(err1)) } logutil.BgLogger().Warn("reorg partition job failed, convert job to rollback", zap.String("category", "ddl"), zap.String("job", job.String()), zap.Error(err)) ver, err = convertAddTablePartitionJob2RollbackJob(d, t, job, err, tbl.Meta()) return false, ver, errors.Trace(err) } return true, ver, err } type reorgPartitionWorker struct { *backfillCtx // Static allocated to limit memory allocations rowRecords []*rowRecord rowDecoder *decoder.RowDecoder rowMap map[int64]types.Datum writeColOffsetMap map[int64]int maxOffset int reorgedTbl table.PartitionedTable } func newReorgPartitionWorker(sessCtx sessionctx.Context, i int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) (*reorgPartitionWorker, error) { reorgedTbl, err := tables.GetReorganizedPartitionedTable(t) if err != nil { return nil, errors.Trace(err) } pt := t.GetPartitionedTable() if pt == nil { return nil, dbterror.ErrUnsupportedReorganizePartition.GenWithStackByArgs() } partColIDs := reorgedTbl.GetPartitionColumnIDs() writeColOffsetMap := make(map[int64]int, len(partColIDs)) maxOffset := 0 for _, id := range partColIDs { var offset int for _, col := range pt.Cols() { if col.ID == id { offset = col.Offset break } } writeColOffsetMap[id] = offset maxOffset = mathutil.Max[int](maxOffset, offset) } return &reorgPartitionWorker{ backfillCtx: newBackfillCtx(reorgInfo.d, i, sessCtx, reorgInfo.SchemaName, t, jc, "reorg_partition_rate", false), rowDecoder: decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap), rowMap: make(map[int64]types.Datum, len(decodeColMap)), writeColOffsetMap: writeColOffsetMap, 
		maxOffset:         maxOffset,
		reorgedTbl:        reorgedTbl,
	}, nil
}

// BackfillData copies one batch of rows from an old partition into the new
// partition layout inside a single transaction. It only writes the record keys
// (indexes are built later by addTableIndex — see the TODOs below) and collects
// any per-row warnings produced while locating the target partition.
func (w *reorgPartitionWorker) BackfillData(handleRange reorgBackfillTask) (taskCtx backfillTaskContext, errInTxn error) {
	oprStartTime := time.Now()
	ctx := kv.WithInternalSourceAndTaskType(context.Background(), w.jobContext.ddlJobSourceType(), kvutil.ExplicitTypeDDL)
	errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error {
		taskCtx.addedCount = 0
		taskCtx.scanCount = 0
		txn.SetOption(kv.Priority, handleRange.priority)
		if tagger := w.GetCtx().getResourceGroupTaggerForTopSQL(handleRange.getJobID()); tagger != nil {
			txn.SetOption(kv.ResourceGroupTagger, tagger)
		}

		rowRecords, nextKey, taskDone, err := w.fetchRowColVals(txn, handleRange)
		if err != nil {
			return errors.Trace(err)
		}
		taskCtx.nextKey = nextKey
		taskCtx.done = taskDone

		warningsMap := make(map[errors.ErrorID]*terror.Error)
		warningsCountMap := make(map[errors.ErrorID]int64)
		for _, prr := range rowRecords {
			taskCtx.scanCount++

			err = txn.Set(prr.key, prr.vals)
			if err != nil {
				return errors.Trace(err)
			}
			taskCtx.addedCount++
			if prr.warning != nil {
				// Deduplicate warnings by error ID, only keeping a count per kind.
				if _, ok := warningsCountMap[prr.warning.ID()]; ok {
					warningsCountMap[prr.warning.ID()]++
				} else {
					warningsCountMap[prr.warning.ID()] = 1
					warningsMap[prr.warning.ID()] = prr.warning
				}
			}
			// TODO: Future optimization: also write the indexes here?
			// What if the transaction limit is just enough for a single row, without index?
			// Hmm, how could that be in the first place?
			// For now, implement the batch-txn w.addTableIndex,
			// since it already exists and is in use
		}
		// Collect the warnings.
		taskCtx.warnings, taskCtx.warningsCount = warningsMap, warningsCountMap

		// TODO: also add the index entries here? And make sure they are not added somewhere else.
		return nil
	})
	logSlowOperations(time.Since(oprStartTime), "BackfillData", 3000)

	return
}

// fetchRowColVals scans up to batchCnt record rows in taskRange from the old
// partition, decodes the partitioning columns of each row, and computes the new
// record key (same handle, new physical table/partition ID) for each row.
// It returns the re-keyed records, the next key to continue from, and whether
// the task range has been fully consumed.
func (w *reorgPartitionWorker) fetchRowColVals(txn kv.Transaction, taskRange reorgBackfillTask) ([]*rowRecord, kv.Key, bool, error) {
	w.rowRecords = w.rowRecords[:0]
	startTime := time.Now()

	// taskDone means that the added handle is out of taskRange.endHandle.
	taskDone := false
	sysTZ := w.sessCtx.GetSessionVars().StmtCtx.TimeZone

	tmpRow := make([]types.Datum, w.maxOffset+1)
	var lastAccessedHandle kv.Key
	oprStartTime := startTime
	err := iterateSnapshotKeys(w.jobContext, w.sessCtx.GetStore(), taskRange.priority, w.table.RecordPrefix(), txn.StartTS(), taskRange.startKey, taskRange.endKey,
		func(handle kv.Handle, recordKey kv.Key, rawRow []byte) (bool, error) {
			oprEndTime := time.Now()
			logSlowOperations(oprEndTime.Sub(oprStartTime), "iterateSnapshotKeys in reorgPartitionWorker fetchRowColVals", 0)
			oprStartTime = oprEndTime

			taskDone = recordKey.Cmp(taskRange.endKey) >= 0

			if taskDone || len(w.rowRecords) >= w.batchCnt {
				return false, nil
			}

			// TODO: Extend for normal tables
			// TODO: Extend for REMOVE PARTITIONING
			_, err := w.rowDecoder.DecodeTheExistedColumnMap(w.sessCtx, handle, rawRow, sysTZ, w.rowMap)
			if err != nil {
				return false, errors.Trace(err)
			}

			// Set the partitioning columns and calculate which partition to write to
			for colID, offset := range w.writeColOffsetMap {
				d, ok := w.rowMap[colID]
				if !ok {
					return false, dbterror.ErrUnsupportedReorganizePartition.GenWithStackByArgs()
				}
				tmpRow[offset] = d
			}
			p, err := w.reorgedTbl.GetPartitionByRow(w.sessCtx, tmpRow)
			if err != nil {
				return false, errors.Trace(err)
			}
			pid := p.GetPhysicalID()
			// Re-key the record: new partition's table prefix + the original
			// handle suffix from the old record key.
			newKey := tablecodec.EncodeTablePrefix(pid)
			newKey = append(newKey, recordKey[len(newKey):]...)
			w.rowRecords = append(w.rowRecords, &rowRecord{
				key: newKey, vals: rawRow,
			})
			w.cleanRowMap()
			lastAccessedHandle = recordKey
			if recordKey.Cmp(taskRange.endKey) == 0 {
				taskDone = true
				return false, nil
			}
			return true, nil
		})

	if len(w.rowRecords) == 0 {
		taskDone = true
	}

	logutil.BgLogger().Debug("txn fetches handle info", zap.String("category", "ddl"), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("taskRange", taskRange.String()), zap.Duration("takeTime", time.Since(startTime)))
	return w.rowRecords, getNextHandleKey(taskRange, taskDone, lastAccessedHandle), taskDone, errors.Trace(err)
}

// cleanRowMap clears the reusable decode map between rows (keeps the map's
// allocated buckets, avoiding a fresh allocation per row).
func (w *reorgPartitionWorker) cleanRowMap() {
	for id := range w.rowMap {
		delete(w.rowMap, id)
	}
}

// AddMetricInfo adds cnt to the worker's backfill metric counter.
func (w *reorgPartitionWorker) AddMetricInfo(cnt float64) {
	w.metricCounter.Add(cnt)
}

// String returns the worker type name, used for logging.
func (*reorgPartitionWorker) String() string {
	return typeReorgPartitionWorker.String()
}

// GetCtx returns the embedded backfill context.
func (w *reorgPartitionWorker) GetCtx() *backfillCtx {
	return w.backfillCtx
}

// reorgPartitionDataAndIndex drives the full reorg backfill for one job:
// first copy all rows from the DroppingDefinitions partitions into the
// AddingDefinitions partitions, then build each index on the new partitions,
// one element (reorgInfo.elements[1:]) at a time, with recovery support.
func (w *worker) reorgPartitionDataAndIndex(t table.Table, reorgInfo *reorgInfo) error {
	// First copy all table data to the new partitions
	// from each of the DroppingDefinitions partitions.
	// Then create all indexes on the AddingDefinitions partitions
	// for each new index, one partition at a time.

	// Copy the data from the DroppingDefinitions to the AddingDefinitions
	if bytes.Equal(reorgInfo.currElement.TypeKey, meta.ColumnElementKey) {
		err := w.updatePhysicalTableRow(t, reorgInfo)
		if err != nil {
			return errors.Trace(err)
		}
	}

	failpoint.Inject("reorgPartitionAfterDataCopy", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			panic("panic test in reorgPartitionAfterDataCopy")
		}
	})

	// TODO: Rewrite this to do all indexes at once in addTableIndex
	// instead of calling it once per index (meaning reading the table multiple times)
	// But for now, try to understand how it works...
	firstNewPartitionID := t.Meta().Partition.AddingDefinitions[0].ID
	startElementOffset := 0
	//startElementOffsetToResetHandle := -1
	// This backfill job starts with backfilling index data, whose index ID is currElement.ID.
	if !bytes.Equal(reorgInfo.currElement.TypeKey, meta.IndexElementKey) {
		// First run, have not yet started backfilling index data
		// Restart with the first new partition.
		// TODO: handle remove partitioning
		reorgInfo.PhysicalTableID = firstNewPartitionID
	} else {
		// The job was interrupted and has been restarted,
		// reset and start from where it was done
		for i, element := range reorgInfo.elements[1:] {
			if reorgInfo.currElement.ID == element.ID {
				startElementOffset = i
				//startElementOffsetToResetHandle = i
				break
			}
		}
	}

	for i := startElementOffset; i < len(reorgInfo.elements[1:]); i++ {
		// Now build the indexes in the new partitions
		var physTbl table.PhysicalTable
		if tbl, ok := t.(table.PartitionedTable); ok {
			physTbl = tbl.GetPartition(reorgInfo.PhysicalTableID)
		} else if tbl, ok := t.(table.PhysicalTable); ok {
			// This may be used when partitioning a non-partitioned table
			physTbl = tbl
		}
		// Get the original start handle and end handle.
		currentVer, err := getValidCurrentVersion(reorgInfo.d.store)
		if err != nil {
			return errors.Trace(err)
		}
		// TODO: Can we improve this in case of a crash?
		// like where the regInfo PhysicalTableID and element is the same,
		// and the tableid in the key-prefix regInfo.StartKey and regInfo.EndKey matches with PhysicalTableID
		// do not change the reorgInfo start/end key
		startHandle, endHandle, err := getTableRange(reorgInfo.d.jobContext(reorgInfo.Job.ID), reorgInfo.d, physTbl, currentVer.Ver, reorgInfo.Job.Priority)
		if err != nil {
			return errors.Trace(err)
		}

		// Always (re)start with the full PhysicalTable range
		reorgInfo.StartKey, reorgInfo.EndKey = startHandle, endHandle

		// Update the element in the reorgInfo for updating the reorg meta below.
		reorgInfo.currElement = reorgInfo.elements[i+1]
		// Write the reorg info to store so the whole reorganize process can recover from panic.
		err = reorgInfo.UpdateReorgMeta(reorgInfo.StartKey, w.sessPool)
		logutil.BgLogger().Info("update column and indexes", zap.String("category", "ddl"),
			zap.Int64("jobID", reorgInfo.Job.ID),
			zap.ByteString("elementType", reorgInfo.currElement.TypeKey),
			zap.Int64("elementID", reorgInfo.currElement.ID),
			zap.Int64("partitionTableId", physTbl.GetPhysicalID()),
			zap.String("startHandle", hex.EncodeToString(reorgInfo.StartKey)),
			zap.String("endHandle", hex.EncodeToString(reorgInfo.EndKey)))
		if err != nil {
			return errors.Trace(err)
		}
		err = w.addTableIndex(t, reorgInfo)
		if err != nil {
			return errors.Trace(err)
		}
		reorgInfo.PhysicalTableID = firstNewPartitionID
	}
	failpoint.Inject("reorgPartitionAfterIndex", func(val failpoint.Value) {
		//nolint:forcetypeassert
		if val.(bool) {
			panic("panic test in reorgPartitionAfterIndex")
		}
	})
	return nil
}

// bundlesForExchangeTablePartition collects the placement bundles that must be
// updated for EXCHANGE PARTITION: the partitioned table, the partition being
// exchanged (newPar) and the non-partitioned table (nt). When one side has
// placement settings and the other does not, an empty bundle is appended so the
// stale settings are removed.
func bundlesForExchangeTablePartition(t *meta.Meta, pt *model.TableInfo, newPar *model.PartitionDefinition, nt *model.TableInfo) ([]*placement.Bundle, error) {
	bundles := make([]*placement.Bundle, 0, 3)

	ptBundle, err := placement.NewTableBundle(t, pt)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if ptBundle != nil {
		bundles = append(bundles, ptBundle)
	}

	parBundle, err := placement.NewPartitionBundle(t, *newPar)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if parBundle != nil {
		bundles = append(bundles, parBundle)
	}

	ntBundle, err := placement.NewTableBundle(t, nt)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if ntBundle != nil {
		bundles = append(bundles, ntBundle)
	}

	if parBundle == nil && ntBundle != nil {
		// newPar.ID is the ID of old table to exchange, so ntBundle != nil means it has some old placement settings.
		// We should remove it in this situation
		bundles = append(bundles, placement.NewBundle(newPar.ID))
	}

	if parBundle != nil && ntBundle == nil {
		// nt.ID is the ID of old partition to exchange, so parBundle != nil means it has some old placement settings.
		// We should remove it in this situation
		bundles = append(bundles, placement.NewBundle(nt.ID))
	}

	return bundles, nil
}

// checkExchangePartitionRecordValidation builds and runs a SELECT that probes
// for any row in the non-partitioned table (schemaName.tableName) that would
// NOT belong to partition `index` of pt; if such a row exists it returns
// ErrRowDoesNotMatchPartition. The partition expression cannot be bound as a
// parameter, so it is written into the SQL text directly (see the build helpers).
func checkExchangePartitionRecordValidation(w *worker, pt *model.TableInfo, index int, schemaName, tableName model.CIStr) error {
	var sql string
	var paramList []interface{}

	pi := pt.Partition

	switch pi.Type {
	case model.PartitionTypeHash:
		if pi.Num == 1 {
			// A single hash partition accepts every row.
			return nil
		}
		var buf strings.Builder
		buf.WriteString("select 1 from %n.%n where mod(")
		buf.WriteString(pi.Expr)
		buf.WriteString(", %?) != %? limit 1")
		sql = buf.String()
		paramList = append(paramList, schemaName.L, tableName.L, pi.Num, index)
	case model.PartitionTypeRange:
		// Table has only one partition and has the maximum value
		if len(pi.Definitions) == 1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) {
			return nil
		}
		// For range expression and range columns
		if len(pi.Columns) == 0 {
			sql, paramList = buildCheckSQLForRangeExprPartition(pi, index, schemaName, tableName)
		} else {
			sql, paramList = buildCheckSQLForRangeColumnsPartition(pi, index, schemaName, tableName)
		}
	case model.PartitionTypeList:
		if len(pi.Columns) == 0 {
			sql, paramList = buildCheckSQLForListPartition(pi, index, schemaName, tableName)
		} else {
			sql, paramList = buildCheckSQLForListColumnsPartition(pi, index, schemaName, tableName)
		}
	default:
		return dbterror.ErrUnsupportedPartitionType.GenWithStackByArgs(pt.Name.O)
	}

	var ctx sessionctx.Context
	ctx, err := w.sessPool.Get()
	if err != nil {
		return errors.Trace(err)
	}
	defer w.sessPool.Put(ctx)

	rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(w.ctx, nil, sql, paramList...)
	if err != nil {
		return errors.Trace(err)
	}
	rowCount := len(rows)
	if rowCount != 0 {
		// At least one row does not fit the target partition.
		return errors.Trace(dbterror.ErrRowDoesNotMatchPartition)
	}
	return nil
}

// checkExchangePartitionPlacementPolicy verifies that the non-partitioned table
// and the partition (falling back to its table) reference the same placement
// policy; any mismatch yields ErrTablesDifferentMetadata.
func checkExchangePartitionPlacementPolicy(t *meta.Meta, ntPPRef, ptPPRef, partPPRef *model.PolicyRefInfo) error {
	partitionPPRef := partPPRef
	if partitionPPRef == nil {
		// The partition inherits the table-level policy when it has none of its own.
		partitionPPRef = ptPPRef
	}

	if ntPPRef == nil && partitionPPRef == nil {
		return nil
	}
	if ntPPRef == nil || partitionPPRef == nil {
		return dbterror.ErrTablesDifferentMetadata
	}

	ptPlacementPolicyInfo, _ := getPolicyInfo(t, partitionPPRef.ID)
	ntPlacementPolicyInfo, _ := getPolicyInfo(t, ntPPRef.ID)
	if ntPlacementPolicyInfo == nil && ptPlacementPolicyInfo == nil {
		return nil
	}
	if ntPlacementPolicyInfo == nil || ptPlacementPolicyInfo == nil {
		return dbterror.ErrTablesDifferentMetadata
	}
	if ntPlacementPolicyInfo.Name.L != ptPlacementPolicyInfo.Name.L {
		return dbterror.ErrTablesDifferentMetadata
	}

	return nil
}

// buildCheckSQLForRangeExprPartition builds the probe SQL for a RANGE(expr)
// partition: a row violates partition `index` if it is below the previous
// boundary or at/above this partition's boundary (with the first and MAXVALUE
// partitions as one-sided special cases).
func buildCheckSQLForRangeExprPartition(pi *model.PartitionInfo, index int, schemaName, tableName model.CIStr) (string, []interface{}) {
	var buf strings.Builder
	paramList := make([]interface{}, 0, 4)
	// Since the pi.Expr string may contain the identifier, which couldn't be escaped in our ParseWithParams(...)
	// So we write it to the origin sql string here.
	if index == 0 {
		buf.WriteString("select 1 from %n.%n where ")
		buf.WriteString(pi.Expr)
		buf.WriteString(" >= %? limit 1")
		paramList = append(paramList, schemaName.L, tableName.L, trimQuotation(pi.Definitions[index].LessThan[0]))
		return buf.String(), paramList
	} else if index == len(pi.Definitions)-1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) {
		buf.WriteString("select 1 from %n.%n where ")
		buf.WriteString(pi.Expr)
		buf.WriteString(" < %? limit 1")
		paramList = append(paramList, schemaName.L, tableName.L, trimQuotation(pi.Definitions[index-1].LessThan[0]))
		return buf.String(), paramList
	} else {
		buf.WriteString("select 1 from %n.%n where ")
		buf.WriteString(pi.Expr)
		buf.WriteString(" < %? or ")
		buf.WriteString(pi.Expr)
		buf.WriteString(" >= %? limit 1")
		paramList = append(paramList, schemaName.L, tableName.L, trimQuotation(pi.Definitions[index-1].LessThan[0]), trimQuotation(pi.Definitions[index].LessThan[0]))
		return buf.String(), paramList
	}
}

// trimQuotation strips the single quotes wrapping a partition boundary literal.
func trimQuotation(str string) string {
	return strings.Trim(str, "'")
}

// buildCheckSQLForRangeColumnsPartition is the RANGE COLUMNS (single column)
// analogue of buildCheckSQLForRangeExprPartition; the column name can be bound
// with %n, so no SQL text needs to be spliced.
func buildCheckSQLForRangeColumnsPartition(pi *model.PartitionInfo, index int, schemaName, tableName model.CIStr) (string, []interface{}) {
	paramList := make([]interface{}, 0, 6)
	colName := pi.Columns[0].L
	if index == 0 {
		paramList = append(paramList, schemaName.L, tableName.L, colName, trimQuotation(pi.Definitions[index].LessThan[0]))
		return "select 1 from %n.%n where %n >= %? limit 1", paramList
	} else if index == len(pi.Definitions)-1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) {
		paramList = append(paramList, schemaName.L, tableName.L, colName, trimQuotation(pi.Definitions[index-1].LessThan[0]))
		return "select 1 from %n.%n where %n < %? limit 1", paramList
	} else {
		paramList = append(paramList, schemaName.L, tableName.L, colName, trimQuotation(pi.Definitions[index-1].LessThan[0]), colName, trimQuotation(pi.Definitions[index].LessThan[0]))
		return "select 1 from %n.%n where %n < %? or %n >= %? limit 1", paramList
	}
}

// buildCheckSQLForListPartition builds the probe SQL for a LIST(expr)
// partition: any row whose expression value is not in the partition's value
// list violates the exchange.
func buildCheckSQLForListPartition(pi *model.PartitionInfo, index int, schemaName, tableName model.CIStr) (string, []interface{}) {
	var buf strings.Builder
	buf.WriteString("select 1 from %n.%n where ")
	buf.WriteString(pi.Expr)
	buf.WriteString(" not in (%?) limit 1")
	inValues := getInValues(pi, index)

	paramList := make([]interface{}, 0, 3)
	paramList = append(paramList, schemaName.L, tableName.L, inValues)
	return buf.String(), paramList
}

// buildCheckSQLForListColumnsPartition is the LIST COLUMNS (single column)
// analogue of buildCheckSQLForListPartition.
func buildCheckSQLForListColumnsPartition(pi *model.PartitionInfo, index int, schemaName, tableName model.CIStr) (string, []interface{}) {
	colName := pi.Columns[0].L
	var buf strings.Builder
	buf.WriteString("select 1 from %n.%n where %n not in (%?) limit 1")
	inValues := getInValues(pi, index)

	paramList := make([]interface{}, 0, 4)
	paramList = append(paramList, schemaName.L, tableName.L, colName, inValues)
	return buf.String(), paramList
}

// getInValues flattens the IN-value tuples of partition `index` into one slice.
func getInValues(pi *model.PartitionInfo, index int) []string {
	inValues := make([]string, 0, len(pi.Definitions[index].InValues))
	for _, inValue := range pi.Definitions[index].InValues {
		inValues = append(inValues, inValue...)
	}
	return inValues
}

// checkAddPartitionTooManyPartitions errors when piDefs exceeds the MySQL
// partition count limit.
func checkAddPartitionTooManyPartitions(piDefs uint64) error {
	if piDefs > uint64(mysql.PartitionCountLimit) {
		return errors.Trace(dbterror.ErrTooManyPartitions)
	}
	return nil
}

// checkAddPartitionOnTemporaryMode rejects partitioned temporary tables.
func checkAddPartitionOnTemporaryMode(tbInfo *model.TableInfo) error {
	if tbInfo.Partition != nil && tbInfo.TempTableType != model.TempTableNone {
		return dbterror.ErrPartitionNoTemporary
	}
	return nil
}

// checkPartitionColumnsUnique errors if the same column appears more than once
// in the partitioning column list.
func checkPartitionColumnsUnique(tbInfo *model.TableInfo) error {
	if len(tbInfo.Partition.Columns) <= 1 {
		return nil
	}
	var columnsMap = make(map[string]struct{})
	for _, col := range tbInfo.Partition.Columns {
		if _, ok := columnsMap[col.L]; ok {
			return dbterror.ErrSameNamePartitionField.GenWithStackByArgs(col.L)
		}
		columnsMap[col.L] = struct{}{}
	}
	return nil
}

// checkNoHashPartitions rejects PARTITIONS 0.
func checkNoHashPartitions(_ sessionctx.Context, partitionNum uint64) error {
	if partitionNum == 0 {
		return ast.ErrNoParts.GenWithStackByArgs("partitions")
	}
	return nil
}

// getPartitionIDs returns the physical IDs of all partition definitions, or an
// empty slice for a non-partitioned table.
func getPartitionIDs(table *model.TableInfo) []int64 {
	if table.GetPartitionInfo() == nil {
		return []int64{}
	}
	physicalTableIDs := make([]int64, 0, len(table.Partition.Definitions))
	for _, def := range table.Partition.Definitions {
		physicalTableIDs = append(physicalTableIDs, def.ID)
	}
	return physicalTableIDs
}

// getPartitionRuleIDs returns the label-rule IDs for all partitions of table.
func getPartitionRuleIDs(dbName string, table *model.TableInfo) []string {
	if table.GetPartitionInfo() == nil {
		return []string{}
	}
	partRuleIDs := make([]string, 0, len(table.Partition.Definitions))
	for _, def := range table.Partition.Definitions {
		partRuleIDs = append(partRuleIDs, fmt.Sprintf(label.PartitionIDFormat, label.IDPrefix, dbName, table.Name.L, def.Name.L))
	}
	return partRuleIDs
}

// checkPartitioningKeysConstraints checks that the range partitioning key is included in the table constraint.
func checkPartitioningKeysConstraints(sctx sessionctx.Context, s *ast.CreateTableStmt, tblInfo *model.TableInfo) error {
	// Returns directly if there are no unique keys in the table.
	if len(tblInfo.Indices) == 0 && !tblInfo.PKIsHandle {
		return nil
	}

	partCols, err := getPartitionColSlices(sctx, tblInfo, s.Partition)
	if err != nil {
		return errors.Trace(err)
	}

	// Checks that the partitioning key is included in the constraint.
	// Every unique key on the table must use every column in the table's partitioning expression.
	// See https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations-partitioning-keys-unique-keys.html
	for _, index := range tblInfo.Indices {
		if index.Unique && !checkUniqueKeyIncludePartKey(partCols, index.Columns) {
			if index.Primary {
				// not support global index with clustered index
				if tblInfo.IsCommonHandle {
					return dbterror.ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("CLUSTERED INDEX")
				}
				if !config.GetGlobalConfig().EnableGlobalIndex {
					return dbterror.ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("PRIMARY KEY")
				}
			}
			if !config.GetGlobalConfig().EnableGlobalIndex {
				return dbterror.ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("UNIQUE INDEX")
			}
		}
	}
	// when PKIsHandle, tblInfo.Indices will not contain the primary key.
	if tblInfo.PKIsHandle {
		indexCols := []*model.IndexColumn{{
			Name:   tblInfo.GetPkName(),
			Length: types.UnspecifiedLength,
		}}
		if !checkUniqueKeyIncludePartKey(partCols, indexCols) {
			return dbterror.ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("CLUSTERED INDEX")
		}
	}
	return nil
}

// checkPartitionKeysConstraint reports whether the given index columns include
// every partitioning column of pi (extracted either from pi.Expr or from
// pi.Columns for the COLUMNS() form).
func checkPartitionKeysConstraint(pi *model.PartitionInfo, indexColumns []*model.IndexColumn, tblInfo *model.TableInfo) (bool, error) {
	var (
		partCols []*model.ColumnInfo
		err      error
	)
	// The expr will be an empty string if the partition is defined by:
	// CREATE TABLE t (...) PARTITION BY RANGE COLUMNS(...)
	if partExpr := pi.Expr; partExpr != "" {
		// Parse partitioning key, extract the column names in the partitioning key to slice.
		partCols, err = extractPartitionColumns(partExpr, tblInfo)
		if err != nil {
			return false, err
		}
	} else {
		partCols = make([]*model.ColumnInfo, 0, len(pi.Columns))
		for _, col := range pi.Columns {
			colInfo := tblInfo.FindPublicColumnByName(col.L)
			if colInfo == nil {
				return false, infoschema.ErrColumnNotExists.GenWithStackByArgs(col, tblInfo.Name)
			}
			partCols = append(partCols, colInfo)
		}
	}

	// In MySQL, every unique key on the table must use every column in the table's partitioning expression.(This
	// also includes the table's primary key.)
	// In TiDB, global index will be built when this constraint is not satisfied and EnableGlobalIndex is set.
	// See https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations-partitioning-keys-unique-keys.html
	return checkUniqueKeyIncludePartKey(columnInfoSlice(partCols), indexColumns), nil
}

// columnNameExtractor is an ast.Visitor that collects the ColumnInfo of every
// column referenced in an expression; unknown columns are reported via err.
type columnNameExtractor struct {
	extractedColumns []*model.ColumnInfo
	tblInfo          *model.TableInfo
	err              error
}

// Enter always descends into children; extraction happens in Leave.
func (*columnNameExtractor) Enter(node ast.Node) (ast.Node, bool) {
	return node, false
}

// Leave records the ColumnInfo for each ColumnNameExpr, or sets err when the
// name does not resolve against tblInfo.
func (cne *columnNameExtractor) Leave(node ast.Node) (ast.Node, bool) {
	if c, ok := node.(*ast.ColumnNameExpr); ok {
		info := findColumnByName(c.Name.Name.L, cne.tblInfo)
		if info != nil {
			cne.extractedColumns = append(cne.extractedColumns, info)
			return node, true
		}
		cne.err = dbterror.ErrBadField.GenWithStackByArgs(c.Name.Name.O, "expression")
		return nil, false
	}
	return node, true
}

// findColumnByName looks up colName (lowercase) in tblInfo.Columns; returns nil
// if tblInfo is nil or the column is absent.
func findColumnByName(colName string, tblInfo *model.TableInfo) *model.ColumnInfo {
	if tblInfo == nil {
		return nil
	}
	for _, info := range tblInfo.Columns {
		if info.Name.L == colName {
			return info
		}
	}
	return nil
}

// extractPartitionColumns parses the partition expression (wrapped in a dummy
// SELECT) and returns the ColumnInfos of all columns it references.
func extractPartitionColumns(partExpr string, tblInfo *model.TableInfo) ([]*model.ColumnInfo, error) {
	partExpr = "select " + partExpr
	stmts, _, err := parser.New().ParseSQL(partExpr)
	if err != nil {
		return nil, errors.Trace(err)
	}
	extractor := &columnNameExtractor{
		tblInfo:          tblInfo,
		extractedColumns: make([]*model.ColumnInfo, 0),
	}
	stmts[0].Accept(extractor)
	if extractor.err != nil {
		return nil, errors.Trace(extractor.err)
	}
	return extractor.extractedColumns, nil
}

// stringSlice is defined for checkUniqueKeyIncludePartKey.
// if Go supports covariance, the code shouldn't be so complex.
type stringSlice interface {
	Len() int
	At(i int) string
}

// checkUniqueKeyIncludePartKey checks that the partitioning key is included in the constraint.
func checkUniqueKeyIncludePartKey(partCols stringSlice, idxCols []*model.IndexColumn) bool {
	for i := 0; i < partCols.Len(); i++ {
		partCol := partCols.At(i)
		_, idxCol := model.FindIndexColumnByName(idxCols, partCol)
		if idxCol == nil {
			// Partition column is not found in the index columns.
			return false
		}
		if idxCol.Length > 0 {
			// The partition column is found in the index columns, but the index column is a prefix index
			return false
		}
	}
	return true
}

// columnInfoSlice implements the stringSlice interface.
type columnInfoSlice []*model.ColumnInfo

func (cis columnInfoSlice) Len() int { return len(cis) }

func (cis columnInfoSlice) At(i int) string { return cis[i].Name.L }

// columnNameSlice implements the stringSlice interface.
type columnNameSlice []*ast.ColumnName

func (cns columnNameSlice) Len() int { return len(cns) }

func (cns columnNameSlice) At(i int) string { return cns[i].Name.L }

// isPartExprUnsigned reports whether the table's partition expression evaluates
// to an unsigned type. Parse errors are logged and treated as "not unsigned".
func isPartExprUnsigned(tbInfo *model.TableInfo) bool {
	// We should not rely on any configuration, system or session variables, so use a mock ctx!
	// Same as in tables.newPartitionExpr
	ctx := mock.NewContext()
	expr, err := expression.ParseSimpleExprWithTableInfo(ctx, tbInfo.Partition.Expr, tbInfo)
	if err != nil {
		logutil.BgLogger().Error("isPartExpr failed parsing expression!", zap.Error(err))
		return false
	}
	if mysql.HasUnsignedFlag(expr.GetType().GetFlag()) {
		return true
	}
	return false
}

// truncateTableByReassignPartitionIDs reassigns new partition ids.
func truncateTableByReassignPartitionIDs(t *meta.Meta, tblInfo *model.TableInfo, pids []int64) (err error) {
	if len(pids) < len(tblInfo.Partition.Definitions) {
		// To make it compatible with older versions when pids was not given
		// and if there has been any add/reorganize partition increasing the number of partitions
		morePids, err := t.GenGlobalIDs(len(tblInfo.Partition.Definitions) - len(pids))
		if err != nil {
			return errors.Trace(err)
		}
		pids = append(pids, morePids...)
	}
	newDefs := make([]model.PartitionDefinition, 0, len(tblInfo.Partition.Definitions))
	for i, def := range tblInfo.Partition.Definitions {
		newDef := def
		newDef.ID = pids[i]
		newDefs = append(newDefs, newDef)
	}
	tblInfo.Partition.Definitions = newDefs
	return nil
}

// partitionExprProcessor validates one aspect of a partition expression node.
type partitionExprProcessor func(sessionctx.Context, *model.TableInfo, ast.ExprNode) error

// partitionExprChecker walks a partition expression, running each processor on
// every expression node and collecting the referenced columns.
type partitionExprChecker struct {
	processors []partitionExprProcessor
	ctx        sessionctx.Context
	tbInfo     *model.TableInfo
	err        error

	columns []*model.ColumnInfo
}

// newPartitionExprChecker builds a checker from the given processors, always
// appending the column-extraction processor last.
func newPartitionExprChecker(ctx sessionctx.Context, tbInfo *model.TableInfo, processor ...partitionExprProcessor) *partitionExprChecker {
	p := &partitionExprChecker{processors: processor, ctx: ctx, tbInfo: tbInfo}
	p.processors = append(p.processors, p.extractColumns)
	return p
}

// Enter runs every processor on the expression node; the first error stops the walk.
func (p *partitionExprChecker) Enter(n ast.Node) (node ast.Node, skipChildren bool) {
	expr, ok := n.(ast.ExprNode)
	if !ok {
		return n, true
	}
	for _, processor := range p.processors {
		if err := processor(p.ctx, p.tbInfo, expr); err != nil {
			p.err = err
			return n, true
		}
	}

	return n, false
}

// Leave keeps walking only while no error has been recorded.
func (p *partitionExprChecker) Leave(n ast.Node) (node ast.Node, ok bool) {
	return n, p.err == nil
}

// extractColumns resolves a ColumnNameExpr against the table and records it.
func (p *partitionExprChecker) extractColumns(_ sessionctx.Context, _ *model.TableInfo, expr ast.ExprNode) error {
	columnNameExpr, ok := expr.(*ast.ColumnNameExpr)
	if !ok {
		return nil
	}
	colInfo := findColumnByName(columnNameExpr.Name.Name.L, p.tbInfo)
	if colInfo == nil {
		return errors.Trace(dbterror.ErrBadField.GenWithStackByArgs(columnNameExpr.Name.Name.L, "partition function"))
	}

	p.columns = append(p.columns, colInfo)
	return nil
}

// checkPartitionExprAllowed rejects any construct that is not on the partition
// function / operator allow-lists (TIMESTAMP operands are additionally rejected
// for binary/unary operators).
func checkPartitionExprAllowed(_ sessionctx.Context, tb *model.TableInfo, e ast.ExprNode) error {
	switch v := e.(type) {
	case *ast.FuncCallExpr:
		if _, ok := expression.AllowedPartitionFuncMap[v.FnName.L]; ok {
			return nil
		}
	case *ast.BinaryOperationExpr:
		if _, ok := expression.AllowedPartition4BinaryOpMap[v.Op]; ok {
			return errors.Trace(checkNoTimestampArgs(tb, v.L, v.R))
		}
	case
		*ast.UnaryOperationExpr:
		if _, ok := expression.AllowedPartition4UnaryOpMap[v.Op]; ok {
			return errors.Trace(checkNoTimestampArgs(tb, v.V))
		}
	case *ast.ColumnNameExpr, *ast.ParenthesesExpr, *driver.ValueExpr, *ast.MaxValueExpr,
		*ast.DefaultExpr, *ast.TimeUnitExpr:
		// Plain columns, literals, parentheses and MAXVALUE/DEFAULT/time-unit
		// tokens are always allowed.
		return nil
	}
	return errors.Trace(dbterror.ErrPartitionFunctionIsNotAllowed)
}

// checkPartitionExprArgs validates that the column arguments of an allowed
// partition function have a temporal type the function accepts (e.g. TO_DAYS
// needs a date-like argument, UNIX_TIMESTAMP needs a TIMESTAMP); non-function
// expressions pass through unchecked.
func checkPartitionExprArgs(_ sessionctx.Context, tblInfo *model.TableInfo, e ast.ExprNode) error {
	expr, ok := e.(*ast.FuncCallExpr)
	if !ok {
		return nil
	}
	argsType, err := collectArgsType(tblInfo, expr.Args...)
	if err != nil {
		return errors.Trace(err)
	}
	switch expr.FnName.L {
	case ast.ToDays, ast.ToSeconds, ast.DayOfMonth, ast.Month, ast.DayOfYear, ast.Quarter, ast.YearWeek,
		ast.Year, ast.Weekday, ast.DayOfWeek, ast.Day:
		return errors.Trace(checkResultOK(hasDateArgs(argsType...)))
	case ast.Hour, ast.Minute, ast.Second, ast.TimeToSec, ast.MicroSecond:
		return errors.Trace(checkResultOK(hasTimeArgs(argsType...)))
	case ast.UnixTimestamp:
		return errors.Trace(checkResultOK(hasTimestampArgs(argsType...)))
	case ast.FromDays:
		return errors.Trace(checkResultOK(hasDateArgs(argsType...) || hasTimeArgs(argsType...)))
	case ast.Extract:
		// The acceptable argument type depends on the EXTRACT unit.
		switch expr.Args[0].(*ast.TimeUnitExpr).Unit {
		case ast.TimeUnitYear, ast.TimeUnitYearMonth, ast.TimeUnitQuarter, ast.TimeUnitMonth, ast.TimeUnitDay:
			return errors.Trace(checkResultOK(hasDateArgs(argsType...)))
		case ast.TimeUnitDayMicrosecond, ast.TimeUnitDayHour, ast.TimeUnitDayMinute, ast.TimeUnitDaySecond:
			return errors.Trace(checkResultOK(hasDatetimeArgs(argsType...)))
		case ast.TimeUnitHour, ast.TimeUnitHourMinute, ast.TimeUnitHourSecond, ast.TimeUnitMinute, ast.TimeUnitMinuteSecond,
			ast.TimeUnitSecond, ast.TimeUnitMicrosecond, ast.TimeUnitHourMicrosecond, ast.TimeUnitMinuteMicrosecond, ast.TimeUnitSecondMicrosecond:
			return errors.Trace(checkResultOK(hasTimeArgs(argsType...)))
		default:
			return errors.Trace(dbterror.ErrWrongExprInPartitionFunc)
		}
	case ast.DateDiff:
		return errors.Trace(checkResultOK(slice.AllOf(argsType, func(i int) bool {
			return hasDateArgs(argsType[i])
		})))
	case ast.Abs, ast.Ceiling, ast.Floor, ast.Mod:
		has := hasTimestampArgs(argsType...)
		if has {
			// TIMESTAMP operands are not allowed in arithmetic partition functions.
			return errors.Trace(dbterror.ErrWrongExprInPartitionFunc)
		}
	}
	return nil
}

// collectArgsType returns the mysql field type of every column argument in
// exprs; non-column arguments are skipped, unknown columns are an error.
func collectArgsType(tblInfo *model.TableInfo, exprs ...ast.ExprNode) ([]byte, error) {
	ts := make([]byte, 0, len(exprs))
	for _, arg := range exprs {
		col, ok := arg.(*ast.ColumnNameExpr)
		if !ok {
			continue
		}
		columnInfo := findColumnByName(col.Name.Name.L, tblInfo)
		if columnInfo == nil {
			return nil, errors.Trace(dbterror.ErrBadField.GenWithStackByArgs(col.Name.Name.L, "partition function"))
		}
		ts = append(ts, columnInfo.GetType())
	}

	return ts, nil
}

// hasDateArgs reports whether any argument type is DATE or DATETIME.
func hasDateArgs(argsType ...byte) bool {
	return slice.AnyOf(argsType, func(i int) bool {
		return argsType[i] == mysql.TypeDate || argsType[i] == mysql.TypeDatetime
	})
}

// hasTimeArgs reports whether any argument type is TIME (duration) or DATETIME.
func hasTimeArgs(argsType ...byte) bool {
	return slice.AnyOf(argsType, func(i int) bool {
		return argsType[i] == mysql.TypeDuration || argsType[i] == mysql.TypeDatetime
	})
}

// hasTimestampArgs reports whether any argument type is TIMESTAMP.
func hasTimestampArgs(argsType ...byte) bool {
	return slice.AnyOf(argsType, func(i int) bool {
		return argsType[i] == mysql.TypeTimestamp
	})
}

// hasDatetimeArgs reports whether any argument type is DATETIME.
func hasDatetimeArgs(argsType ...byte) bool {
	return slice.AnyOf(argsType, func(i int) bool {
		return argsType[i] == mysql.TypeDatetime
	})
}

// checkNoTimestampArgs errors when any column argument in exprs is a TIMESTAMP.
func checkNoTimestampArgs(tbInfo *model.TableInfo, exprs ...ast.ExprNode) error {
	argsType, err := collectArgsType(tbInfo, exprs...)
	if err != nil {
		return err
	}
	if hasTimestampArgs(argsType...) {
		return errors.Trace(dbterror.ErrWrongExprInPartitionFunc)
	}
	return nil
}

// hexIfNonPrint checks if printable UTF-8 characters from a single quoted string,
// if so, just returns the string
// else returns a hex string of the binary string (i.e. actual encoding, not unicode code points!)
func hexIfNonPrint(s string) string {
	isPrint := true
	// https://go.dev/blog/strings `for range` of string converts to runes!
for _, runeVal := range s { if !strconv.IsPrint(runeVal) { isPrint = false break } } if isPrint { return s } // To avoid 'simple' MySQL accepted escape characters, to be showed as hex, just escape them // \0 \b \n \r \t \Z, see https://dev.mysql.com/doc/refman/8.0/en/string-literals.html isPrint = true res := "" for _, runeVal := range s { switch runeVal { case 0: // Null res += `\0` case 7: // Bell res += `\b` case '\t': // 9 res += `\t` case '\n': // 10 res += `\n` case '\r': // 13 res += `\r` case 26: // ctrl-z / Substitute res += `\Z` default: if !strconv.IsPrint(runeVal) { isPrint = false break } res += string(runeVal) } } if isPrint { return res } // Not possible to create an easy interpreted MySQL string, return as hex string // Can be converted to string in MySQL like: CAST(UNHEX('<hex string>') AS CHAR(255)) return "0x" + hex.EncodeToString([]byte(driver.UnwrapFromSingleQuotes(s))) } func writeColumnListToBuffer(partitionInfo *model.PartitionInfo, sqlMode mysql.SQLMode, buf *bytes.Buffer) { for i, col := range partitionInfo.Columns { buf.WriteString(stringutil.Escape(col.O, sqlMode)) if i < len(partitionInfo.Columns)-1 { buf.WriteString(",") } } } // AppendPartitionInfo is used in SHOW CREATE TABLE as well as generation the SQL syntax // for the PartitionInfo during validation of various DDL commands func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode) { if partitionInfo == nil { return } // Since MySQL 5.1/5.5 is very old and TiDB aims for 5.7/8.0 compatibility, we will not // include the /*!50100 or /*!50500 comments for TiDB. // This also solves the issue with comments within comments that would happen for // PLACEMENT POLICY options. 
defaultPartitionDefinitions := true if partitionInfo.Type == model.PartitionTypeHash || partitionInfo.Type == model.PartitionTypeKey { for i, def := range partitionInfo.Definitions { if def.Name.O != fmt.Sprintf("p%d", i) { defaultPartitionDefinitions = false break } if len(def.Comment) > 0 || def.PlacementPolicyRef != nil { defaultPartitionDefinitions = false break } } if defaultPartitionDefinitions { if partitionInfo.Type == model.PartitionTypeHash { fmt.Fprintf(buf, "\nPARTITION BY HASH (%s) PARTITIONS %d", partitionInfo.Expr, partitionInfo.Num) } else { buf.WriteString("\nPARTITION BY KEY (") writeColumnListToBuffer(partitionInfo, sqlMode, buf) buf.WriteString(")") fmt.Fprintf(buf, " PARTITIONS %d", partitionInfo.Num) } return } } // this if statement takes care of lists/range/key columns case if len(partitionInfo.Columns) > 0 { // partitionInfo.Type == model.PartitionTypeRange || partitionInfo.Type == model.PartitionTypeList // || partitionInfo.Type == model.PartitionTypeKey // Notice that MySQL uses two spaces between LIST and COLUMNS... if partitionInfo.Type == model.PartitionTypeKey { fmt.Fprintf(buf, "\nPARTITION BY %s (", partitionInfo.Type.String()) } else { fmt.Fprintf(buf, "\nPARTITION BY %s COLUMNS(", partitionInfo.Type.String()) } writeColumnListToBuffer(partitionInfo, sqlMode, buf) buf.WriteString(")\n(") } else { fmt.Fprintf(buf, "\nPARTITION BY %s (%s)\n(", partitionInfo.Type.String(), partitionInfo.Expr) } AppendPartitionDefs(partitionInfo, buf, sqlMode) buf.WriteString(")") } // AppendPartitionDefs generates a list of partition definitions needed for SHOW CREATE TABLE (in executor/show.go) // as well as needed for generating the ADD PARTITION query for INTERVAL partitioning of ALTER TABLE t LAST PARTITION // and generating the CREATE TABLE query from CREATE TABLE ... 
INTERVAL func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode) { for i, def := range partitionInfo.Definitions { if i > 0 { fmt.Fprintf(buf, ",\n ") } fmt.Fprintf(buf, "PARTITION %s", stringutil.Escape(def.Name.O, sqlMode)) // PartitionTypeHash and PartitionTypeKey do not have any VALUES definition if partitionInfo.Type == model.PartitionTypeRange { lessThans := make([]string, len(def.LessThan)) for idx, v := range def.LessThan { lessThans[idx] = hexIfNonPrint(v) } fmt.Fprintf(buf, " VALUES LESS THAN (%s)", strings.Join(lessThans, ",")) } else if partitionInfo.Type == model.PartitionTypeList { if len(def.InValues) == 0 { fmt.Fprintf(buf, " DEFAULT") } else if len(def.InValues) == 1 && len(def.InValues[0]) == 1 && strings.EqualFold(def.InValues[0][0], "DEFAULT") { fmt.Fprintf(buf, " DEFAULT") } else { values := bytes.NewBuffer(nil) for j, inValues := range def.InValues { if j > 0 { values.WriteString(",") } if len(inValues) > 1 { values.WriteString("(") tmpVals := make([]string, len(inValues)) for idx, v := range inValues { tmpVals[idx] = hexIfNonPrint(v) } values.WriteString(strings.Join(tmpVals, ",")) values.WriteString(")") } else if len(inValues) == 1 { values.WriteString(hexIfNonPrint(inValues[0])) } } fmt.Fprintf(buf, " VALUES IN (%s)", values.String()) } } if len(def.Comment) > 0 { fmt.Fprintf(buf, " COMMENT '%s'", format.OutputFormat(def.Comment)) } if def.PlacementPolicyRef != nil { // add placement ref info here fmt.Fprintf(buf, " /*T![placement] PLACEMENT POLICY=%s */", stringutil.Escape(def.PlacementPolicyRef.Name.O, sqlMode)) } } }
package testutils import ( "net/http" "github.com/selectel/go-selvpcclient/selvpcclient" "github.com/selectel/go-selvpcclient/selvpcclient/resell" ) // NewTestResellV2Client prepares a client for the Resell V2 API tests. func (testEnv *TestEnv) NewTestResellV2Client() { apiVersion := "v2" resellClient := &selvpcclient.ServiceClient{ HTTPClient: &http.Client{}, Endpoint: testEnv.Server.URL + "/resell/" + apiVersion, TokenID: FakeTokenID, UserAgent: resell.UserAgent, } testEnv.Client = resellClient }
package mybytes

import (
	"bytes"
	"fmt"
	"testing"
)

// TestBytesFirst demonstrates how Read consumes bytes from a Buffer:
// reading shrinks Len while Cap stays unchanged.
func TestBytesFirst(t *testing.T) {
	var buffer1 bytes.Buffer
	contents := "Simple byte buffer for marshaling data."
	fmt.Printf("Write contents %q...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The length of buffer:%d\n", buffer1.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	p1 := make([]byte, 7)
	// Error deliberately ignored: the buffer is known to be non-empty here.
	n, _ := buffer1.Read(p1)
	fmt.Printf("%d bytes were read.(call Read)\n", n)
	fmt.Printf("The length of buffer:%d\n", buffer1.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
}

// TestBytesSecond shows how a Buffer's capacity grows as content is
// written, and the effect of an explicit Grow call.
// (Fixed typos in the log output: "contests"/"contes" -> "contents".)
func TestBytesSecond(t *testing.T) {
	var contents string
	buffer1 := bytes.NewBufferString(contents)
	fmt.Printf("The length of new buffer with contents %q:%d\n", contents, buffer1.Len())
	fmt.Printf("The capacity of new buffer with contents %q:%d\n", contents, buffer1.Cap())
	fmt.Println()

	contents = "123456"
	fmt.Printf("Writing contents %q ...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The length of buffer:%d\n", buffer1.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	contents = "78"
	fmt.Printf("Writing contents %q ...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The length of buffer:%d\n", buffer1.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	contents = "89"
	fmt.Printf("Writing contents %q ...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The length of buffer:%d\n", buffer1.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	contents = "abcdefghijk"
	buffer2 := bytes.NewBufferString(contents)
	fmt.Printf("The length of buffer:%d\n", buffer2.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer2.Cap())
	fmt.Println()

	n := 10
	fmt.Printf("Grow the buffer with %d ...\n", n)
	buffer2.Grow(n)
	fmt.Printf("The length of buffer:%d\n", buffer2.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer2.Cap())
	fmt.Println()

	// The zero value of bytes.Buffer is an empty, ready-to-use buffer.
	var buffer3 bytes.Buffer
	fmt.Printf("The length of buffer:%d\n", buffer3.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer3.Cap())
	fmt.Println()

	contents = "xyz"
	fmt.Printf("write contents %q ...\n", contents)
	buffer3.WriteString(contents)
	fmt.Printf("The length of buffer:%d\n", buffer3.Len())
	fmt.Printf("The capacity of buffer:%d\n", buffer3.Cap())
	fmt.Println()
}

// TestBytesThird demonstrates that Bytes returns a slice aliasing the
// buffer's internal storage: writing through that slice mutates the buffer
// until a subsequent Write forces reallocation.
func TestBytesThird(t *testing.T) {
	contents := "ab"
	buffer1 := bytes.NewBufferString(contents)
	fmt.Printf("The capacity of new buffer with contents %q:%d\n", contents, buffer1.Cap())
	fmt.Println()

	unreadbytes := buffer1.Bytes()
	fmt.Printf("The unread bytes of the buffer:%v\n", unreadbytes)
	fmt.Println()

	contents = "cdefg"
	fmt.Printf("Write contents %q ...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println("---------------------------------------------")

	// Re-extend the previously captured slice to its full capacity to
	// peek at the (possibly shared) backing array.
	unreadbytes = unreadbytes[:cap(unreadbytes)]
	fmt.Printf("The unread bytes of the buffer:%v\n", unreadbytes)
	fmt.Println(buffer1.String())

	value := byte('X')
	fmt.Printf("Set a byte in the unread bytes to %v ...\n", value)
	unreadbytes[len(unreadbytes)-2] = value
	fmt.Printf("The unread bytes of the buffer:%v\n", buffer1.Bytes())
	fmt.Println()
	fmt.Println("---------------------------------------------")

	contents = "hijklmn"
	fmt.Printf("Write contents %q ...\n", contents)
	buffer1.WriteString(contents)
	// Fixed mislabeled output: this line prints Cap, not the unread bytes.
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	unreadbytes = unreadbytes[:cap(unreadbytes)]
	fmt.Printf("The unread bytes of the buffer:%v\n", unreadbytes)
	fmt.Println(buffer1.String())
	fmt.Print("\n\n")
}

// TestBytesNext demonstrates that Next, like Bytes, returns a slice that
// aliases the buffer's storage until the next allocation.
func TestBytesNext(t *testing.T) {
	contents := "12"
	buffer1 := bytes.NewBufferString(contents)
	fmt.Printf("The capacity of new buffer with contents %q: %d\n", contents, buffer1.Cap())
	fmt.Println()

	nextBytes := buffer1.Next(2)
	fmt.Printf("The next bytes of the buffer: %v\n", nextBytes)
	fmt.Println()

	contents = "34567"
	fmt.Printf("Write contents %q ...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	nextBytes = nextBytes[:cap(nextBytes)]
	fmt.Printf("The next bytes of the buffer:%v\n", nextBytes)
	fmt.Println()

	value := byte('X')
	// Fixed mislabeled output: this announces mutating the captured slice.
	fmt.Printf("Set a byte in the next bytes to %v ...\n", value)
	nextBytes[len(nextBytes)-2] = value
	fmt.Printf("The unread bytes of the buffer:%v\n", buffer1.Bytes())
	fmt.Println()

	contents = "89101112"
	fmt.Printf("Writing contents %q ...\n", contents)
	buffer1.WriteString(contents)
	fmt.Printf("The capacity of buffer:%d\n", buffer1.Cap())
	fmt.Println()

	nextBytes = nextBytes[:cap(nextBytes)]
	fmt.Printf("The next bytes of the buffer:%v\n", nextBytes)
}
package generator

import (
	"fmt"
	"strings"
)

// action describes a single IAM action and what the generator knows
// about it (its service prefix, endpoint, signing name, and whether it
// is backed by an API call and/or an IAM action).
type action struct {
	IamPrefix string
	Name      string
	Endpoint  string
	Signing   string
	HasApi    bool
	HasAction bool
}

// actions is a searchable, sortable collection of *action.
type actions []*action

// get returns the first action whose IamPrefix matches exactly and whose
// Name matches case-insensitively, or nil when there is no match.
func (s actions) get(iamPrefix, name string) *action {
	for _, a := range s {
		// strings.EqualFold replaces ToLower(a)==ToLower(b): it avoids
		// two allocations and uses proper Unicode case folding.
		if a.IamPrefix == iamPrefix && strings.EqualFold(a.Name, name) {
			return a
		}
	}
	return nil
}

// groupBy buckets the actions by the key computed by grouper.
func (s actions) groupBy(grouper func(act *action) string) map[string]actions {
	output := map[string]actions{}
	for _, act := range s {
		key := grouper(act)
		output[key] = append(output[key], act)
	}
	return output
}

// Len, Swap and Less implement sort.Interface, ordering actions by the
// "prefix:Name" string.
func (s *actions) Len() int { return len(*s) }

func (s *actions) Swap(i, j int) { (*s)[i], (*s)[j] = (*s)[j], (*s)[i] }

func (s *actions) Less(i, j int) bool {
	fi := fmt.Sprintf("%s:%s", (*s)[i].IamPrefix, (*s)[i].Name)
	fj := fmt.Sprintf("%s:%s", (*s)[j].IamPrefix, (*s)[j].Name)
	return fi < fj
}
/* Description In many applications very large integers numbers are required. Some of these applications are using keys for secure transmission of data, encryption, etc. In this problem you are given a number, you have to determine the number of digits in the factorial of the number. Input Input consists of several lines of integer numbers. The first line contains an integer n, which is the number of cases to be tested, followed by n lines, one integer 1 <= m <= 10^7 on each line. Output The output contains the number of digits in the factorial of the integers appearing in the input. Sample Input 2 10 20 Sample Output 7 19 Source Dhaka 2002 */ package main import ( "math" ) func main() { assert(digfact(1) == 1) assert(digfact(10) == 7) assert(digfact(20) == 19) } func assert(x bool) { if !x { panic("assertion failed") } } // https://oeis.org/A034886 func digfact(n uint64) uint64 { if n < 2 { return 1 } x := float64(n) a := math.Log(2*math.Pi*x) / (2 * math.Ln10) b := x * math.Log(x/math.E) / math.Ln10 r := math.Floor(a+b) + 1 return uint64(r) }
package jobs

import (
	"docktor/server/storage"

	log "github.com/sirupsen/logrus"
)

// CheckDaemonsStatuses updates the status of each daemon in the db.
// It opens a storage session, loads every daemon, refreshes its Docker
// status and persists the result. All errors are logged and swallowed:
// this is a background job with nobody to report to.
func CheckDaemonsStatuses() {
	log.Info("Checking daemons status...")

	// Acquire a storage session; without it nothing can be checked.
	dock, err := storage.Get()
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Error when connecting to the db")
		return
	}
	defer dock.Close()

	ds, err := dock.Daemons().FindAll()
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Error when retrieving daemons")
		return
	}

	for _, d := range ds {
		// NOTE(review): d is a per-iteration copy; this assumes
		// SetDockerStatus mutates d (or state it points to) so that the
		// Save below persists the refreshed status — confirm against the
		// daemon type's definition.
		d.SetDockerStatus()
		// log.Debugf("Daemon status - %s : %s", d.Name, d.Docker.Status)
		// NOTE(review): the error returned by Save is ignored, so a failed
		// save is silently lost — consider logging it.
		dock.Daemons().Save(d)
	}

	log.Info("Daemons status check done.")
}
package service import ( "audit-gateway/middleware" "audit-gateway/model" "fmt" "log" "net/http" "os" ) var sessionCookieName = os.Getenv("SessionCookieName") // UserLoginService 管理用户登录的服务 type UserLoginService struct { UserName string `form:"user_name" json:"user_name" binding:"required,min=5,max=30"` Password string `form:"password" json:"password" binding:"required,min=8,max=40"` } func Login(w http.ResponseWriter, r *http.Request) { session, err := middleware.SessionStore.New(r, sessionCookieName) // 登录验证 name := r.FormValue("username") pass := r.FormValue("password") var user model.User if err := model.DB.Where("user_name = ?", name).First(&user).Error; err != nil { log.Fatal("账号或密码错误", nil) fmt.Fprintln(w, "账号或密码错误") return } if user.CheckPassword(pass) == false { log.Fatal("账号或密码错误", nil) fmt.Fprintln(w, "账号或密码错误") return } tokenString := middleware.CreateToken(name) // 在session中标记用户已经通过登录验证 session.Values["authenticated"] = true session.Values["Authorization"] = tokenString session.Values["user"] = name err = session.Save(r, w) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return } fmt.Fprintln(w, "登录成功!", err) } func GetCurrentUser(w http.ResponseWriter, r *http.Request) { session, _ := middleware.SessionStore.Get(r, sessionCookieName) auth := session.Values["authenticated"] if auth == true { user := session.Values["user"] fmt.Fprintf(w, "%s", user) } else { fmt.Fprint(w, "未登录认证") } } func Logout(w http.ResponseWriter, r *http.Request) { session, _ := middleware.SessionStore.Get(r, sessionCookieName) session.Values["authenticated"] = false session.Values["Authorization"] = "" session.Save(r, w) } func Secret(w http.ResponseWriter, r *http.Request) { session, _ := middleware.SessionStore.Get(r, sessionCookieName) if auth, ok := session.Values["authenticated"].(bool); !ok || !auth { http.Error(w, "Forbidden", http.StatusForbidden) return } fmt.Fprintln(w, "已经登录了") }
package main

import (
	"fmt"
	"log"
	"math"
	"sort"
	"strings"
	"sync"
)

// Combination is a struct containing details of a combination:
// the sequence of placements so far, the moves still available / blocked
// from the final position, the accumulated distance and the number of
// distinct paths that reach this combination.
type Combination struct {
	Sequence []string
	Moves    []string
	NoMoves  []string
	Dist     float64
	Paths    int64
}

// main enumerates 2x4 Lego brick constructions of 2..6 bricks and prints
// one CSV line of statistics per brick count.
// NOTE(review): moveMap, noMoveMap, centres and incrementBrick are defined
// in other files of this package and are not visible here.
func main() {
	log.Println("2x4 Lego Construction")

	startPos := "30_30_30_0"

	log.Println("Creating Maps")
	moves := moveMap()
	// spew.Dump(moves.Load("30_30_30_0"))
	nomoves := noMoveMap()
	// spew.Dump(nomoves.Load("30_30_30_0"))
	cents := centres()

	// #####################
	// Creating the initial Combination
	var start Combination
	tmp, ok := moves.Load("30_30_30_0")
	if !ok {
		panic("Error: Centre key not found")
	}
	// sync.Map stores interface{} values; assert back to []string.
	move, ok := tmp.([]string)
	if !ok {
		panic("Error in typing")
	}
	tmp, ok = nomoves.Load(startPos)
	if !ok {
		panic("Error: Centre key not found")
	}
	nomove, ok := tmp.([]string)
	if !ok {
		panic("Error in typing")
	}
	// spew.Dump(move) //spew.Dump(nomove)
	start.Sequence = append(start.Sequence, "30_30_30_0")
	start.Moves = append(start.Moves, move...)
	start.NoMoves = append(start.NoMoves, nomove...)
	start.Dist = 0.
	start.Paths = 1
	// spew.Dump(start)
	// Keys are "<brickCount>_<moves>_<dist>_<suffix>"; the 2-digit prefix
	// lets later passes select one generation with strings.HasPrefix.
	startKey := fmt.Sprintf("01_%v_%v_%v", len(start.Moves), start.Dist, 0)

	// Add the first combination to the sync map
	var cMap sync.Map
	cMap.Store(startKey, start)

	// CSV header for the per-generation statistics printed below.
	fmt.Printf("n,D,V,M,Rc,Rf,Pmin,Pmax,Pmean,G\n")
	var D float64
	var interfaces float64
	interfaces = 184.
	var interfacesUsed float64
	D = 1.
	interfacesUsed = 0.

	// #################
	// Now run through the different combinations
	for i := 2; i <= 6; i++ {
		// initialise some vars
		currentKeyStartsWith := fmt.Sprintf("%02d", i)
		previousKeyStartsWith := fmt.Sprintf("%02d", i-1)
		var wg sync.WaitGroup
		// For each in the combination sync.Map
		cMap.Range(func(k, v interface{}) bool {
			// If the prefix is of a sequence from the previous combination set, increment a brick
			if strings.HasPrefix(k.(string), previousKeyStartsWith) {
				// fmt.Printf("Key: %v, Paths to Seq: %v \n", k.(string), v.(Combination).Paths)
				// fmt.Printf("Key: %v\n", k.(string))
				wg.Add(1)
				// incrementBrick is expected to call wg.Done; each goroutine
				// extends one combination by one brick and stores results
				// back into cMap.
				go incrementBrick(v.(Combination), &cMap, &moves, &nomoves, &cents, &wg)
			}
			return true
		})
		wg.Wait()

		// ###############
		// Report
		// Report on the previous combination while incrementing the next
		// fmt.Printf("\n%02d brick combinations\n", i)
		// V = total path count, M = number of distinct combinations,
		// P = per-combination path counts for this generation.
		var V float64
		var M float64
		Rc := 0.0
		var P []float64
		cMap.Range(func(k, v interface{}) bool {
			if strings.HasPrefix(k.(string), currentKeyStartsWith) {
				// spew.Dump(v.(Combination).Sequence)
				M++
				V += float64(v.(Combination).Paths)
				P = append(P, float64(v.(Combination).Paths))
			}
			return true
		})
		// D
		//fmt.Printf("Available Interfaces: %f\n", (interfaces*float64(i-1) - interfacesUsed))
		D = D * (interfaces*float64(i-1) - interfacesUsed)
		interfacesUsed += 2
		// Freedom
		Rf := (M / V) - (1. / V)
		Rf = Rf / (1. - (1. / V))
		// NOTE(review): minMax panics on an empty slice; this assumes every
		// generation produces at least one combination — confirm.
		Pmin, Pmax := minMax(P)
		Pmean := V / float64(len(P))
		Amin := (math.Pow(M, 2) + (V - M)) / 2.
		q, r := divmod(int64(V), int64(M))
		Amax := M * (((float64(q) * (M + 1)) / 2) + float64(r))
		A := calculateA(P)
		G := (A - Amin) / (Amax - Amin)
		// Print line as a csv line
		// NOTE(review): Rc is always 0 here; presumably a placeholder
		// retained for CSV-column compatibility — confirm.
		fmt.Printf("%d,%e,%e,%e,%e,%e,%e,%e,%e,%e\n", i, D, V, M, Rc, Rf, Pmin, Pmax, Pmean, G)
	}
}

//#################################

// calculateA sorts P ascending (in place) and returns the sum of its
// running prefix sums — the quantity used in the Gini-style G statistic.
func calculateA(P []float64) (A float64) {
	sort.Float64s(P)
	var cum float64
	for _, val := range P {
		cum += val
		A += cum
	}
	return
}

// minMax returns the smallest and largest values of array.
// Panics if array is empty (array[0] below).
func minMax(array []float64) (min float64, max float64) {
	min = array[0]
	max = array[0]
	for _, value := range array {
		if max < value {
			max = value
		}
		if min > value {
			min = value
		}
	}
	return
}

// divmod returns the integer quotient and remainder of numerator/denominator.
func divmod(numerator, denominator int64) (quotient, remainder int64) {
	quotient = numerator / denominator // integer division, decimals are truncated
	remainder = numerator % denominator
	return
}
package main

import "fmt"

// main prints the string produced by funk.
func main() {
	fmt.Println(funk())
}

// funk returns a fixed greeting string.
func funk() string {
	return "Funky!"
}
package main

import (
	"errors"
	"math/rand"
	"time"

	"github.com/nsf/termbox-go"
)

//directions the snake head is going
const (
	LEFT = iota
	UP
	RIGHT
	DOWN
)

// point is a cell coordinate on the termbox grid.
type point struct {
	x int
	y int
}

// snake tracks only length and heading; body cells live in board.snakeParts.
type snake struct {
	length    int
	direction int
}

// board holds the playfield dimensions, the head position (snake), the
// apple position and the list of occupied body cells.
type board struct {
	width      int
	height     int
	snake      point
	apple      point
	snakeParts []point
}

//Create an apple at a random position
// Re-rolls recursively if the chosen cell is inside the snake, then draws
// the apple as a red cell.
func (bPtr *board) newApple() {
	// `time := time.Now()` shadows the time package for the rest of this
	// function; the RHS still resolves to the package (short-var-decl
	// scoping), so this compiles and works.
	time := time.Now()
	rand.Seed(time.UnixNano())
	b := bPtr
	b.apple.x = rand.Intn(b.width)
	b.apple.y = rand.Intn(b.height)
	//check if the Apple will spawn inside of the snake, otherwise retry
	for _, v := range b.snakeParts {
		if v == b.apple {
			b.newApple()
		}
	}
	//draw apple
	termbox.SetCell(b.apple.x, b.apple.y, ' ', termbox.ColorDefault, termbox.ColorRed)
}

// moveSnake advances the head one cell in the current direction, grows the
// snake when it eats the apple, trims the tail otherwise, and returns a
// non-nil error when the head collides with the body.
func moveSnake(sPtr *snake, bPtr *board) error {
	//error is nil
	var err error
	snake := sPtr
	board := bPtr
	//set direction
	switch snake.direction {
	case LEFT:
		board.snake.x--
	case UP:
		board.snake.y--
	case RIGHT:
		board.snake.x++
	case DOWN:
		board.snake.y++
	}
	//if the snake collides with itself return with an error
	for _, v := range board.snakeParts {
		if v == board.snake {
			err = errors.New("Snake collided with itself")
		}
	}
	board.snakeParts = append(board.snakeParts, board.snake)
	//do not remove the last part of the snake and increase the length
	if board.apple == board.snake {
		board.newApple()
		snake.length++
		//remove the last part of the snake
	} else if len(board.snakeParts) > 3 {
		// Erase the tail cell on screen, then drop it from the list.
		termbox.SetCell(board.snakeParts[0].x, board.snakeParts[0].y, ' ', termbox.ColorDefault, termbox.ColorDefault)
		board.snakeParts = board.snakeParts[1:]
	}
	//Draw Snake starting position
	termbox.SetCell(board.snake.x, board.snake.y, ' ', termbox.ColorDefault, termbox.ColorWhite)
	return err
}

// main runs the game loop: one goroutine feeds key events, another paces
// frames at ~15 fps; the loop ends when the head leaves the playfield
// (self-collision forces the head out of bounds), then GAME OVER is shown
// until Esc is pressed.
func main() {
	//Create a window to draw in
	err := termbox.Init()
	if err != nil {
		panic(err)
	}
	//Close window at end of main
	defer termbox.Close()
	//Set up the snake
	snake := new(snake)
	snake.length = 4
	snake.direction = RIGHT
	//Set up the board
	board := new(board)
	board.width, board.height = termbox.Size()
	board.width--
	board.height--
	//Spawn Snake
	board.snake.x = board.width / 2
	board.snake.y = board.height / 2
	// Pre-grow the body to the initial length by stepping in place.
	for i := 0; i < snake.length; i++ {
		moveSnake(snake, board)
	}
	//Spawn an Apple
	board.newApple()
	//key presses
	event_queue := make(chan termbox.Event)
	go func() {
		for {
			event_queue <- termbox.PollEvent()
		}
	}()
	//collect frame draw times
	frameDrawTimer := make(chan time.Time)
	go func() {
		for {
			frameDrawTimer <- <-time.After(time.Second / 15)
		}
	}()
	for board.snake.x <= board.width && board.snake.x >= 0 && board.snake.y <= board.height && board.snake.y >= 0 {
		//gameError := make(chan error)
		select {
		//If an arrow key has been pressed move the direction to the arrow key
		case event := <-event_queue:
			switch event.Type {
			case termbox.EventKey:
				switch event.Key {
				case termbox.KeyArrowDown:
					snake.direction = DOWN
				case termbox.KeyArrowLeft:
					snake.direction = LEFT
				case termbox.KeyArrowRight:
					snake.direction = RIGHT
				case termbox.KeyArrowUp:
					snake.direction = UP
				}
			}
		//After a frame time is done move and draw
		case <-frameDrawTimer:
			err := moveSnake(snake, board)
			termbox.Flush()
			if err != nil {
				// Self-collision: push the head out of bounds so the
				// outer loop condition ends the game.
				board.snake.x = -1
				board.snake.y = -1
				//gameError <- err
			}
			//case err := <-gameError:
			//	fmt.Print(err)
		}
	}
	// Spell out "GAME OVER" centred on the board, one cell per letter.
	termbox.SetCell(0+board.width/2, board.height/2, 'G', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(1+board.width/2, board.height/2, 'A', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(2+board.width/2, board.height/2, 'M', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(3+board.width/2, board.height/2, 'E', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(4+board.width/2, board.height/2, ' ', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(5+board.width/2, board.height/2, 'O', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(6+board.width/2, board.height/2, 'V', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(7+board.width/2, board.height/2, 'E', termbox.ColorWhite, termbox.ColorRed)
	termbox.SetCell(8+board.width/2, board.height/2, 'R', termbox.ColorWhite, termbox.ColorRed)
	termbox.Flush()
	//rip game
	// Block until Esc so the player can see the final screen.
inputLoop:
	for {
		event := <-event_queue
		switch event.Type {
		case termbox.EventKey:
			if event.Key == termbox.KeyEsc {
				break inputLoop
			}
		}
	}
}
package main

import (
	"fmt"
)

// min returns the smaller of two ints.
func min(x int, y int) int {
	if y < x {
		return y
	}
	return x
}

// LevelSearch computes the minimum top-to-bottom path sum of a triangle,
// folding each row into the one above it (classic bottom-up DP).
// The input rows are modified in place; the answer ends up in matrix[0][0].
func LevelSearch(matrix [][]int) int {
	for row := len(matrix) - 1; row > 0; row-- {
		upper, lower := matrix[row-1], matrix[row]
		for col := 0; col < len(lower)-1; col++ {
			upper[col] += min(lower[col], lower[col+1])
		}
	}
	return matrix[0][0]
}

func main() {
	fmt.Println("Welcome to the playground!")
	triangle := [][]int{
		{5},
		{7, 8},
		{2, 3, 4},
		{4, 9, 6, 1},
		{2, 7, 9, 4, 5},
	}
	fmt.Println("Search the shortest path via level, get result:", LevelSearch(triangle))
}

// Expected output:
// Welcome to the playground!
// Search the shortest path via level, get result: 20
// animal and bird are named string-list types.
type animal []string
type bird []string

// makeAnimal wraps a single name in a one-element animal list and returns it.
// NOTE(review): the original body was `animal(newAnimal)`, which does not
// compile — a string cannot be converted to []string. A one-element composite
// literal appears to be the intent; confirm with the author. Returning the
// value is backward compatible: existing `makeAnimal(x)` call statements
// remain legal with the result discarded.
func makeAnimal(newAnimal string) animal {
	return animal{newAnimal}
}
package main import ( "fmt" "io/ioutil" "net/url" "path/filepath" "strings" "github.com/BurntSushi/toml" ) func ReadConfig(filename string) (*Config, error) { c := new(Config) _, err := toml.DecodeFile(filename, c) if err != nil { return nil, err } return c, nil } func ReadSites(dir string) ([]*Site, error) { files, err := ioutil.ReadDir(dir) if err != nil { return nil, err } sites := make([]*Site, 0, len(files)) for _, info := range files { site, err := readSite(filepath.Join(dir, info.Name())) if err == errNotSite { continue } if err != nil { return nil, err } sites = append(sites, site) } return sites, err } var errNotSite = fmt.Errorf("not a site") func readSite(filename string) (*Site, error) { base := filepath.Base(filename) if base == "README" { return nil, errNotSite } if strings.HasPrefix(base, ".") { return nil, errNotSite } site := new(Site) _, err := toml.DecodeFile(filename, site) if err != nil { return nil, err } site.Name = filepath.Base(filename) return site, nil } type Config struct { Bind string `toml:"bind"` } type Site struct { Name string `toml:"-" json:"name"` Bind string `toml:"bind" json:"bind"` URL string `toml:"url" json:"url"` Description string `toml:"description" json:"description"` } func BindURL(s *Site) (*url.URL, error) { u, err := url.Parse(s.Bind) if err != nil { return nil, err } if !strings.HasPrefix(u.Path, "/") { return nil, fmt.Errorf("relative bind") } if strings.HasSuffix(u.Path, "/") { return nil, fmt.Errorf("rooted bind") } return u, nil }
package main

import "fmt"

func main() {
	fmt.Println(maxVowels("abciiidef", 3))
	fmt.Println(maxVowels("aeiou", 2))
}

// maxVowels returns the maximum number of vowels ('a','e','i','o','u')
// contained in any window of k consecutive bytes of s, using a sliding
// window. If k exceeds len(s), the whole string is treated as the single
// window (the previous version indexed past the end of s and panicked).
func maxVowels(s string, k int) int {
	target := map[byte]bool{
		'a': true,
		'e': true,
		'i': true,
		'o': true,
		'u': true,
	}
	left, right := 0, 0
	mx := 0
	win := 0
	for right < len(s) {
		// Grow the window to k bytes, bounded by the string length —
		// the bound is the fix: without it s[right] can go out of range.
		for right < len(s) && right-left < k {
			if target[s[right]] {
				win++
			}
			right++
		}
		if win > mx {
			mx = win
		}
		// Slide: drop the leftmost byte before the next extension.
		if target[s[left]] {
			win--
		}
		left++
	}
	return mx
}
/* Copyright (c) 2015 Eric Knapik, All Rights Reserved Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ // This file is about taking a slice of TaggedWord objects // and determining if they contain a copyright notice // there are a few tie overs of this into the tagger.go for // optimizations but mostly they could be removed and the tagger.go // can be a stand alone package tagger import ( "strings" ) // tri structure defined in main tagger.go //type Tri struct { // state int // word string // pos string //} const ( START int = iota // start state LPAREN // left parenthesis CCHAR // Character C RPAREN // right parenthesis NP // propper noun COMMA // the comma symbol CD // any number DASH // dash - IN // preposition DT // determiner ACCEPT // accept state REJECT // reject state SYM // symbol tag OTHER // anything else CSYM // C symbol LPARENC // left parenthesis after copyright INTERM // Intermidiate state ) // Given a string this will return whethe a copyright notice // of that string if it exists, if not the empty string is returned // The string must be tagged and propperly delimited // else will just tag // USING SHIFTING WINDOW STRATEGY func (copyrightTagger *Tagger) Match(inBytes []byte) bool { var curByte = 0 var lastCheckedByte = 0 var currWords int // the amount of words in the notice var taggedSent []TaggedWord if len(inBytes) < 15 { return false } for lastCheckedByte < len(inBytes) { // this is shifting the window based on a period followed by space for curByte < len(inBytes) { if inBytes[curByte] == byte('.') { if curByte+1 < len(inBytes) && inBytes[curByte+1] == byte(' ') { curByte++ break } } curByte++ } currWords = 0 // create array of tagged words taggedSent = copyrightTagger.TagBytes(inBytes[lastCheckedByte:curByte]) currentState := REJECT var potentialNotice []TaggedWord = make([]TaggedWord, 0) var extractedNotice []TaggedWord = make([]TaggedWord, 0) for _, taggedWord := range taggedSent { // Is what I have good enough to add to the extracted Notices if currentState == ACCEPT { currWords = 0 extractedNotice = append(extractedNotice, 
potentialNotice...) potentialNotice = nil } // Transition to the next state given current 'input' if strings.ToLower(taggedWord.word) == "copyright" || strings.ToLower(taggedWord.word) == "c" { currentState = copyrightTagger.CopyrightDFA[Tri{currentState, strings.ToLower(taggedWord.word), taggedWord.tag}] } else if strings.Contains(taggedWord.word, "©") { currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "©", "sym"}] } else if strings.Contains(copyrightTagger.CopyrightSyms, taggedWord.tag) { currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "X", taggedWord.tag}] } else { currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "X", "X"}] } // Because of multiple notices right after the other here's a check... if currentState == START || currentState == LPAREN || currentState == CSYM { currWords = 1 if len(potentialNotice) > 3 { // Does it seem like something useful has been captured extractedNotice = append(extractedNotice, potentialNotice...) potentialNotice = nil potentialNotice = append(potentialNotice, taggedWord) } else { potentialNotice = nil potentialNotice = append(potentialNotice, taggedWord) } } else if currentState != REJECT && currentState != ACCEPT { currWords += 1 potentialNotice = append(potentialNotice, taggedWord) } else if currentState == REJECT { currWords = 0 potentialNotice = nil } // This is for optimization where all I care about yes or no if currWords > 3 { return true } } // Do a final check to see if I might have a notice as the very last part of the string // Be a little more vauge here to be safe if currentState == ACCEPT || currentState == CD || currentState == NP { extractedNotice = append(extractedNotice, potentialNotice...) 
// NOTE(review): the lines below are the tail of a detection function whose
// beginning lies above this chunk; only comments have been added here.
}
// Return the extracted notice or the not found notice as a string
if len(extractedNotice) > 3 {
	return true
}
lastCheckedByte = curByte
}
return false // no copyright notice detected
}

// creates the DFA and symbol "array" needed to test the transitions
// for when a copyright notice can be found or noticed
//
// The returned string lists the POS tags / token classes the DFA reacts
// to (comma-separated); the map encodes transitions keyed by
// Tri{currentState, word, tag}. Combinations missing from the map fall
// back to the map's zero value on lookup.
func mkNoticeDFA() (string, map[Tri]int) {
	symbols := "(,),cd,np,dt,in,--,.,sym,cc"
	dfa := make(map[Tri]int)
	// possible copyright symbols
	// ©, â©, Å© (the latter two look like mis-encoded forms of © — only
	// the plain "©" actually appears as a key below)

	// Input: the literal word "copyright" (tagged nn) — from any state,
	// (re)start a candidate notice.
	dfa[Tri{START, "copyright", "nn"}] = START
	dfa[Tri{LPAREN, "copyright", "nn"}] = START
	dfa[Tri{CCHAR, "copyright", "nn"}] = START
	dfa[Tri{RPAREN, "copyright", "nn"}] = START
	dfa[Tri{NP, "copyright", "nn"}] = START
	dfa[Tri{COMMA, "copyright", "nn"}] = START
	dfa[Tri{CD, "copyright", "nn"}] = START
	dfa[Tri{DASH, "copyright", "nn"}] = START
	dfa[Tri{IN, "copyright", "nn"}] = START
	dfa[Tri{DT, "copyright", "nn"}] = START
	dfa[Tri{ACCEPT, "copyright", "nn"}] = START
	dfa[Tri{REJECT, "copyright", "nn"}] = START
	dfa[Tri{SYM, "copyright", "nn"}] = START
	dfa[Tri{OTHER, "copyright", "nn"}] = START
	dfa[Tri{CSYM, "copyright", "nn"}] = START
	dfa[Tri{LPARENC, "copyright", "nn"}] = START

	// Input: the bare word "c" — only meaningful directly after an
	// opening parenthesis (i.e. "(c)").
	dfa[Tri{START, "c", "nn"}] = REJECT
	dfa[Tri{LPAREN, "c", "nn"}] = CCHAR
	dfa[Tri{CCHAR, "c", "nn"}] = REJECT
	dfa[Tri{RPAREN, "c", "nn"}] = REJECT
	dfa[Tri{NP, "c", "nn"}] = REJECT
	dfa[Tri{COMMA, "c", "nn"}] = REJECT
	dfa[Tri{CD, "c", "nn"}] = REJECT
	dfa[Tri{DASH, "c", "nn"}] = REJECT
	dfa[Tri{IN, "c", "nn"}] = REJECT
	dfa[Tri{DT, "c", "nn"}] = REJECT
	dfa[Tri{ACCEPT, "c", "nn"}] = REJECT
	dfa[Tri{REJECT, "c", "nn"}] = REJECT
	dfa[Tri{SYM, "c", "nn"}] = REJECT
	dfa[Tri{OTHER, "c", "nn"}] = REJECT
	dfa[Tri{CSYM, "c", "nn"}] = REJECT
	dfa[Tri{LPARENC, "c", "nn"}] = CCHAR

	// Input: the © symbol — starts/continues a notice from almost anywhere.
	dfa[Tri{START, "©", "sym"}] = CSYM
	dfa[Tri{LPAREN, "©", "sym"}] = CSYM
	dfa[Tri{CCHAR, "©", "sym"}] = CSYM
	dfa[Tri{RPAREN, "©", "sym"}] = CSYM
	dfa[Tri{NP, "©", "sym"}] = CSYM
	dfa[Tri{COMMA, "©", "sym"}] = CSYM
	dfa[Tri{CD, "©", "sym"}] = CSYM
	dfa[Tri{DASH, "©", "sym"}] = CSYM
	dfa[Tri{IN, "©", "sym"}] = CSYM
	dfa[Tri{DT, "©", "sym"}] = CSYM
	dfa[Tri{ACCEPT, "©", "sym"}] = ACCEPT
	dfa[Tri{REJECT, "©", "sym"}] = CSYM
	dfa[Tri{SYM, "©", "sym"}] = CSYM
	dfa[Tri{OTHER, "©", "sym"}] = CSYM
	dfa[Tri{CSYM, "©", "sym"}] = CSYM
	dfa[Tri{LPARENC, "©", "sym"}] = CSYM

	// Input: an opening parenthesis.
	dfa[Tri{START, "X", "("}] = LPARENC
	dfa[Tri{LPAREN, "X", "("}] = REJECT
	dfa[Tri{CCHAR, "X", "("}] = REJECT
	dfa[Tri{RPAREN, "X", "("}] = REJECT
	dfa[Tri{NP, "X", "("}] = REJECT
	dfa[Tri{COMMA, "X", "("}] = REJECT
	dfa[Tri{CD, "X", "("}] = REJECT
	dfa[Tri{DASH, "X", "("}] = REJECT
	dfa[Tri{IN, "X", "("}] = REJECT
	dfa[Tri{DT, "X", "("}] = REJECT
	dfa[Tri{ACCEPT, "X", "("}] = REJECT
	dfa[Tri{REJECT, "X", "("}] = LPAREN
	dfa[Tri{SYM, "X", "("}] = REJECT
	dfa[Tri{OTHER, "X", "("}] = REJECT
	dfa[Tri{CSYM, "X", "("}] = LPAREN
	dfa[Tri{LPARENC, "X", "("}] = REJECT

	// Input: a closing parenthesis — only valid after "(c" or a symbol.
	dfa[Tri{START, "X", ")"}] = REJECT
	dfa[Tri{LPAREN, "X", ")"}] = REJECT
	dfa[Tri{CCHAR, "X", ")"}] = RPAREN
	dfa[Tri{RPAREN, "X", ")"}] = REJECT
	dfa[Tri{NP, "X", ")"}] = REJECT
	dfa[Tri{COMMA, "X", ")"}] = REJECT
	dfa[Tri{CD, "X", ")"}] = REJECT
	dfa[Tri{DASH, "X", ")"}] = REJECT
	dfa[Tri{IN, "X", ")"}] = REJECT
	dfa[Tri{DT, "X", ")"}] = REJECT
	dfa[Tri{ACCEPT, "X", ")"}] = REJECT
	dfa[Tri{REJECT, "X", ")"}] = REJECT
	dfa[Tri{SYM, "X", ")"}] = RPAREN
	dfa[Tri{OTHER, "X", ")"}] = REJECT
	dfa[Tri{CSYM, "X", ")"}] = RPAREN
	dfa[Tri{LPARENC, "X", ")"}] = REJECT

	// Input tag "cd" (cardinal number, e.g. a year).
	dfa[Tri{START, "X", "cd"}] = CD
	dfa[Tri{LPAREN, "X", "cd"}] = REJECT
	dfa[Tri{CCHAR, "X", "cd"}] = REJECT
	dfa[Tri{RPAREN, "X", "cd"}] = CD
	dfa[Tri{NP, "X", "cd"}] = CD
	dfa[Tri{COMMA, "X", "cd"}] = CD
	dfa[Tri{CD, "X", "cd"}] = CD
	dfa[Tri{DASH, "X", "cd"}] = CD
	dfa[Tri{IN, "X", "cd"}] = REJECT
	dfa[Tri{DT, "X", "cd"}] = REJECT
	dfa[Tri{ACCEPT, "X", "cd"}] = ACCEPT
	dfa[Tri{REJECT, "X", "cd"}] = REJECT
	dfa[Tri{SYM, "X", "cd"}] = CD
	dfa[Tri{OTHER, "X", "cd"}] = CD
	dfa[Tri{CSYM, "X", "cd"}] = CD
	dfa[Tri{LPARENC, "X", "cd"}] = REJECT

	// Input tag "np" (proper noun, e.g. a holder's name).
	dfa[Tri{START, "X", "np"}] = NP
	dfa[Tri{LPAREN, "X", "np"}] = REJECT
	dfa[Tri{CCHAR, "X", "np"}] = REJECT
	dfa[Tri{RPAREN, "X", "np"}] = NP
	dfa[Tri{NP, "X", "np"}] = NP
	dfa[Tri{COMMA, "X", "np"}] = NP
	dfa[Tri{CD, "X", "np"}] = NP
	dfa[Tri{DASH, "X", "np"}] = NP
	dfa[Tri{IN, "X", "np"}] = NP
	dfa[Tri{DT, "X", "np"}] = NP
	dfa[Tri{ACCEPT, "X", "np"}] = ACCEPT
	dfa[Tri{REJECT, "X", "np"}] = REJECT
	dfa[Tri{SYM, "X", "np"}] = NP
	dfa[Tri{OTHER, "X", "np"}] = NP
	dfa[Tri{CSYM, "X", "np"}] = NP
	dfa[Tri{LPARENC, "X", "np"}] = REJECT

	// Input tag "dt" (determiner).
	dfa[Tri{START, "X", "dt"}] = DT
	dfa[Tri{LPAREN, "X", "dt"}] = REJECT
	dfa[Tri{CCHAR, "X", "dt"}] = REJECT
	dfa[Tri{RPAREN, "X", "dt"}] = DT
	dfa[Tri{NP, "X", "dt"}] = DT
	dfa[Tri{COMMA, "X", "dt"}] = DT
	dfa[Tri{CD, "X", "dt"}] = DT
	dfa[Tri{DASH, "X", "dt"}] = DT
	dfa[Tri{IN, "X", "dt"}] = DT
	dfa[Tri{DT, "X", "dt"}] = REJECT
	dfa[Tri{ACCEPT, "X", "dt"}] = REJECT
	dfa[Tri{REJECT, "X", "dt"}] = REJECT
	dfa[Tri{SYM, "X", "dt"}] = REJECT
	dfa[Tri{OTHER, "X", "dt"}] = DT
	dfa[Tri{CSYM, "X", "dt"}] = DT
	dfa[Tri{LPARENC, "X", "dt"}] = REJECT

	// Input tag "in" (preposition).
	dfa[Tri{START, "X", "in"}] = REJECT
	dfa[Tri{LPAREN, "X", "in"}] = REJECT
	dfa[Tri{CCHAR, "X", "in"}] = REJECT
	dfa[Tri{RPAREN, "X", "in"}] = REJECT
	dfa[Tri{NP, "X", "in"}] = IN
	dfa[Tri{COMMA, "X", "in"}] = REJECT
	dfa[Tri{CD, "X", "in"}] = IN
	dfa[Tri{DASH, "X", "in"}] = REJECT
	dfa[Tri{IN, "X", "in"}] = REJECT
	dfa[Tri{DT, "X", "in"}] = REJECT
	dfa[Tri{ACCEPT, "X", "in"}] = REJECT
	dfa[Tri{REJECT, "X", "in"}] = REJECT
	dfa[Tri{SYM, "X", "in"}] = REJECT
	dfa[Tri{OTHER, "X", "in"}] = IN
	dfa[Tri{CSYM, "X", "in"}] = IN
	dfa[Tri{LPARENC, "X", "in"}] = REJECT

	// Input tag "--" (dash, e.g. year ranges).
	dfa[Tri{START, "X", "--"}] = REJECT
	dfa[Tri{LPAREN, "X", "--"}] = REJECT
	dfa[Tri{CCHAR, "X", "--"}] = REJECT
	dfa[Tri{RPAREN, "X", "--"}] = REJECT
	dfa[Tri{NP, "X", "--"}] = DASH
	dfa[Tri{COMMA, "X", "--"}] = REJECT
	dfa[Tri{CD, "X", "--"}] = DASH
	dfa[Tri{DASH, "X", "--"}] = REJECT
	dfa[Tri{IN, "X", "--"}] = REJECT
	dfa[Tri{DT, "X", "--"}] = REJECT
	dfa[Tri{ACCEPT, "X", "--"}] = REJECT
	dfa[Tri{REJECT, "X", "--"}] = REJECT
	dfa[Tri{SYM, "X", "--"}] = DASH
	dfa[Tri{OTHER, "X", "--"}] = REJECT
	dfa[Tri{CSYM, "X", "--"}] = DASH
	dfa[Tri{LPARENC, "X", "--"}] = REJECT

	// Input tag "," (comma, e.g. between years or names).
	dfa[Tri{START, "X", ","}] = REJECT
	dfa[Tri{LPAREN, "X", ","}] = REJECT
	dfa[Tri{CCHAR, "X", ","}] = REJECT
	dfa[Tri{RPAREN, "X", ","}] = REJECT
	dfa[Tri{NP, "X", ","}] = COMMA
	dfa[Tri{COMMA, "X", ","}] = REJECT
	dfa[Tri{CD, "X", ","}] = COMMA
	dfa[Tri{DASH, "X", ","}] = REJECT
	dfa[Tri{IN, "X", ","}] = REJECT
	dfa[Tri{DT, "X", ","}] = REJECT
	dfa[Tri{ACCEPT, "X", ","}] = REJECT
	dfa[Tri{REJECT, "X", ","}] = REJECT
	dfa[Tri{SYM, "X", ","}] = REJECT
	dfa[Tri{OTHER, "X", ","}] = REJECT
	dfa[Tri{CSYM, "X", ","}] = COMMA
	dfa[Tri{LPARENC, "X", ","}] = REJECT

	// Input tag "." (sentence end) — terminates a notice after a number
	// or proper noun.
	dfa[Tri{START, "X", "."}] = REJECT
	dfa[Tri{LPAREN, "X", "."}] = REJECT
	dfa[Tri{CCHAR, "X", "."}] = REJECT
	dfa[Tri{RPAREN, "X", "."}] = REJECT
	dfa[Tri{NP, "X", "."}] = ACCEPT
	dfa[Tri{COMMA, "X", "."}] = REJECT
	dfa[Tri{CD, "X", "."}] = ACCEPT
	dfa[Tri{DASH, "X", "."}] = REJECT
	dfa[Tri{IN, "X", "."}] = REJECT
	dfa[Tri{DT, "X", "."}] = REJECT
	dfa[Tri{ACCEPT, "X", "."}] = REJECT
	dfa[Tri{REJECT, "X", "."}] = REJECT
	dfa[Tri{SYM, "X", "."}] = REJECT
	dfa[Tri{OTHER, "X", "."}] = REJECT
	dfa[Tri{CSYM, "X", "."}] = REJECT
	dfa[Tri{LPARENC, "X", "."}] = REJECT

	// Input tag "sym" (generic symbol).
	dfa[Tri{START, "X", "sym"}] = SYM
	dfa[Tri{LPAREN, "X", "sym"}] = REJECT
	dfa[Tri{CCHAR, "X", "sym"}] = REJECT
	dfa[Tri{RPAREN, "X", "sym"}] = REJECT
	dfa[Tri{NP, "X", "sym"}] = SYM
	dfa[Tri{COMMA, "X", "sym"}] = REJECT
	dfa[Tri{CD, "X", "sym"}] = SYM
	dfa[Tri{DASH, "X", "sym"}] = REJECT
	dfa[Tri{IN, "X", "sym"}] = REJECT
	dfa[Tri{DT, "X", "sym"}] = REJECT
	dfa[Tri{ACCEPT, "X", "sym"}] = ACCEPT
	dfa[Tri{REJECT, "X", "sym"}] = REJECT
	dfa[Tri{SYM, "X", "sym"}] = REJECT
	dfa[Tri{OTHER, "X", "sym"}] = REJECT
	dfa[Tri{CSYM, "X", "sym"}] = SYM
	dfa[Tri{LPARENC, "X", "sym"}] = REJECT

	// Input tag "cc" (coordinating conjunction, e.g. "and").
	dfa[Tri{START, "X", "cc"}] = REJECT
	dfa[Tri{LPAREN, "X", "cc"}] = REJECT
	dfa[Tri{CCHAR, "X", "cc"}] = REJECT
	dfa[Tri{RPAREN, "X", "cc"}] = REJECT
	dfa[Tri{NP, "X", "cc"}] = OTHER
	dfa[Tri{COMMA, "X", "cc"}] = REJECT
	dfa[Tri{CD, "X", "cc"}] = OTHER
	dfa[Tri{DASH, "X", "cc"}] = REJECT
	dfa[Tri{IN, "X", "cc"}] = OTHER
	dfa[Tri{DT, "X", "cc"}] = OTHER
	dfa[Tri{ACCEPT, "X", "cc"}] = REJECT
	dfa[Tri{REJECT, "X", "cc"}] = REJECT
	dfa[Tri{SYM, "X", "cc"}] = REJECT
	dfa[Tri{OTHER, "X", "cc"}] = REJECT
	dfa[Tri{CSYM, "X", "cc"}] = OTHER
	dfa[Tri{LPARENC, "X", "cc"}] = REJECT

	// Input class "X"/"X": any word/tag combination not handled above.
	// A notice ending on a number or proper noun is accepted.
	dfa[Tri{START, "X", "X"}] = REJECT
	dfa[Tri{LPAREN, "X", "X"}] = REJECT
	dfa[Tri{CCHAR, "X", "X"}] = REJECT
	dfa[Tri{RPAREN, "X", "X"}] = REJECT
	dfa[Tri{NP, "X", "X"}] = ACCEPT
	dfa[Tri{COMMA, "X", "X"}] = REJECT
	dfa[Tri{CD, "X", "X"}] = ACCEPT
	dfa[Tri{DASH, "X", "X"}] = REJECT
	dfa[Tri{IN, "X", "X"}] = REJECT
	dfa[Tri{DT, "X", "X"}] = REJECT
	dfa[Tri{ACCEPT, "X", "X"}] = REJECT
	dfa[Tri{REJECT, "X", "X"}] = REJECT
	dfa[Tri{SYM, "X", "X"}] = REJECT
	dfa[Tri{OTHER, "X", "X"}] = REJECT
	dfa[Tri{CSYM, "X", "X"}] = REJECT
	dfa[Tri{LPARENC, "X", "X"}] = REJECT

	return symbols, dfa
}

// Given a string this will return the copyright notice
// of that string if it exists, if not the empty string is returned.
// The string must be tagged and properly delimited.
func (copyrightTagger *Tagger) Extract(inBytes []byte) string {
	// Before I can match for copyright notice I need the sentence tagged
	var taggedSent []TaggedWord
	taggedSent = copyrightTagger.TagBytes(inBytes)
	currentState := REJECT
	var potentialNotice []TaggedWord = make([]TaggedWord, 0)
	var extractedNotice []TaggedWord = make([]TaggedWord, 0)
	for _, taggedWord := range taggedSent {
		// Is what I have good enough to add to the extracted Notices
		if currentState == ACCEPT {
			extractedNotice = append(extractedNotice, potentialNotice...)
			potentialNotice = nil
		}
		// Transition to the next state given current 'input'
		if strings.ToLower(taggedWord.word) == "copyright" || strings.ToLower(taggedWord.word) == "c" {
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, strings.ToLower(taggedWord.word), taggedWord.tag}]
		} else if strings.Contains(taggedWord.word, "©") {
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "©", "sym"}]
		} else if strings.Contains(copyrightTagger.CopyrightSyms, taggedWord.tag) {
			// the tag is one the DFA knows about: dispatch on the tag only
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "X", taggedWord.tag}]
		} else {
			// unknown word and tag: take the catch-all transition
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "X", "X"}]
		}
		// Because of multiple notices right after the other here's a check...
		if currentState == START || currentState == LPAREN || currentState == CSYM {
			if len(potentialNotice) > 3 { // Does it seem like something useful has been captured
				extractedNotice = append(extractedNotice, potentialNotice...)
				potentialNotice = nil
				potentialNotice = append(potentialNotice, taggedWord)
			} else {
				// too short to keep: drop it and start over with this word
				potentialNotice = nil
				potentialNotice = append(potentialNotice, taggedWord)
			}
		} else if currentState != REJECT { // && currentState != ACCEPT
			potentialNotice = append(potentialNotice, taggedWord)
		}
	}
	// Do a final check to see if I might have a notice as the very last part of the string
	// Be a little more vauge here to be safe
	if currentState == ACCEPT || currentState == CD || currentState == NP || len(potentialNotice) > 3 {
		extractedNotice = append(extractedNotice, potentialNotice...)
	}
	if len(extractedNotice) < 1 {
		return ""
	}
	return toString(extractedNotice)
}

// similar to the regex findAllIndex, will return the byte offsets
// of each detected copyright notice as {start, end} pairs.
func (copyrightTagger *Tagger) FindAllIndex(inBytes []byte) [][]int {
	// Before I can match for copyright notice I need the sentence tagged
	var taggedSent []TaggedWord
	taggedSent = copyrightTagger.TagBytes(inBytes)
	//Return array of indicies
	var indicies = make([][]int, 0)
	currentState := REJECT
	var potentialNotice []TaggedWord = make([]TaggedWord, 0)
	for _, taggedWord := range taggedSent {
		// Is what I have good enough to add to the extracted Notices
		// NOTE(review): potentialNotice[0] assumes the slice is non-empty
		// whenever ACCEPT is reached — presumably guaranteed by the DFA
		// (ACCEPT is only reachable after accumulating words); confirm.
		if currentState == ACCEPT {
			indicies = append(indicies, []int{potentialNotice[0].byteStart, taggedWord.byteStart})
			potentialNotice = nil
		}
		// Transition to the next state given current 'input'
		if strings.ToLower(taggedWord.word) == "copyright" || strings.ToLower(taggedWord.word) == "c" {
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, strings.ToLower(taggedWord.word), taggedWord.tag}]
		} else if strings.Contains(taggedWord.word, "©") {
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "©", "sym"}]
		} else if strings.Contains(copyrightTagger.CopyrightSyms, taggedWord.tag) {
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "X", taggedWord.tag}]
		} else {
			currentState = copyrightTagger.CopyrightDFA[Tri{currentState, "X", "X"}]
		}
		// Because of multiple notices right after the other here's a check...
		if currentState == START || currentState == LPAREN || currentState == CSYM {
			if len(potentialNotice) > 3 { // Does it seem like something useful has been captured
				indicies = append(indicies, []int{potentialNotice[0].byteStart, taggedWord.byteStart})
				potentialNotice = nil
				potentialNotice = append(potentialNotice, taggedWord)
			} else {
				potentialNotice = nil
				potentialNotice = append(potentialNotice, taggedWord)
			}
		} else if currentState != REJECT { // && currentState != ACCEPT
			potentialNotice = append(potentialNotice, taggedWord)
		}
	}
	// Do a final check to see if I might have a notice as the very last part of the string
	// Be a little more vauge here to be safe
	if currentState == ACCEPT || currentState == CD || currentState == NP || len(potentialNotice) > 3 {
		// NOTE(review): the end offset is the byteStart of the *last word*,
		// so the final word's own bytes are excluded from the reported
		// range — looks like an off-by-one-token; confirm against callers.
		indicies = append(indicies, []int{potentialNotice[0].byteStart, potentialNotice[len(potentialNotice)-1].byteStart})
	}
	return indicies
}
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"time"
)

//var path = "~/go/src/github.com/gitders222/sanntid/exercises/Ex06/backup.txt"

// path is where CreateFile would create the heartbeat file.
// NOTE(review): PrimaryProcess/IsAlive read and write the relative
// "backup.txt", not this path — confirm which location is intended.
var path = "/backup.txt"
var count int = 0

// main runs the primary process in the background and spawns a backup
// terminal whenever the heartbeat file looks stale.
func main() {
	//CreateFile()
	go PrimaryProcess()
	for {
		if !IsAlive() {
			fmt.Println("DEAD")
			// An exec.Cmd can be started at most once, so a fresh command
			// must be built for every restart attempt; reusing a single Cmd
			// (as the original did) makes every Run after the first fail
			// with "exec: already started".
			backup := exec.Command("gnome-terminal", "-x", "sh", "-c", "go run backup.go")
			backup.Run()
		}
	}
}

// PrimaryProcess periodically increments a one-byte counter, writes it to
// backup.txt as a heartbeat and refreshes the file's timestamps.
func PrimaryProcess() {
	c := make([]byte, 1)
	for {
		// NOTE(review): the timestamps are captured *before* the 2s sleep,
		// so the file's mtime always lags time.Now(); IsAlive's freshness
		// check can therefore never observe a future mtime. Confirm the
		// intended liveness semantics.
		atime := time.Now()
		mtime := time.Now()
		time.Sleep(time.Second * 2)
		c[0]++
		fmt.Println(c[0])
		//send to backup
		err := ioutil.WriteFile("backup.txt", c, os.ModeDevice)
		CheckError(err)
		err1 := os.Chtimes("backup.txt", atime, mtime)
		CheckError(err1)
	}
}

// CreateFile creates the heartbeat file at path if it does not exist yet.
func CreateFile() {
	// detect if file exists
	var _, err = os.Stat(path)

	// create file if not exists
	if os.IsNotExist(err) {
		var file, err = os.Create(path)
		CheckError(err)
		defer file.Close()
	}
}

// IsAlive reports whether the heartbeat file's modification time is after
// the moment of this check, i.e. whether the primary appears to be writing.
func IsAlive() bool {
	time.Sleep(time.Second * 2)
	dat, err := ioutil.ReadFile("backup.txt")
	// check the read error too — it was previously overwritten (and thus
	// silently ignored) by the os.Stat call below
	CheckError(err)
	fi, err := os.Stat("backup.txt")
	CheckError(err)
	_ = dat
	now := time.Now()
	// ModTime is a method and must be called; the original compared the
	// method value itself with ">", which does not compile.
	timeFile := fi.ModTime()
	fmt.Println(timeFile)
	fmt.Println(now)
	// time.Time values are compared with After/Before, not ">".
	return timeFile.After(now)
}

// Exit removes the file backing the given handle.
func Exit(file *os.File) {
	// the defer in the original ran immediately when Exit returned anyway,
	// so a plain call is equivalent and clearer
	os.Remove(file.Name())
}

// CheckError panics on any non-nil error.
func CheckError(e error) {
	if e != nil {
		panic(e)
	}
}
package labels

import (
	"fmt"
	"sync"
	"time"

	"github.com/square/p2/pkg/logging"
	"github.com/square/p2/pkg/store/consul/consulutil"

	"github.com/hashicorp/consul/api"
	"github.com/sirupsen/logrus"
	"k8s.io/kubernetes/pkg/labels"

	"github.com/rcrowley/go-metrics"
)

// minimum required time between retrievals of label subtrees.
var DefaultAggregationRate = 10 * time.Second

// MetricsRegistry is the subset of a metrics registry this package needs:
// registering a metric under a name.
type MetricsRegistry interface {
	Register(metricName string, metric interface{}) error
}

// selectorWatches represents a watched label selector and its result channels.
type selectorWatches struct {
	selector labels.Selector
	// all accesses to this map are done while the aggregator's watcherLock
	// is held, so no additional mutex is necessary
	watches map[selectorWatch]struct{}
}

// watchMap maps a selector's string form to the set of watches on it.
type watchMap map[string]*selectorWatches

// len returns the total number of individual watches across all selectors.
func (w watchMap) len() int {
	total := 0
	for _, watches := range w {
		total += len(watches.watches)
	}
	return total
}

// selectorWatch is a single subscriber: its buffered result channel.
type selectorWatch struct {
	resultCh chan []Labeled
}

// consulAggregator periodically lists one label-type subtree in Consul and
// fans matching label sets out to registered selector watches.
type consulAggregator struct {
	logger      logging.Logger
	labelType   Type
	watcherLock sync.Mutex // watcherLock synchronizes access to labeledCache and watchers
	path        string
	kv          consulutil.ConsulLister
	watchers    watchMap
	labeledCache    []Labeled // cached contents of the label subtree
	aggregatorQuit  chan struct{}
	aggregationRate time.Duration

	metReg MetricsRegistry
	// how many watchers are currently using this aggregator?
	metWatchCount metrics.Gauge
	// count how many watcher channels are full when a send is attempted
	metWatchSendMiss metrics.Gauge
	// how big is the cache of labels?
	metCacheSize metrics.Gauge
}

// NewConsulAggregator builds an aggregator for one label type. A zero
// aggregationRate falls back to DefaultAggregationRate; a nil metReg gets a
// private registry. Registration errors for the gauges are deliberately
// ignored (metrics are best-effort).
func NewConsulAggregator(
	labelType Type,
	kv consulutil.ConsulLister,
	logger logging.Logger,
	metReg MetricsRegistry,
	aggregationRate time.Duration,
) *consulAggregator {
	if aggregationRate == 0 {
		aggregationRate = DefaultAggregationRate
	}
	if metReg == nil {
		metReg = metrics.NewRegistry()
	}
	watchCount := metrics.NewGauge()
	watchSendMiss := metrics.NewGauge()
	cacheSize := metrics.NewGauge()
	_ = metReg.Register(fmt.Sprintf("%v_aggregate_watches", labelType.String()), watchCount)
	_ = metReg.Register(fmt.Sprintf("%v_aggregate_send_miss", labelType.String()), watchSendMiss)
	_ = metReg.Register(fmt.Sprintf("%v_aggregate_cache_size", labelType.String()), cacheSize)
	return &consulAggregator{
		kv:               kv,
		logger:           logger,
		labelType:        labelType,
		path:             typePath(labelType),
		aggregatorQuit:   make(chan struct{}),
		aggregationRate:  aggregationRate,
		metReg:           metReg,
		metWatchCount:    watchCount,
		metWatchSendMiss: watchSendMiss,
		metCacheSize:     cacheSize,
		watchers:         make(map[string]*selectorWatches),
	}
}

// Add a new selector to the aggregator. New values on the output channel may not appear
// right away.
func (c *consulAggregator) Watch(selector labels.Selector, quitCh <-chan struct{}) chan []Labeled { resCh := make(chan []Labeled, 1) // this buffer is useful in sendMatches(), below select { case <-c.aggregatorQuit: c.logger.WithField("selector", selector.String()).Warnln("New selector added after aggregator was closed") close(resCh) return resCh default: } c.watcherLock.Lock() defer c.watcherLock.Unlock() watches, ok := c.watchers[selector.String()] if !ok { watches = &selectorWatches{ selector: selector, watches: make(map[selectorWatch]struct{}), } c.watchers[selector.String()] = watches } watch := selectorWatch{ resultCh: resCh, } watches.watches[watch] = struct{}{} if c.labeledCache != nil { // technically we could send the matches only to the new watcher, but // it simplifies the code to just send to all watchers of that // selector c.sendMatches(*watches) } go func() { select { case <-quitCh: case <-c.aggregatorQuit: } c.removeWatch(selector, watch) }() c.metWatchCount.Update(int64(c.watchers.len())) return watch.resultCh } func (c *consulAggregator) removeWatch(selector labels.Selector, watch selectorWatch) { c.watcherLock.Lock() defer c.watcherLock.Unlock() close(watch.resultCh) watches, ok := c.watchers[selector.String()] if !ok { // this would indicate a pretty bad bug if this happened, maybe even panic worthy c.logger.Errorf("couldn't find the removed watcher for selector %s", selector) } delete(watches.watches, watch) if len(watches.watches) == 0 { delete(c.watchers, selector.String()) } c.metWatchCount.Update(int64(c.watchers.len())) } func (c *consulAggregator) Quit() { close(c.aggregatorQuit) } // Aggregate does the labor of querying Consul for all labels under a given type, // applying each watcher's label selector to the results and sending those results on each // watcher's output channel respectively. // Aggregate will loop forever, constantly sending matches to each watcher // until Quit() has been invoked. 
func (c *consulAggregator) Aggregate(jitterWindow time.Duration) { outPairs := make(chan api.KVPairs) done := make(chan struct{}) outErrors := make(chan error) go consulutil.WatchPrefix(c.path+"/", c.kv, outPairs, done, outErrors, 0, jitterWindow) for { missedSends := 0 loopTime := time.After(c.aggregationRate) select { case err := <-outErrors: c.logger.WithError(err).Errorln("Error during watch") case <-c.aggregatorQuit: return case pairs := <-outPairs: if len(pairs) == 0 { // This protects us against spurious 404s from consul. It could // pose problems when the label type is something that might have // zero entries e.g. rolls, but for now there is no such use-case c.logger.WithError(NoLabelsFound).Errorf("No labels found for type %s", c.labelType) continue } c.watcherLock.Lock() // replace our current cache with the latest contents of the label tree. c.fillCache(pairs) // Iterate over each watcher and send the []Labeled // that match the watcher's selector to the watcher's out channel. var wg sync.WaitGroup missedSendsCh := make(chan struct{}) missedSendsProcessed := make(chan struct{}) go func() { defer close(missedSendsProcessed) for range missedSendsCh { missedSends++ } }() for _, watcher := range c.watchers { wg.Add(1) go func(watches selectorWatches) { defer wg.Done() sendResults := c.sendMatches(watches) for _, success := range sendResults { if !success { missedSendsCh <- struct{}{} } } }(*watcher) } wg.Wait() c.watcherLock.Unlock() close(missedSendsCh) <-missedSendsProcessed c.metWatchSendMiss.Update(int64(missedSends)) } select { case <-c.aggregatorQuit: return case <-loopTime: // we purposely don't case outErrors here, since loopTime lets us // back off of Consul watches. If an error repeatedly were occurring, // we could end up in a nasty busy loop. 
} } } func (c *consulAggregator) getCache() ([]Labeled, error) { c.watcherLock.Lock() defer c.watcherLock.Unlock() if len(c.labeledCache) == 0 { return nil, fmt.Errorf("No cache available") } return c.labeledCache, nil } func (c *consulAggregator) fillCache(pairs api.KVPairs) { cache := make([]Labeled, len(pairs)) for i, kvp := range pairs { labeled, err := convertKVPToLabeled(kvp) if err != nil { c.logger.WithErrorAndFields(err, logrus.Fields{ "key": kvp.Key, "value": string(kvp.Value), }).Errorln("Invalid key encountered, skipping this value") continue } cache[i] = labeled } c.labeledCache = cache c.metCacheSize.Update(int64(len(cache))) } // this must be called within the watcherLock mutex. func (c *consulAggregator) sendMatches(watches selectorWatches) []bool { matches := []Labeled{} for _, labeled := range c.labeledCache { if watches.selector.Matches(labeled.Labels) { matches = append(matches, labeled) } } // Fast, lossy result broadcasting. We treat clients as unreliable // tenants of the aggregator by performing the following: the resulting // channel is a buffered channel of size 1. When sending an update to // watchers, we first see if we can _read_ a value off the buffered result // channel. This removes any stale values that have yet to be read by // watchers. We then subsequently put the newer value into the channel. // // This approach has the effect of a slower watcher potentially missing updates, // but also means one watcher can't cause a DoS of other watchers as the main // aggregation goroutine waits. // first drain the previous (stale) cached value if present... var ret []bool for watcher, _ := range watches.watches { sendSuccess := true select { case <-watcher.resultCh: sendSuccess = false default: } // ... then send the newer value. select { case watcher.resultCh <- matches: default: } ret = append(ret, sendSuccess) } return ret } func selectorsEqual(sel1 labels.Selector, sel2 labels.Selector) bool { return sel1.String() == sel2.String() }
package main

import "fmt"

// calculosMatematicos returns the sum and the difference of its two operands.
func calculosMatematicos(n1, n2 int) (suma, resta int) {
	return n1 + n2, n1 - n2
}

func main() {
	// only the difference is of interest here; the sum is discarded
	_, resultadoResta := calculosMatematicos(10, 5)
	fmt.Println(resultadoResta)
}
package argument_parsing

/* TODO: stick on first matched block */

// state machine: machine[state][row][case]; row 0 is the jump table, rows
// 1-4 are the job tables described below.
type machine = [][][]byte

// state: one row per table (jmp + four job rows)
type state = [][]byte

/*
jumps to state according to case in context of current state
[i]: <index_of_state>
*/
type jmp = []byte

/*
executes stuff according to case in context of current state
[i]: 0 - skip 1 - execute
*/
type job = []byte

/*
jobs:
[1] append result
[2] inc bracket counter
[3] dec bracket counter
[4] append current match
*/
var stateMachine = machine{
	/* start */
	state{jmp{0, 3, 2, 2, 0, 0, 1}, job{0, 0, 0, 0, 0, 0, 0}, job{0, 0, 1, 1, 0, 0, 0}, job{0, 0, 0, 0, 0, 0, 0}, job{0, 0, 0, 0, 0, 0, 1}}, // 0
	/* regular text */
	state{jmp{1, 3, 2, 2, 1, 1, 1}, job{1, 1, 1, 1, 0, 0, 0}, job{0, 0, 1, 1, 0, 0, 0}, job{0, 0, 0, 0, 0, 0, 0}, job{0, 0, 0, 0, 1, 1, 1}}, // 1
	/* text in open_close brackets */
	state{jmp{2, 2, 2, 2, 2, 1, 2}, job{1, 0, 0, 0, 0, 1, 0}, job{0, 0, 1, 1, 0, 0, 0}, job{0, 0, 0, 0, 1, 1, 0}, job{1, 1, 1, 1, 1, 0, 1}}, // 2
	/* text in single block (quotes) */
	state{jmp{3, 1, 3, 3, 3, 3, 3}, job{0, 1, 0, 0, 0, 0, 0}, job{0, 0, 0, 0, 0, 0, 0}, job{0, 0, 0, 0, 0, 0, 0}, job{1, 0, 1, 1, 1, 1, 1}}, // 3
}

/* cases are referring to index of jmp and job cells */
const caseSeparator = 0
const caseSingleBlock = 1
const caseOpeningBracket = 2
const caseOpenBracket = 3
const caseCloseBracket = 4
const caseLastClosure = 5
const caseDefault = 6

// Parse splits content into tokens using the state machine above.
// separators delimit tokens, single runes (e.g. quotes) open/close verbatim
// blocks, and brackets is an alternating open/close list of (possibly
// nested) bracket pairs. Panics if brackets has odd length.
func Parse(content string, separators []rune, single []rune, brackets []rune) (result [][]rune) {
	var (
		currentState byte
		cs           int // input case for the current rune
		openBrackets, closeBrackets []rune
		bracketCounter int // current bracket nesting depth
		buf            []rune // all retained runes so far
		cp             int    // start of the token currently being built
	)
	if len(brackets)%2 != 0 {
		panic("expected even number of brackets (open and closed ones).")
	}
	// split the alternating list into open and close bracket sets
	for i := 0; i < len(brackets); i++ {
		if i%2 == 0 {
			openBrackets = append(openBrackets, brackets[i])
		} else {
			closeBrackets = append(closeBrackets, brackets[i])
		}
	}
	for _, r := range content {
		// classify the rune into one of the seven cases
		switch {
		case containsRune(separators, r):
			cs = caseSeparator
		case containsRune(single, r):
			cs = caseSingleBlock
		case containsRune(openBrackets, r):
			switch bracketCounter {
			case 0:
				cs = caseOpeningBracket
			default:
				cs = caseOpenBracket
			}
		case containsRune(closeBrackets, r):
			switch bracketCounter {
			case 1:
				cs = caseLastClosure
			default:
				cs = caseCloseBracket
			}
		default:
			cs = caseDefault
		}
		state := stateMachine[currentState]
		// job 1: flush the pending token (only if it is non-empty)
		if check(state[1][cs]) && cp != len(buf) {
			result = append(result, buf[cp:])
			cp = len(buf)
		}
		// jobs 2/3: track bracket nesting depth
		if check(state[2][cs]) {
			bracketCounter++
		}
		if check(state[3][cs]) {
			bracketCounter--
		}
		// job 4: keep the current rune as part of the token
		if check(state[4][cs]) {
			buf = append(buf, r)
		}
		/* jump to next state */
		currentState = stateMachine[currentState][0][cs]
	}
	// flush any token still pending at end of input
	if len(buf) > cp {
		result = append(result, buf[cp:])
	}
	return
}

// check reports whether a job-table entry is enabled (non-zero).
func check(b byte) bool {
	if b > 0 {
		return true
	}
	return false
}

// containsRune reports whether slice contains r.
func containsRune(slice []rune, r rune) bool {
	for i := 0; i < len(slice); i++ {
		if slice[i] == r {
			return true
		}
	}
	return false
}
package medasync

import (
	"context"

	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"

	"git.scc.kit.edu/sdm/lsdf-checksum/meda"
)

// chunker drives chunk-wise processing of ID ranges: it iterates using
// meda.ChunkIterator and hands each range to ProcessChunk, committing one
// transaction per chunk.
type chunker struct {
	// ChunkSize is the chunk size passed to the iterator.
	ChunkSize uint64
	// NextChunkQuery is the SQL the iterator uses to find the next chunk.
	NextChunkQuery string

	DB *meda.DB
	// BeginTx optionally overrides how transactions are started; when nil,
	// DB.BeginTxx with default options is used (see beginTx).
	BeginTx func(ctx context.Context, db *meda.DB) (*sqlx.Tx, error)
	// ProcessChunk is invoked once per chunk with the bounding IDs; the
	// transaction is committed after it returns nil.
	ProcessChunk func(ctx context.Context, db *meda.DB, tx *sqlx.Tx, prevID, lastID uint64) error
}

// Run processes all chunks. Each iteration runs in its own transaction:
// commit on success, rollback on ProcessChunk or iterator error. The final
// (empty) transaction left over after the last commit is rolled back.
func (c *chunker) Run(ctx context.Context) error {
	it := meda.ChunkIterator{
		NextChunkQuery: c.NextChunkQuery,
		ChunkSize:      c.ChunkSize,
	}

	tx, err := c.beginTx(ctx)
	if err != nil {
		return errors.Wrap(err, "(*chunker).Run: begin transaction")
	}

	for it.Next(ctx, tx) {
		err = c.ProcessChunk(ctx, c.DB, tx, it.PreviousID(), it.LastID())
		if err != nil {
			_ = tx.Rollback()
			return errors.Wrap(err, "(*chunker).Run: ProcessChunk execution")
		}

		err = tx.Commit()
		if err != nil {
			return errors.Wrap(err, "(*chunker).Run: commit transaction")
		}

		// start a fresh transaction for the next chunk (or for the final
		// rollback below if the iterator is exhausted)
		tx, err = c.beginTx(ctx)
		if err != nil {
			return errors.Wrap(err, "(*chunker).Run: begin transaction")
		}
	}
	if err := it.Err(); err != nil {
		_ = tx.Rollback()
		return errors.Wrap(err, "(*chunker).Run: chunk reader")
	}

	if err := tx.Rollback(); err != nil {
		return errors.Wrap(err, "(*chunker).Run: rollback empty transaction")
	}

	return nil
}

// beginTx starts a transaction via the BeginTx override when set, otherwise
// via the database handle directly.
func (c *chunker) beginTx(ctx context.Context) (*sqlx.Tx, error) {
	if c.BeginTx == nil {
		return c.DB.BeginTxx(ctx, nil)
	}

	return c.BeginTx(ctx, c.DB)
}
package parser import ( "github.com/fr3fou/monkey/ast" "github.com/fr3fou/monkey/token" ) // parseStatement is a helper function that parses the current token // with the appropriate parsing function func (p *Parser) parseStatement() ast.Statement { switch p.tok.Type { case token.LET: return p.parseLetStatement() case token.RETURN: return p.parseReturnStatement() default: return p.parseExpressionStatement() } } // parseReturnStatement parses any return statment (return '5') func (p *Parser) parseReturnStatement() *ast.ReturnStatement { stmt := &ast.ReturnStatement{ Token: p.tok, } p.nextToken() stmt.ReturnValue = p.parseExpression(LOWEST) if p.nextTokIs(token.SEMICOLON) { p.nextToken() } return stmt } // parseLetStatement parses any let statment (let foo = 5) func (p *Parser) parseLetStatement() *ast.LetStatement { stmt := &ast.LetStatement{ Token: p.tok, } if !p.expectPeek(token.IDENT) { return nil } stmt.Name = &ast.Identifier{ Token: p.tok, Value: p.tok.Literal, } if !p.expectPeek(token.ASSIGN) { return nil } p.nextToken() stmt.Value = p.parseExpression(LOWEST) if p.nextTokIs(token.SEMICOLON) { p.nextToken() } return stmt } // parseExpressionStatement parses any expression statements (foobar;) func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement { stmt := &ast.ExpressionStatement{ Token: p.tok, } stmt.Expression = p.parseExpression(LOWEST) if p.nextTokIs(token.SEMICOLON) { p.nextToken() } return stmt } // parseBlockStatement parses any block statement // { // let a = 5; // let b = 4; // } func (p *Parser) parseBlockStatement() *ast.BlockStatement { // create the block block := &ast.BlockStatement{ Token: p.tok, Statements: []ast.Statement{}, } p.nextToken() for !p.tokIs(token.RBRACE) && !p.tokIs(token.EOF) { stmt := p.parseStatement() if stmt != nil { block.Statements = append(block.Statements, stmt) } p.nextToken() } return block }
package helpers

import (
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/brianvoe/sjwt"
	"github.com/golang-jwt/jwt"
	"github.com/labstack/echo/v4"
)

// jwtClaim is the custom JWT payload carrying the user's ID and role on
// top of the registered claims.
type jwtClaim struct {
	UserId int    `json:"user_id"`
	Role   string `json:"role"`
	jwt.StandardClaims
}

// GenerateToken signs a new HS256 JWT for the given user ID and role,
// using the JWT_SECRET environment variable as the signing key.
func GenerateToken(userId int, role string) (string, error) {
	claims := jwtClaim{userId, role, jwt.StandardClaims{
		// NOTE(review): 2400 hours (~100 days) is unusually long for a
		// session token — confirm the intended lifetime.
		ExpiresAt: time.Now().Local().Add(time.Hour * 2400).Unix(),
	}}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	// Propagate signing errors instead of silently returning an empty token.
	tokenString, err := token.SignedString([]byte(os.Getenv("JWT_SECRET")))
	if err != nil {
		return "", err
	}
	return tokenString, nil
}

// ExtractClaims parses and validates tokenStr and returns its payload.
// The boolean reports whether the token was valid.
func ExtractClaims(tokenStr string) (jwtClaim, bool) {
	hmacSecret := []byte(os.Getenv("JWT_SECRET"))

	// jwt.Parse decodes into jwt.MapClaims, which can never type-assert to
	// jwtClaim; ParseWithClaims decodes directly into our claim struct.
	claims := &jwtClaim{}
	token, err := jwt.ParseWithClaims(tokenStr, claims, func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		return hmacSecret, nil
	})
	if err != nil || !token.Valid {
		log.Printf("Invalid JWT Token")
		return jwtClaim{}, false
	}
	return *claims, true
}

// ExtractToken verifies tokenString and returns the user_id claim.
// A valid token without a numeric user_id yields (0, nil).
func ExtractToken(tokenString string) (int, error) {
	token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		return []byte(os.Getenv("JWT_SECRET")), nil
	})
	if err != nil {
		return 0, err
	}
	if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
		// JSON numbers decode into float64; asserting to int would always
		// fail (returning the zero value), so convert explicitly.
		if id, ok := claims["user_id"].(float64); ok {
			return int(id), nil
		}
	}
	return 0, nil
}

// ExtractTokenUserId reads the already-parsed *jwt.Token stored in the
// echo context under "user" and returns its user_id claim.
func ExtractTokenUserId(e echo.Context) (int, error) {
	// Guard the assertion: a bare .(T) panics when "user" is unset or of
	// another type.
	user, ok := e.Get("user").(*jwt.Token)
	if !ok {
		return 0, nil
	}
	if user.Valid {
		if claims, ok := user.Claims.(jwt.MapClaims); ok {
			// JSON numbers are float64, never int.
			if id, ok := claims["user_id"].(float64); ok {
				return int(id), nil
			}
		}
	}
	return 0, nil
}

// SetTokenCookie stores token in an HTTP-only cookie with the given name
// and expiration on the response.
func SetTokenCookie(name, token string, expiration time.Time, c echo.Context) {
	cookie := new(http.Cookie)
	cookie.Name = name
	cookie.Value = token
	cookie.Expires = expiration
	cookie.Path = "/"
	// Http-only helps mitigate the risk of client side script accessing the protected cookie.
	cookie.HttpOnly = true
	c.SetCookie(cookie)
}

// AdminRoleValidation is echo middleware that only lets requests with an
// "admin" role claim through; everything else gets 401.
func AdminRoleValidation(next echo.HandlerFunc) echo.HandlerFunc {
	return func(e echo.Context) error {
		role, err := ExtractJWTPayloadRole(e)
		if err != nil {
			return echo.ErrUnauthorized
		}
		if role == "admin" {
			return next(e)
		}
		return echo.ErrUnauthorized
	}
}

// UserRoleValidation is echo middleware that only lets requests with a
// "user" role claim through; everything else gets 401.
func UserRoleValidation(next echo.HandlerFunc) echo.HandlerFunc {
	return func(e echo.Context) error {
		role, err := ExtractJWTPayloadRole(e)
		if err != nil {
			return echo.ErrUnauthorized
		}
		if role == "user" {
			return next(e)
		}
		return echo.ErrUnauthorized
	}
}

// ExtractJWTPayloadRole reads the bearer token from the Authorization
// header and returns its "role" claim without verifying the signature.
func ExtractJWTPayloadRole(c echo.Context) (string, error) {
	header := c.Request().Header.Clone().Get("Authorization")
	// Guard the split: indexing [1] on a header without "Bearer " panics.
	parts := strings.Split(header, "Bearer ")
	if len(parts) < 2 {
		return "", fmt.Errorf("malformed Authorization header")
	}
	claims, err := sjwt.Parse(parts[1])
	if err != nil {
		return "", err
	}
	role, ok := claims["role"].(string)
	if !ok {
		return "", fmt.Errorf("role claim missing or not a string")
	}
	return role, nil
}

// ExtractJWTPayloadUserId reads the bearer token from the Authorization
// header and returns its "user_id" claim without verifying the signature.
func ExtractJWTPayloadUserId(c echo.Context) (float64, error) {
	header := c.Request().Header.Clone().Get("Authorization")
	parts := strings.Split(header, "Bearer ")
	if len(parts) < 2 {
		return 0, fmt.Errorf("malformed Authorization header")
	}
	claims, err := sjwt.Parse(parts[1])
	if err != nil {
		return 0, err
	}
	// Guarded assertion: previously a non-numeric/missing claim panicked.
	userId, ok := claims["user_id"].(float64)
	if !ok {
		return 0, fmt.Errorf("user_id claim missing or not a number")
	}
	return userId, nil
}
/* * Copyright (C) 2017-Present Pivotal Software, Inc. All rights reserved. * * This program and the accompanying materials are made available under * the terms of the under the Apache License, Version 2.0 (the "License”); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package serviceutil import ( plugin_models "code.cloudfoundry.org/cli/plugin/models" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" "strings" "code.cloudfoundry.org/cli/plugin" "github.com/pivotal-cf/spring-cloud-services-cli-plugin/httpclient" ) type ManagementParameters struct { Url string `json:"-"` ServiceOfferingName string `json:"serviceOfferingName"` ServicePlanName string `json:"planName"` } type serviceDefinitionResp struct { Credentials struct { URI string } } //go:generate counterfeiter . 
ServiceInstanceResolver type ServiceInstanceResolver interface { GetServiceInstanceUrl(serviceInstanceName string, accessToken string) (string, error) GetManagementParameters(serviceInstanceName string, accessToken string, lifecycleOperation bool) (ManagementParameters, error) } type serviceInstanceUrlResolver struct { cliConnection plugin.CliConnection authClient httpclient.AuthenticatedClient } func NewServiceInstanceUrlResolver(cliConnection plugin.CliConnection, authClient httpclient.AuthenticatedClient) ServiceInstanceResolver { return &serviceInstanceUrlResolver{ cliConnection: cliConnection, authClient: authClient, } } func (s *serviceInstanceUrlResolver) GetServiceInstanceUrl(serviceInstanceName string, accessToken string) (string, error) { serviceModel, err := s.cliConnection.GetService(serviceInstanceName) if err != nil { return "", fmt.Errorf("Service instance not found: %s", err) } if isV2ServiceInstance(serviceModel) { return s.getV2ServiceInstanceUrl(serviceModel.DashboardUrl, accessToken) } else { return s.getV3ServiceInstanceUrl(serviceModel.DashboardUrl, accessToken) } } func (s *serviceInstanceUrlResolver) GetManagementParameters(serviceInstanceName string, accessToken string, lifecycleOperation bool) (ManagementParameters, error) { serviceModel, err := s.cliConnection.GetService(serviceInstanceName) if err != nil { return ManagementParameters{}, fmt.Errorf("Service instance not found: %s", err) } var managementUrl string if isV2ServiceInstance(serviceModel) { managementUrl, err = s.getV2ManagementUrl(serviceModel) } else { managementUrl, err = s.getV3ManagementUrl(serviceModel, lifecycleOperation) } if err != nil { return ManagementParameters{}, fmt.Errorf("unable to resolve management managementUrl: %s", err) } return ManagementParameters{ Url: managementUrl, ServiceOfferingName: serviceModel.ServiceOffering.Name, ServicePlanName: serviceModel.ServicePlan.Name, }, nil } func (s *serviceInstanceUrlResolver) getV2ServiceInstanceUrl(dashboardUrl 
string, accessToken string) (string, error) { parsedUrl, err := url.Parse(dashboardUrl) if err != nil { return "", err } path := parsedUrl.Path segments := strings.Split(path, "/") if len(segments) == 0 || (len(segments) == 1 && segments[0] == "") { return "", fmt.Errorf("path of %s has no segments", dashboardUrl) } guid := segments[len(segments)-1] parsedUrl.Path = "/cli/instance/" + guid bodyReader, statusCode, err := s.authClient.DoAuthenticatedGet(parsedUrl.String(), accessToken) //In the case of a 404, the most likely cause is that the CLI version is greater than the broker version. if statusCode == http.StatusNotFound { return "", errors.New("The /cli/instance endpoint could not be found.\n" + "This could be because the Spring Cloud Services broker version is too old.\n" + "Please ensure SCS is at least version 1.3.3.\n") } if err != nil { return "", fmt.Errorf("Invalid service definition response: %s", err) } body, err := ioutil.ReadAll(bodyReader) if err != nil { return "", fmt.Errorf("Cannot read service definition response body: %s", err) } var serviceDefinitionResp serviceDefinitionResp err = json.Unmarshal(body, &serviceDefinitionResp) if err != nil { return "", fmt.Errorf("JSON response failed to unmarshal: %s", string(body)) } if serviceDefinitionResp.Credentials.URI == "" { return "", fmt.Errorf("JSON response contained empty property 'credentials.url', response body: '%s'", string(body)) } return serviceDefinitionResp.Credentials.URI + "/", nil } func (s *serviceInstanceUrlResolver) getV3ServiceInstanceUrl(dashboardUrl string, accessToken string) (string, error) { parsedUrl, err := url.Parse(dashboardUrl) if err != nil { return "", err } parsedUrl.Path = "" return fmt.Sprintf("%s/", parsedUrl.String()), nil } func (s *serviceInstanceUrlResolver) getV2ManagementUrl(serviceModel plugin_models.GetService_Model) (string, error) { parsedUrl, err := url.Parse(serviceModel.DashboardUrl) if err != nil { return "", err } parsedUrl.Path = 
fmt.Sprintf("/cli/instances/%s", serviceModel.Guid) return parsedUrl.String(), nil } func (s *serviceInstanceUrlResolver) getV3ManagementUrl(serviceModel plugin_models.GetService_Model, serviceBrokerOperation bool) (string, error) { var parsedUrl *url.URL var err error if serviceBrokerOperation { serviceBrokerV3Url, err := s.getV3ServiceBrokerUrl() if err != nil { return "", err } parsedUrl, err = url.Parse(serviceBrokerV3Url) } else { parsedUrl, err = url.Parse(serviceModel.DashboardUrl) } if err != nil { return "", err } parsedUrl.Path = fmt.Sprintf("/cli/instances/%s", serviceModel.Guid) return parsedUrl.String(), nil } func (s *serviceInstanceUrlResolver) getV3ServiceBrokerUrl() (string, error) { apiUrl, err := s.cliConnection.ApiEndpoint() if err != nil { return "", err } posFirst := strings.Index(apiUrl, ".") systemDomain := apiUrl[posFirst+1:] serviceBrokerV3Url := "https://scs-service-broker." + systemDomain return serviceBrokerV3Url, nil } func isV2ServiceInstance(serviceModel plugin_models.GetService_Model) bool { return strings.HasPrefix(serviceModel.ServiceOffering.Name, "p-") }
package collector

import (
	"bytes"
	"os"
	"os/exec"
	"strings"

	"github.com/prometheus/common/log"
)

// getGlusterBinary resolves the path of the gluster binary. With an empty
// glusterPath it looks the binary up via `which gluster`; otherwise it
// verifies that the given path exists and returns it unchanged.
func getGlusterBinary(glusterPath string) (string, error) {
	switch glusterPath {
	// NoDefine
	case "":
		out, err := exec.Command("which", "gluster").Output()
		if err != nil {
			// The original called log.Fatal here, which exits the whole
			// process and made the following return unreachable; report
			// the problem to the caller instead.
			log.Error("Please Make sure Gluster installed correctly. Cannot find gluster binary.")
			return "", err
		}
		// `which` terminates its output with a newline.
		return strings.TrimSuffix(string(out), "\n"), nil
	// Has Define
	default:
		// Check Exists
		if _, err := PathExists(glusterPath); err != nil {
			return "", err
		}
		return glusterPath, nil
	}
}

// execGlusterCommand runs the gluster binary with the given arguments plus
// `--xml` and returns whatever was captured on stdout.
func execGlusterCommand(arg ...string) (*bytes.Buffer, error) {
	glusterCmd, getErr := getGlusterBinary("")
	if getErr != nil {
		// Previously execution continued with an empty binary path after
		// logging; bail out early instead.
		log.Error(getErr)
		return &bytes.Buffer{}, getErr
	}
	stdoutBuffer := &bytes.Buffer{}
	argXML := append(arg, "--xml")
	glusterExec := exec.Command(glusterCmd, argXML...)
	glusterExec.Stdout = stdoutBuffer
	if err := glusterExec.Run(); err != nil {
		log.Errorf("tried to execute %v and got error: %v", arg, err)
		return stdoutBuffer, err
	}
	return stdoutBuffer, nil
}

// ContainsVolume checks a slice if it contains an element
func ContainsVolume(slice []string, element string) bool {
	for _, a := range slice {
		if a == element {
			return true
		}
	}
	return false
}

// PathExists reports whether path exists on disk; the error carries the
// os.Stat failure when it does not.
func PathExists(path string) (bool, error) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return false, err
	}
	return true, nil
}
package sqlite

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log"
	"strings"
	"sync"

	"github.com/aichaos/rivescript-go/sessions"
	_ "modernc.org/sqlite"
)

// schema is applied on every New(); all statements are idempotent
// (IF NOT EXISTS), so opening an existing database is safe.
// NOTE(review): "frozen_user" has no UNIQUE(user_id) constraint, so the
// INSERT OR REPLACE used by Freeze cannot deduplicate rows — confirm
// whether duplicate snapshots per user are intended.
var schema string = `PRAGMA journal_mode = WAL;
PRAGMA synchronous = normal;
PRAGMA foreign_keys = on;
PRAGMA encoding = "UTF-8";
BEGIN TRANSACTION;
CREATE TABLE IF NOT EXISTS "users" (
	"id" INTEGER,
	"username" TEXT UNIQUE,
	"last_match" TEXT,
	PRIMARY KEY("id" AUTOINCREMENT)
);
CREATE TABLE IF NOT EXISTS "user_variables" (
	"id" INTEGER,
	"user_id" INTEGER NOT NULL,
	"key" TEXT NOT NULL,
	"value" TEXT,
	PRIMARY KEY("id" AUTOINCREMENT),
	UNIQUE("user_id", "key")
);
CREATE TABLE IF NOT EXISTS "history" (
	"id" INTEGER,
	"user_id" INTEGER NOT NULL,
	"input" TEXT NOT NULL,
	"reply" TEXT NOT NULL,
	"timestamp" INTEGER NOT NULL DEFAULT (CAST(strftime('%s', 'now') AS INTEGER)),
	PRIMARY KEY("id" AUTOINCREMENT)
);
CREATE TABLE IF NOT EXISTS "frozen_user" (
	"id" INTEGER,
	"user_id" INTEGER NOT NULL,
	"data" TEXT NOT NULL,
	PRIMARY KEY("id" AUTOINCREMENT)
);
CREATE TABLE IF NOT EXISTS "local_storage" (
	"id" INTEGER,
	"key" TEXT NOT NULL,
	"value" TEXT NOT NULL,
	PRIMARY KEY("id" AUTOINCREMENT)
);
CREATE VIEW IF NOT EXISTS v_user_variables AS
	SELECT users.username AS username, user_variables.key, user_variables.value
	FROM users, user_variables
	WHERE users.id = user_variables.user_id;
COMMIT;`

// Client implements a rivescript session store on top of a SQLite
// database file. The mutex serializes the write paths; reads query the
// database directly.
type Client struct {
	lock sync.Mutex
	db   *sql.DB
}

// New creates a new Client.
// It opens (or creates) the database file and applies the schema.
func New(filename string) (*Client, error) {
	db, err := sql.Open("sqlite", filename)
	if err != nil {
		return nil, err
	}
	// 0 means no limit on open connections.
	db.SetMaxOpenConns(0)
	_, err = db.Exec(schema)
	if err != nil {
		return nil, err
	}
	return &Client{
		db: db,
	}, nil
}

// Close closes the underlying database handle.
func (s *Client) Close() error {
	return s.db.Close()
}

// init makes sure a username exists in the memory store.
func (s *Client) Init(username string) *sessions.UserData {
	user, err := s.GetAny(username)
	if err != nil {
		// First time we see this user: create the row and default topic.
		func() {
			s.lock.Lock()
			defer s.lock.Unlock()
			tx, _ := s.db.Begin()
			stmt, _ := tx.Prepare(`INSERT OR IGNORE INTO users (username, last_match) VALUES (?,"");`)
			defer stmt.Close()
			stmt.Exec(username)
			tx.Commit()
		}()
		// NOTE(review): Set calls Init again; this terminates only because
		// the user row now exists so GetAny succeeds — confirm.
		s.Set(username, map[string]string{
			"topic": "random",
		})
		return &sessions.UserData{
			Variables: map[string]string{
				"topic": "random",
			},
			LastMatch: "",
			History:   sessions.NewHistory(),
		}
	}
	return user
} // Init()

// Set a user variable.
func (s *Client) Set(username string, vars map[string]string) {
	s.Init(username)
	s.lock.Lock()
	defer s.lock.Unlock()
	tx, _ := s.db.Begin()
	// UNIQUE(user_id, key) makes OR REPLACE an upsert per variable.
	stmt, _ := tx.Prepare(`INSERT OR REPLACE INTO user_variables (user_id, key, value) VALUES ((SELECT id FROM users WHERE username = ?), ?, ?);`)
	defer stmt.Close()
	for k, v := range vars {
		stmt.Exec(username, k, v)
	}
	tx.Commit()
}

// AddHistory adds history items.
func (s *Client) AddHistory(username, input, reply string) {
	s.Init(username)
	s.lock.Lock()
	defer s.lock.Unlock()
	tx, _ := s.db.Begin()
	stmt, _ := tx.Prepare(`INSERT INTO history (user_id, input,reply)VALUES((SELECT id FROM users WHERE username = ?),?,?);`)
	defer stmt.Close()
	stmt.Exec(username, input, reply)
	tx.Commit()
}

// SetLastMatch sets the user's last matched trigger.
func (s *Client) SetLastMatch(username, trigger string) {
	s.Init(username)
	s.lock.Lock()
	defer s.lock.Unlock()
	tx, _ := s.db.Begin()
	stmt, _ := tx.Prepare(`UPDATE users SET last_match = ? WHERE username = ?;`)
	defer stmt.Close()
	stmt.Exec(trigger, username)
	tx.Commit()
}

// Get a user variable.
func (s *Client) Get(username, name string) (string, error) {
	var value string
	row := s.db.QueryRow(`SELECT value FROM user_variables WHERE user_id = (SELECT id FROM users WHERE username = ?) AND key = ?;`, username, name)
	switch err := row.Scan(&value); err {
	case sql.ErrNoRows:
		return "", fmt.Errorf("no %s variable found for user %s", name, username)
	case nil:
		return value, nil
	default:
		// NOTE(review): the underlying error detail is discarded here.
		return "", fmt.Errorf("unknown sql error")
	}
}

// GetAny gets all variables for a user.
// It aggregates history, last match and all key/value variables; any
// query failure aborts with that error.
func (s *Client) GetAny(username string) (*sessions.UserData, error) {
	history, err := s.GetHistory(username)
	if err != nil {
		return nil, err
	}
	last_match, err := s.GetLastMatch(username)
	if err != nil {
		return nil, err
	}
	var variables map[string]string = make(map[string]string)
	rows, err := s.db.Query(`SELECT key,value FROM user_variables WHERE user_id = (SELECT id FROM users WHERE username = ?);`, username)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var key, value string
	for rows.Next() {
		err = rows.Scan(&key, &value)
		if err != nil {
			// Skip unreadable rows rather than failing the whole fetch.
			continue
		}
		variables[key] = value
	}
	return &sessions.UserData{
		History:   history,
		LastMatch: last_match,
		Variables: variables,
	}, nil
}

// GetAll gets all data for all users.
func (s *Client) GetAll() map[string]*sessions.UserData {
	var users []string = make([]string, 0)
	// NOTE(review): the Query error is ignored; rows would be nil and the
	// deferred Close would panic if the query ever failed — confirm.
	rows, _ := s.db.Query(`SELECT username FROM users;`)
	defer rows.Close()
	var user string
	for rows.Next() {
		rows.Scan(&user)
		users = append(users, user)
	}
	var usersmap map[string]*sessions.UserData = make(map[string]*sessions.UserData)
	for _, user := range users {
		u, _ := s.GetAny(user)
		usersmap[user] = u
	}
	return usersmap
}

// GetLastMatch returns the last matched trigger for the user,
func (s *Client) GetLastMatch(username string) (string, error) {
	var last_match string
	row := s.db.QueryRow(`SELECT last_match FROM users WHERE username = ?;`, username)
	switch err := row.Scan(&last_match); err {
	case sql.ErrNoRows:
		return "", fmt.Errorf("no last match found for user %s", username)
	case nil:
		return last_match, nil
	default:
		return "", fmt.Errorf("unknown sql error: %s", err)
	}
}

// GetHistory gets the user's history.
func (s *Client) GetHistory(username string) (*sessions.History, error) { data := &sessions.History{ Input: []string{}, Reply: []string{}, } for i := 0; i < sessions.HistorySize; i++ { data.Input = append(data.Input, "undefined") data.Reply = append(data.Reply, "undefined") } rows, err := s.db.Query("SELECT input,reply FROM history WHERE user_id = (SELECT id FROM users WHERE username = ?) ORDER BY timestamp ASC LIMIT 10;", username) if err != nil { return data, err } defer rows.Close() for rows.Next() { var input, reply string err := rows.Scan(&input, &reply) if err != nil { log.Println("[ERROR]", err) continue } data.Input = data.Input[:len(data.Input)-1] // Pop data.Input = append([]string{strings.TrimSpace(input)}, data.Input...) // Unshift data.Reply = data.Reply[:len(data.Reply)-1] // Pop data.Reply = append([]string{strings.TrimSpace(reply)}, data.Reply...) // Unshift } return data, nil } // Clear data for a user. func (s *Client) Clear(username string) { s.lock.Lock() defer s.lock.Unlock() tx, _ := s.db.Begin() tx.Exec(`DELETE FROM user_variables WHERE user_id = (SELECT id FROM users WHERE username = ?);`, username) tx.Exec(`DELETE FROM history WHERE user_id = (SELECT id FROM users WHERE username = ?);`, username) tx.Exec(`DELETE FROM users WHERE username = ?;`, username) tx.Commit() } // ClearAll resets all user data for all users. func (s *Client) ClearAll() { s.lock.Lock() defer s.lock.Unlock() tx, _ := s.db.Begin() tx.Exec(`DELETE FROM user_variables;`) tx.Exec(`DELETE FROM history;`) s.db.Exec(`DELETE FROM users;`) tx.Commit() } // Freeze makes a snapshot of user variables. 
// Freeze stores a JSON snapshot of the user's full session data in the
// frozen_user table.
func (s *Client) Freeze(username string) error {
	user := s.Init(username)
	data, err := json.Marshal(user)
	if err != nil {
		return err
	}
	s.lock.Lock()
	defer s.lock.Unlock()
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}
	// NOTE(review): frozen_user has no UNIQUE(user_id) constraint, so
	// OR REPLACE inserts a new row each time instead of replacing —
	// confirm intended behavior.
	stmt, err := tx.Prepare(`INSERT OR REPLACE INTO frozen_user (user_id, data)VALUES((SELECT id FROM users WHERE username = ?), ?);`)
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(username, string(data))
	if err != nil {
		return err
	}
	return tx.Commit()
}

// Thaw restores from a snapshot.
// The action decides what happens to the snapshot and to the live data:
// Thaw restores and deletes the snapshot, Discard deletes it without
// restoring, Keep restores and keeps it.
func (s *Client) Thaw(username string, action sessions.ThawAction) error {
	// Load and decode the stored snapshot.
	user, err := func(u string) (sessions.UserData, error) {
		var data string
		var reply sessions.UserData
		row := s.db.QueryRow(`SELECT data FROM frozen_user WHERE user_id = (SELECT id FROM users WHERE username = ?);`, username)
		switch err := row.Scan(&data); err {
		case sql.ErrNoRows:
			return sessions.UserData{}, fmt.Errorf("no rows found")
		case nil:
			err = json.Unmarshal([]byte(data), &reply)
			if err != nil {
				return sessions.UserData{}, err
			}
			return reply, nil
		default:
			return sessions.UserData{}, fmt.Errorf("unknown sql error")
		}
	}(username)
	if err != nil {
		return fmt.Errorf("no data for snapshot for user %s", username)
	}
	switch action {
	case sessions.Thaw:
		// Delete the snapshot first, under the lock (Clear/Set below take
		// the lock themselves, so this must not hold it past this closure).
		if err := func() error {
			s.lock.Lock()
			defer s.lock.Unlock()
			_, err = s.db.Exec(`DELETE FROM frozen_user WHERE user_id = (SELECT id FROM users WHERE username = ?);`, username)
			if err != nil {
				return err
			}
			return nil
		}(); err != nil {
			return err
		}
		s.Clear(username)
		s.Set(username, user.Variables)
		s.SetLastMatch(username, user.LastMatch)
		// Replay oldest-first so insertion timestamps end up in order.
		for i := len(user.History.Input) - 1; i >= 0; i-- {
			s.AddHistory(username, user.History.Input[i], user.History.Reply[i])
		}
		return nil
	case sessions.Discard:
		s.lock.Lock()
		defer s.lock.Unlock()
		_, err = s.db.Exec(`DELETE FROM frozen_user WHERE user_id = (SELECT id FROM users WHERE username = ?);`, username)
		if err != nil {
			return err
		}
	case sessions.Keep:
		s.Clear(username)
		s.Set(username, user.Variables)
		s.SetLastMatch(username, user.LastMatch)
		// NOTE(review): this replays history in the opposite order from the
		// Thaw case above (forward vs. reversed) — confirm which is right.
		for i := range user.History.Input {
			s.AddHistory(username, user.History.Input[i], user.History.Reply[i])
		}
		return nil
	default:
		return fmt.Errorf("something went wrong")
	}
	return nil
}
// Copyright © 2020. All rights reserved. // Author: Ilya Stroy. // Contacts: qioalice@gmail.com, https://github.com/qioalice // License: https://opensource.org/licenses/MIT package privet import ( "strconv" "strings" "github.com/qioalice/ekago/v2/ekaerr" "github.com/qioalice/ekago/v2/ekaunsafe" "github.com/modern-go/reflect2" ) type ( /* localeNode represents the node of locale that contains: - Its data as KV storage, K is a translation key, V is a language phrase. Field content stores these phrases. - Child (derived) localeNode s. E.g: if translate key is "a.b", and the current node is root node, then subNodes will contain localeNode by a key "a" with the translate key "b" that represents requested phrase. While parsing locale's source, before one source parsing is completed, its phrases will saved into contentTmp, meaning them as a temporary. It's done to provide a mechanism to drop all one source's phrases if there was an error of parsing that source. After one source parsing is completed, the all phrases from contentTmp must be MOVED to the content. Client.scan() using localeNode.applyRecursively() doing that. usedSourcesIdx contains indexes of Client.sources or Client.sourcesTmp (depends of Client's state - either sources under loading or not), meaning that sources with these indexes were used to construct EXACTLY current node (content), neither nested nor parented. */ localeNode struct { parent *Locale subNodes map[string]*localeNode content map[string]string contentTmp map[string]string usedSourcesIdx []int } ) /* subNode returns a localeNode with the given name from the current localeNode's subNodes map. If 2nd argument is true, the new empty localeNode will be created and initialized, if there is no localeNode with the given name in subNodes map. If it's false, nil is returned. 
*/ func (n *localeNode) subNode(name string, createIfNotExist bool) *localeNode { subNode := n.subNodes[name] if subNode == nil && createIfNotExist { subNode = n.parent.makeSubNode() n.subNodes[name] = subNode } return subNode } /* applyRecursively calls passed callback cb passing the current localeNode, treating it as a root, and then doing the same work for each localeNode from subNodes "recursively". Note. "Recursively" above means, that each embedded localeNode (no matter how deep it is) will be processed. Say we have the localeNode tree like: Root (A) | |---- Level 1.1 node (B) | | | |---- Level 2.1 node (C) | | | | | |---- Level 3.1 node (D) | | | |---- Level 2.2 node (E) | | | |---- Level 2.3 node (F) | |---- Level 1.2 node (G) For each localeNode of: A, B, C, D, E, F, G, a cb will be called, and each of that localeNode will be passed (order is not guaranteed). There is two ways HOW it will be done (depends on 2nd argument). Iterative or recursive way of algorithm. Recursive is preferred when the deep level and whole amount of localeNode are not too high. Otherwise iterative algorithm must be used, because recursive tail optimisation is impossible here. Requirements: - Current localeNode (n) != nil, panic otherwise. - Passed callback (cb) != nil, panic otherwise. */ func (n *localeNode) applyRecursively(cb func(*localeNode), recursive ...bool) { var applicator func(_ *localeNode, _ func(node *localeNode)) doRecursive := len(recursive) > 0 && recursive[0] // TODO: There's a infinity loop dunno why, when two files have the duplicated // translated phrases (phrases with the same translation key). // Moreover, it leads to infinity loop only in an iterative algo. // So, till that issue is resolved, recursive algo is forced. 
doRecursive = true if doRecursive { applicator = func(nodeToProcess *localeNode, cb func(*localeNode)) { cb(nodeToProcess) for _, nodeToProcess = range nodeToProcess.subNodes { applicator(nodeToProcess, cb) } } } else { applicator = func(nodeToProcess *localeNode, cb func(node *localeNode)) { nodesToProcess := make([]*localeNode, 0, 1024) nodesToProcess = append(nodesToProcess, nodeToProcess) for len(nodesToProcess) > 0 { nodeToProcess = nodesToProcess[0] nodesToProcess = nodesToProcess[:len(nodesToProcess)-1] cb(nodeToProcess) for _, nodeToProcess = range nodeToProcess.subNodes { nodesToProcess = append(nodesToProcess, nodeToProcess) } } } } applicator(n, cb) } /* scan walks over passed map[string]interface{}, treating it like a source of locale's content for the current localeNode, doing next things: - If a value is a basic Golang type (such as string, bool, int, uint, float, nil), that value is saved with corresponding key to the contentTmp using store() method. - If a value is the same type map (map[string]interface{}), the embedded localeNode by the corresponding key will be either extracted from the subNodes or created an empty new one, and scan() will be called recursively for that sub localeNode and that map. - If a value has any other type, it's an error, even if it's array (arrays are prohibited). sourceItemIdx will be saved to the usedSourcesIdx, after the whole map is successfully parsed and if there is no the same index yet. */ func (n *localeNode) scan( from map[string]interface{}, sourceItemIdx int, overwrite bool, ) *ekaerr.Error { const s = "Failed to scan a key-value component." var err *ekaerr.Error for key, value := range from { switch rtype := reflect2.RTypeOf(value); { case key == "": err = ekaerr.IllegalFormat. 
New(s + "Key is empty.") case rtype == 0: err = n.store(key, "<undefined>", overwrite) case rtype == ekaunsafe.RTypeString(): err = n.store(key, value.(string), overwrite) case rtype == ekaunsafe.RTypeBool(): b := *(*bool)(ekaunsafe.TakeRealAddr(value)) value := "false" if b { value = "true" } err = n.store(key, value, overwrite) case ekaunsafe.RTypeIsIntAny(rtype): i64 := *(*int64)(ekaunsafe.TakeRealAddr(value)) err = n.store(key, strconv.FormatInt(i64, 10), overwrite) case ekaunsafe.RTypeIsUintAny(rtype): u64 := *(*uint64)(ekaunsafe.TakeRealAddr(value)) err = n.store(key, strconv.FormatUint(u64, 10), overwrite) case ekaunsafe.RTypeIsFloatAny(rtype): f64 := *(*float64)(ekaunsafe.TakeRealAddr(value)) bitSize := 32 if rtype == ekaunsafe.RTypeFloat64() { bitSize = 64 } err = n.store(key, strconv.FormatFloat(f64, 'f', 2, bitSize), overwrite) case rtype == ekaunsafe.RTypeMapStringInterface(): embeddedMap := value.(map[string]interface{}) err = n.subNode(key, true).scan(embeddedMap, sourceItemIdx, overwrite) default: err = ekaerr.IllegalFormat. New(s + "Unexpected type of value."). AddFields("privet_source_value_type", reflect2.TypeOf(value).String()) } //goland:noinspection GoNilness if err.IsNotNil() { return err. AddMessage(s). AddFields("privet_source_key", key). Throw() } } // All is good. // We may proceed. needToAdd := true for _, alreadyMarkedSourceIdx := range n.usedSourcesIdx { if alreadyMarkedSourceIdx == sourceItemIdx { needToAdd = false break } } if needToAdd { n.usedSourcesIdx = append(n.usedSourcesIdx, sourceItemIdx) } return nil } /* store saves passed key, value to the contentTmp map, if there is no the same key yet in content map, or if overwriting is allowed. Returns an error if overwriting is prohibited and it's a duplication. */ func (n *localeNode) store(key, value string, overwrite bool) *ekaerr.Error { // contentTmp contains only the current file processing keys; // it will be so strange (and impossible), if there will be the same keys. 
if _, isExist := n.content[key]; isExist && !overwrite { alreadyUsedSources := make([]string, len(n.usedSourcesIdx)) for i, usedSourceIdx := range n.usedSourcesIdx { alreadyUsedSources[i] = n.parent.owner.sourcesTmp[usedSourceIdx].Path } return ekaerr.AlreadyExist. New("Failed to add new translation phrase. Already exist."). AddFields( "privet_source_applied", strings.Join(alreadyUsedSources, ", "), "privet_source_key", key, "privet_source_new_value", value, "privet_source_old_value", n.content[key]). Throw() } n.contentTmp[key] = value return nil }
// This is a "stub" file. It's a little start on your solution. // It's not a complete solution though; you have to write some code. // Package triangle should have a package comment that summarizes what it's about. // https://golang.org/doc/effective_go.html#commentary package triangle import "math" // Notice KindFromSides() returns this type. Pick a suitable data type. type Kind string const ( // Pick values for the following identifiers used by the test program. NaT = "NaT" // not a triangle Equ = "Equ" // equilateral Iso = "Iso" // isosceles Sca = "Sca" // scalene ) // KindFromSides should have a comment documenting it. func KindFromSides(a, b, c float64) Kind { max := math.Max(a, math.Max(b, c)) if a <= 0 || b <= 0 || c <= 0 || max >= a+b+c-max { return NaT } min := math.Min(a, math.Min(b, c)) if max == min { return Equ } middle := (a + b + c) - (max + min) if max != middle && middle != min { return Sca } return Iso }
package service

// LoadBalancer selects one Backend out of a set of candidates.
type LoadBalancer interface {
	// Balance picks a single Backend from the given slice.
	Balance([]Backend) Backend
}
package notion

import "net/http"

// ClientConfig stores the configuration for the Client
type ClientConfig struct {
	// BaseURL is the root URL of the Notion API.
	BaseURL string
	// APIVersion is the API version segment used when building request URLs.
	APIVersion string
	// HeaderVersion is the value sent in Notion's version header.
	HeaderVersion string
	// Token is the bearer token used to authenticate requests.
	Token string
}

// Client encapsulates the logic for connect to Notion's API
type Client struct {
	// Config encapsulates the configuration needed for the Client
	Config ClientConfig
	// Client is the HTTP Client for make the calls
	Client http.Client
}
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"

	"github.com/Dipper-Labs/dip-bridge/config"
	"github.com/Dipper-Labs/dip-bridge/dip"
	"github.com/Dipper-Labs/dip-bridge/eth"
	"github.com/Dipper-Labs/dip-bridge/redis"
	"github.com/Dipper-Labs/dip-bridge/util"
)

// Bridge relays TokenLocked events from an Ethereum contract to the
// Dipper network, tracking its scan position in Redis. ethHeaderBlock is
// the latest observed Ethereum head, guarded by ethHeaderBlockRWLock
// (written by UpdateEthHeaderBlock, read by RunBridge).
type Bridge struct {
	eth.EthLand
	dip.DipLand
	redis.RedisCli

	ethHeaderBlock       int64
	ethHeaderBlockRWLock *sync.RWMutex
}

// NewBridge loads the configuration from cfgPath and wires up the
// Ethereum, Dipper and Redis clients.
func NewBridge(cfgPath string) *Bridge {
	config.Init(cfgPath)

	return &Bridge{
		eth.NewEthLand(config.EthChainWsEndpoint),
		dip.NewDipLand(config.DipSdkCfgFileAbsPath),
		redis.NewRedisCli(config.RedisEndpoint),
		0,
		new(sync.RWMutex),
	}
}

// UpdateEthHeaderBlock records the latest Ethereum head block number.
func (bridge *Bridge) UpdateEthHeaderBlock(HeaderBlock int64) {
	bridge.ethHeaderBlockRWLock.Lock()
	defer bridge.ethHeaderBlockRWLock.Unlock()
	bridge.ethHeaderBlock = HeaderBlock
}

// calcFromBlock determines where scanning starts: the configured start
// block, optionally overridden by the cursor persisted in Redis.
func (bridge *Bridge) calcFromBlock(ctx context.Context) int64 {
	fromBlock := config.EthChainStartBlockNumber
	if config.EthChainStartBlockNumberFromRedis {
		ethBlockCursor, err := bridge.GetEthBlockCursor(ctx)
		if err != nil {
			log.Fatalf("do GetEthBlockCursor failed:[%v]\n", err)
		}
		if ethBlockCursor > 0 {
			fromBlock = ethBlockCursor
		}
	}

	return fromBlock
}

// RunBridge is the main relay loop: it repeatedly scans confirmed block
// ranges for TokenLocked events, mints the corresponding DIP for each
// unseen event, records per-tx receipts, and advances the Redis cursor.
// It never returns; unrecoverable conditions call log.Fatalf.
func (bridge *Bridge) RunBridge(ctx context.Context) {
	abiObj, err := util.AbiFromFile(config.EthChainDipManagerAbi)
	if err != nil {
		log.Fatalf("do AbiFromFile failed:[%v]\n", err)
	}

	fromBlock := bridge.calcFromBlock(ctx)
	ethDipManagerAddr := common.HexToAddress(config.EthChainDipManagerAddr)

	for {
		// Only scan up to head minus the confirmation depth, so reorgs
		// below that depth cannot invalidate processed events.
		bridge.ethHeaderBlockRWLock.RLock()
		toBlock := bridge.ethHeaderBlock - config.EthChainConfirmBlockCount
		bridge.ethHeaderBlockRWLock.RUnlock()

		if toBlock <= fromBlock {
			log.Printf("ping %d secs", config.DetectIntervalInSeconde)
			time.Sleep(time.Second * time.Duration(config.DetectIntervalInSeconde))
			continue
		}

		logs, err := bridge.QueryTokenLockedLog(ctx, ethDipManagerAddr, fromBlock, toBlock)
		if err != nil {
			// Transient query failures: back off and retry the same range.
			log.Printf("do QueryTokenLockedLog failed:[%v],fromBlock:%v, toBlock:%v\n", err, fromBlock, toBlock)
			time.Sleep(time.Second * time.Duration(10))
			continue
		}

		if len(logs) > 0 {
			log.Printf("got %d Token Locked Event between block[%v, %v]", len(logs), fromBlock, toBlock)
		}

		logsCount := len(logs)
		logIndex := 0
		for _, logE := range logs {
			logIndex++
			tokenLockedInfo, err := util.ParseTokenLocked(abiObj, logE)
			if err != nil {
				logJson, _ := logE.MarshalJSON()
				log.Fatalf("do ParseTokenLocked failed:[%v],logE:[%s]\n", err, string(logJson))
			}

			log.Printf("[%v/%v]-%v:%s-event[%s:%s:%v]", logIndex, logsCount, logE.BlockNumber, logE.TxHash.String(), tokenLockedInfo.From.String(), tokenLockedInfo.To, tokenLockedInfo.Amount.String())

			// Idempotency: skip events whose tx hash was already handled.
			if bridge.EthTxidExist(ctx, logE.TxHash.String()) {
				log.Printf("txId:[%s] already processed", logE.TxHash.String())
				continue
			}

			result, err := bridge.MintDip(tokenLockedInfo, logE.TxHash)
			if err != nil {
				// Record the failure under the tx id and continue with the
				// next event; the range is still considered processed.
				tokenLockedInfoJson, _ := json.Marshal(tokenLockedInfo)
				failedInfo := fmt.Sprintf("do MintDip failed:[%v],tokenLockedInfo:[%s],txHash:%s\n", err, string(tokenLockedInfoJson), logE.TxHash.String())
				log.Println(failedInfo)
				bridge.SaveEthTxidProcessReceiptOnDip(ctx, logE.TxHash.String(), "failed")
				bridge.SaveEthTxidProcessReceiptOnDip(ctx, fmt.Sprintf("failed.%s", logE.TxHash.String()), failedInfo)
				continue
			}

			dipReceipt, err := json.Marshal(result)
			if err != nil {
				log.Fatalf("do Marshal failed:[%v],dipper network txid:%s\n", err, result.CommitResult.Hash.String())
			}
			// NOTE(review): receipt/cursor write errors are ignored here and
			// below — confirm Redis failures are acceptable to drop.
			bridge.SaveEthTxidProcessReceiptOnDip(ctx, logE.TxHash.String(), string(dipReceipt))
			log.Printf("txId:[%s] finished", logE.TxHash.String())
		}

		bridge.SetEthBlockCursor(ctx, toBlock)
		log.Printf("finished eth block: %v", toBlock)
		fromBlock = toBlock + 1
	}
}
package apocalisp

import (
	"apocalisp/core"
	"errors"
	"fmt"
)

// Rep performs one read-eval-print cycle: parse sexpr, evaluate it in
// environment with the supplied eval function, and render the result.
// Returns "" with a nil error for empty input; an exception value from
// eval is converted into a Go error.
func Rep(sexpr string, environment *core.Environment, eval func(*core.Type, *core.Environment) (*core.Type, error), parser core.Parser) (string, error) {
	// read
	t, err := parser.Parse(sexpr)
	if err != nil {
		return "", err
	} else if t == nil {
		return "", nil
	}

	// eval
	evaluated, err := eval(t, environment)
	if err != nil {
		return "", err
	} else if evaluated.IsException() {
		return "", errors.New(evaluated.ToString(false))
	}

	// print
	return evaluated.ToString(true), nil
}

// NoEval is an identity evaluator: it returns the node unchanged.
// Useful for read-print-only REPL stages.
func NoEval(node *core.Type, environment *core.Environment) (*core.Type, error) {
	return node, nil
}

// Evaluate is the interpreter core. It runs a tail-call-optimized loop:
// special forms that evaluate a sub-expression in tail position (let*, do,
// if, fn* application, quasiquote) rewrite node/environment and loop again
// instead of recursing, while the remaining forms produce a value (or
// error) captured via wrapReturn and returned on the next iteration.
func Evaluate(node *core.Type, environment *core.Environment) (*core.Type, error) {
	var lexicalReturnValue *core.Type
	var lexicalError error

	// processReturn converts the captured value/error pair into the
	// function's result; reaching it with neither set is a logic error.
	processReturn := func() (*core.Type, error) {
		if lexicalError != nil {
			return nil, lexicalError
		} else if lexicalReturnValue != nil {
			return lexicalReturnValue, nil
		}
		return nil, errors.New("Error: Unexpected behavior.")
	}

	// wrapReturn captures a (node, err) pair; a nil node with nil err means
	// "keep looping" (the TCO special forms rewrote node/environment).
	wrapReturn := func(node *core.Type, err error) {
		if err != nil {
			lexicalError = err
		} else if node != nil {
			lexicalReturnValue = node
		}
	}

	// TCO loop
	for {
		if lexicalReturnValue != nil || lexicalError != nil {
			return processReturn()
		}

		// Expand macros before dispatching on the head symbol.
		expanded := macroexpand(*node, *environment)
		node = &expanded

		if !node.IsList() {
			wrapReturn(evalAst(node, environment, Evaluate))
		} else if node.IsList() && node.IsEmptyIterable() {
			// () evaluates to itself.
			wrapReturn(node, nil)
		} else if node.IsList() && !node.IsEmptyIterable() {
			first, rest := node.AsIterable()[0], node.AsIterable()[1:]
			if first.CompareSymbol("def!") {
				wrapReturn(specialFormDef(Evaluate, rest, environment))
			} else if first.CompareSymbol("defmacro!") {
				wrapReturn(specialFormDefmacro(Evaluate, rest, environment))
			} else if first.CompareSymbol("macroexpand") {
				expanded := macroexpand(rest[0], *environment)
				wrapReturn(&expanded, nil)
			} else if first.CompareSymbol("let*") {
				wrapReturn(tcoSpecialFormLet(Evaluate, rest, &node, &environment))
			} else if first.CompareSymbol("do") {
				wrapReturn(tcoSpecialFormDo(Evaluate, rest, &node, &environment))
			} else if first.CompareSymbol("fn*", `\`) {
				wrapReturn(tcoSpecialFormFn(Evaluate, rest, &node, &environment))
			} else if first.CompareSymbol("if") {
				wrapReturn(tcoSpecialFormIf(Evaluate, rest, &node, &environment))
			} else if first.CompareSymbol("quasiquote") {
				wrapReturn(tcoSpecialFormQuasiquote(Evaluate, rest, &node, &environment))
			} else if first.CompareSymbol("quasiquoteexpand") {
				wrapReturn(specialFormQuasiquoteexpand(Evaluate, rest, environment))
			} else if first.CompareSymbol("quote") {
				wrapReturn(specialFormQuote(Evaluate, rest, environment))
			} else if first.CompareSymbol("try*") {
				wrapReturn(specialFormTryCatch(Evaluate, rest, environment))
			} else {
				// Ordinary application: evaluate all elements, then either
				// jump into a user-defined function body (TCO) or call a
				// native callable.
				if container, err := evalAst(node, environment, Evaluate); err != nil {
					wrapReturn(nil, err)
				} else {
					function, parameters := container.AsIterable()[0], container.AsIterable()[1:]
					if function.IsFunction() {
						// Tail call: continue the loop in the function's body
						// with a child environment binding the parameters.
						node = &function.Function.Body
						environment = core.NewEnvironment(&function.Function.Environment, function.Function.Params, parameters)
					} else {
						wrapReturn(evalCallable(container))
					}
				}
			}
		} else {
			return processReturn()
		}
	}
}

// tcoSpecialFormLet implements (let* (bindings...) body): it evaluates the
// binding pairs into a fresh child environment and rewrites *node/*environment
// so the caller's TCO loop evaluates the body next. An exception produced by
// a binding becomes the result node directly.
func tcoSpecialFormLet(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, node **core.Type, environment **core.Environment) (*core.Type, error) {
	if len(rest) != 2 || !rest[0].IsEvenIterable() {
		return nil, errors.New("Error: Invalid syntax for `let*`.")
	} else {
		letEnvironment := core.NewEnvironment(*environment, []string{}, []core.Type{})
		bindings, body := rest[0].AsIterable(), &rest[1]
		// bindings alternate symbol/value; walk them pairwise.
		for symbol, target := 0, 1; symbol < len(bindings); symbol, target = symbol+2, target+2 {
			if e, ierr := eval(&bindings[target], letEnvironment); ierr != nil {
				return nil, ierr
			} else if e.IsException() {
				*node = e
				return nil, nil
			} else {
				letEnvironment.Set(bindings[symbol].ToString(true), *e)
			}
		}
		*environment, *node = letEnvironment, body
		return nil, nil
	}
}

// tcoSpecialFormDo implements (do expr... last): all but the last expression
// are evaluated for effect (an exception short-circuits and becomes the
// result node); the last expression is left for the TCO loop.
func tcoSpecialFormDo(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, node **core.Type, environment **core.Environment) (*core.Type, error) {
	if len(rest) < 1 {
		return nil, errors.New("Error: Invalid syntax for `do`.")
	} else {
		toEvaluate := rest[:len(rest)-1]
		if elements, err := evalAst(&core.Type{List: &toEvaluate}, *environment, eval); err != nil {
			return nil, err
		} else {
			for _, element := range elements.AsIterable() {
				if element.IsException() {
					*node = &element
					return nil, nil
				}
			}
			*node = &rest[len(rest)-1]
			return nil, nil
		}
	}
}

// tcoSpecialFormIf implements (if cond then [else]): the condition is
// evaluated eagerly, the chosen branch (or nil when absent) is left for the
// TCO loop. Only nil and false are falsy.
func tcoSpecialFormIf(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, node **core.Type, environment **core.Environment) (*core.Type, error) {
	length := len(rest)
	if length < 2 || length > 3 {
		return nil, errors.New("Error: Invalid syntax for `if`.")
	} else if condition, err := eval(&rest[0], *environment); err != nil {
		return nil, err
	} else if !condition.IsNil() && !condition.CompareBoolean(false) {
		*node = &rest[1]
	} else if length == 3 {
		*node = &rest[2]
	} else {
		*node = core.NewNil()
	}
	return nil, nil
}

// tcoSpecialFormFn implements (fn* (params...) body): it builds a closure
// value capturing the current environment. The returned Function carries
// both a Go callable (for native invocation) and Params/Body/Environment
// (so Evaluate can apply it with TCO).
func tcoSpecialFormFn(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, node **core.Type, environment **core.Environment) (*core.Type, error) {
	if len(rest) < 2 || !rest[0].IsIterable() {
		return nil, errors.New("Error: Invalid syntax for `fn*`.")
	} else {
		symbols := make([]string, 0)
		for _, node := range rest[0].AsIterable() {
			if node.IsSymbol() {
				symbols = append(symbols, node.AsSymbol())
			} else {
				return nil, errors.New("Error: Invalid syntax for `fn*`.")
			}
		}
		// Native entry point: evaluation errors are surfaced as exceptions
		// because the callable signature cannot return a Go error.
		callable := func(args ...core.Type) core.Type {
			newEnvironment := core.NewEnvironment(*environment, symbols, args)
			if result, err := eval(&rest[1], newEnvironment); err != nil {
				return *core.NewStringException(err.Error())
			} else {
				return *result
			}
		}
		function := core.Function{
			Params:      symbols,
			Body:        rest[1],
			Callable:    callable,
			Environment: **environment,
		}
		return &core.Type{Function: &function}, nil
	}
}

// tcoSpecialFormQuasiquote rewrites *node to the quasiquote expansion of its
// argument, leaving evaluation of the expansion to the TCO loop.
func tcoSpecialFormQuasiquote(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, node **core.Type, environment **core.Environment) (*core.Type, error) {
	if len(rest) < 1 {
		return nil, errors.New("Error: Invalid syntax for `quasiquote`.")
	} else {
		newNode := quasiquote(rest[0])
		*node = &newNode
	}
	return nil, nil
}

// specialFormQuote implements (quote x): returns x unevaluated.
func specialFormQuote(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, environment *core.Environment) (*core.Type, error) {
	if len(rest) < 1 {
		return nil, errors.New("Error: Invalid syntax for `quote`.")
	} else {
		return &rest[0], nil
	}
}

// specialFormQuasiquoteexpand returns the quasiquote expansion of its
// argument without evaluating it (a debugging aid).
func specialFormQuasiquoteexpand(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, environment *core.Environment) (*core.Type, error) {
	if len(rest) >= 1 {
		newNode := quasiquote(rest[0])
		return &newNode, nil
	}
	return nil, errors.New("Error: Invalid syntax for `quasiquoteexpand`.")
}

// specialFormDef implements (def! symbol value): evaluates value and binds
// it in the current environment. An exception from the value expression is
// returned as the result, not bound.
func specialFormDef(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, environment *core.Environment) (*core.Type, error) {
	if len(rest) != 2 || !rest[0].IsSymbol() {
		return nil, errors.New("Error: Invalid syntax for `def!`.")
	} else {
		if e, ierr := eval(&rest[1], environment); ierr != nil {
			return nil, ierr
		} else if e.IsException() {
			return e, nil
		} else {
			environment.Set(rest[0].AsSymbol(), *e)
			return e, nil
		}
	}
}

// specialFormDefmacro implements (defmacro! symbol fn): like def!, but when
// the value is a function it is copied with IsMacro set before binding.
func specialFormDefmacro(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, environment *core.Environment) (*core.Type, error) {
	if len(rest) != 2 || !rest[0].IsSymbol() {
		return nil, errors.New("Error: Invalid syntax for `defmacro!`.")
	} else {
		if e, ierr := eval(&rest[1], environment); ierr != nil {
			return nil, ierr
		} else if e.IsException() {
			return e, nil
		} else {
			var macro *core.Type
			if e.IsFunction() {
				// Copy the function so the original binding is not mutated.
				newFunction := core.Function{
					IsMacro:     true,
					Environment: e.Function.Environment,
					Body:        e.Function.Body,
					Params:      e.Function.Params,
					Callable:    e.Function.Callable,
				}
				macro = &core.Type{Function: &newFunction, Metadata: e.Metadata}
			} else {
				macro = e
			}
			environment.Set(rest[0].AsSymbol(), *macro)
			return macro, nil
		}
	}
}

// specialFormTryCatch implements (try* expr (catch* sym handler)): if expr
// yields an exception and a well-formed catch* clause is present, the
// handler runs in a child environment binding sym to the exception payload.
func specialFormTryCatch(eval func(*core.Type, *core.Environment) (*core.Type, error), rest []core.Type, environment *core.Environment) (*core.Type, error) {
	if len(rest) < 1 {
		return nil, errors.New("Error: Invalid syntax for `try*!`.")
	}

	e, err := eval(&rest[0], environment)
	if err != nil {
		return nil, err
	} else if len(rest) >= 2 {
		catchexp := rest[1].AsIterable()
		if e.IsException() && len(catchexp) == 3 && catchexp[0].CompareSymbol("catch*") && catchexp[1].IsSymbol() {
			symbol, body := catchexp[1].AsSymbol(), catchexp[2]
			return eval(&body, core.NewEnvironment(environment, []string{symbol}, []core.Type{*e.AsException()}))
		}
	}
	return e, nil
}

// evalCallable applies an already-evaluated list whose head is a native
// callable; a non-callable, non-exception head is a user error.
func evalCallable(node *core.Type) (*core.Type, error) {
	first, rest := node.AsIterable()[0], node.AsIterable()[1:]
	if first.IsCallable() {
		result := first.CallCallable(rest...)
		return &result, nil
	} else if first.IsException() {
		return &first, nil
	} else {
		return nil, errors.New(fmt.Sprintf("Error: '%s' is not a function.", first.ToString(true)))
	}
}

// evalAst evaluates the non-special-form cases: symbol lookup, element-wise
// evaluation of iterables (lists/vectors), and value-wise evaluation of
// hashmaps; anything else evaluates to itself.
func evalAst(node *core.Type, environment *core.Environment, eval func(*core.Type, *core.Environment) (*core.Type, error)) (*core.Type, error) {
	if node.IsSymbol() && !node.IsKeyword() {
		value := environment.Get(node.AsSymbol())
		return &value, nil
	}

	if node.IsIterable() {
		newIterable := node.DeriveIterable()
		for _, element := range node.AsIterable() {
			if evaluated, err := eval(&element, environment); err != nil {
				return nil, err
			} else {
				newIterable.Append(*evaluated)
			}
		}
		return newIterable, nil
	}

	if node.IsHashmap() {
		currentHashmap, newHashmap := node.AsHashmap(), core.NewHashmap()
		for key, value := range currentHashmap {
			if evaluated, err := eval(&value, environment); err != nil {
				return nil, err
			} else {
				newHashmap.HashmapSet(key, *evaluated)
			}
		}
		return newHashmap, nil
	}

	return node, nil
}

// quasiquote performs the standard MAL-style quasiquote expansion:
// symbols/hashmaps become (quote x), vectors are wrapped in (vec ...),
// (unquote x) yields x, and list elements are folded right-to-left into
// cons/concat calls, with (splice-unquote x) spliced via concat.
func quasiquote(node core.Type) core.Type {
	iterable := node.AsIterable()
	unquoted := len(iterable) >= 2 && iterable[0].CompareSymbol("unquote")

	if node.IsSymbol() || node.IsHashmap() || (node.IsVector() && unquoted) {
		return *core.NewList(*core.NewSymbol("quote"), node)
	} else if node.IsVector() {
		return *core.NewList(*core.NewSymbol("vec"), quasiquote(*core.NewList(iterable...)))
	} else if unquoted {
		return iterable[1]
	} else if len(iterable) >= 1 {
		result := *core.NewList()
		// Fold from the right so the resulting cons chain preserves order.
		for i := len(iterable) - 1; i >= 0; i-- {
			el := iterable[i]
			eli := el.AsIterable()
			if len(eli) >= 2 && eli[0].CompareSymbol("splice-unquote") {
				result = *core.NewList(*core.NewSymbol("concat"), eli[1], result)
			} else {
				result = *core.NewList(*core.NewSymbol("cons"), quasiquote(el), result)
			}
		}
		return result
	}
	return node
}

// isMacroCall reports whether node is a list whose head symbol resolves to a
// macro function; on success the macro value is handed to capture.
func isMacroCall(node core.Type, environment core.Environment, capture func(core.Type)) bool {
	if iterable := node.AsIterable(); node.IsList() && len(iterable) >= 1 {
		if first := iterable[0]; first.IsSymbol() {
			if macro := environment.Get(first.AsSymbol()); macro.IsMacroFunction() {
				capture(macro)
				return true
			}
		}
	}
	return false
}

// macroexpand repeatedly applies macros at the head of node until the
// result is no longer a macro call, and returns the expansion.
func macroexpand(node core.Type, environment core.Environment) core.Type {
	var macro core.Type
	capture := func(m core.Type) { macro = m }
	for isMacroCall(node, environment, capture) {
		parameters := node.AsIterable()[1:]
		node = macro.CallFunction(parameters...)
	}
	return node
}
// pengantar: object yang belum diinisialisasi akan bernilai null atau nil // untuk golang. saat pertamakali variable dibuat maka akan langsung memiliki default value // sesuai dengan assignment tipe data yang digunakan // golang tetap memiliki data nil(data kosong) // dan hanya bisa digunakan pada interface, function, map slice, pointer dan channel // praktik: dapat berguna untuk mengecek data didalam map, slice, dsb package main import "fmt" func NewMap(name string) map[string]string { // jika name kosong > return nil // jika terisi > membuat map baru // map[type-key]type-value if name == "" { return nil } else { return map[string]string{ "name": name, } } } // =========[ simpel nya begini ]========== // ======================================== // coba := { // key: name, // } // coba := NewMap(val, key){ // return map[string]string{ // key:val, // } // } // func cobaNil(){ // return nil // } // ======================================== // =========[ simpel nya begitu ]========== func main() { // cobaNil() var coba, lain map[string]string lain = map[string]string{} lain["oke"] = "oke dan bisa" fmt.Println(lain["oke"]) // cara vertikal var chicken1 = map[string]int{"januari": 50, "februari": 40} // cara horizontal var chicken2 = map[string]int{ "januari": 50, "februari": 40, } fmt.Println(chicken1["januari"], chicken2["januari"]) // coba = NewMap("val1", "key1") // coba = NewMap("val2", "key2") fmt.Println(coba["key1"]) fmt.Println(coba["key2"]) fmt.Println("sukses") // contoh untuk pengecekan apakah ada `value` didalam map var person map[string]string = NewMap("coba") if person == nil { fmt.Println("data kosong") } else { fmt.Println(person) } }
package main import ( "github.com/cosmos/cosmos-sdk/cmd/cosmos-sdk-cli/cmd" ) func main() { cmd.Execute() }
package main type TreeNode struct { Val int Left *TreeNode Right *TreeNode } func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode { res1 := []*TreeNode{} res2 := []*TreeNode{} path := []*TreeNode{} find := false findPath(root, p, path, &find, &res1) find = false findPath(root, q, path, &find, &res2) minLen := 0 if len(res1) < len(res2) { minLen = len(res1) } else { minLen = len(res2) } res := &TreeNode{} for i := 0; i < minLen; i++ { if res1[i] == res2[i] { res = res1[i] } else { break } } return res } func findPath(root, target *TreeNode, path []*TreeNode, find *bool, res *[]*TreeNode) { if root == nil || *find { return } path = append(path, root) if root == target { *find = true // 这里path不是引用传递,正是为了让path只存单一路径,而不是整个遍历的路径 *res = append(*res, path...) } findPath(root.Left, target, path, find, res) findPath(root.Right, target, path, find, res) }
//File : goroutine.go //Author: 燕人Lee&骚气又迷人的反派 //Date : 2019-08-21 package main import ( "fmt" "time" ) func main() { //var a[10] int for i := 0; i < 10; i++ { go func(i int) { // for { //a[i]++ //runtime.Gosched() fmt.Println("hello from"+"goroutine %d \n", i) } }(i) } time.Sleep(time.Minute) //fmt.Println(a) } func printHello() { }
/*
* Copyright © 2019-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
 */

package adabas

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestAcbx checks the ACBX control block's String() rendering after setting
// command id/options/ISN, after resetCop() with a security response code,
// and after a full resetAcbx().
func TestAcbx(t *testing.T) {
	acbx := newAcbx(1)
	assert.Equal(t, Dbid(1), acbx.Acbxdbid)
	acbx.Acbxcid = [4]byte{'A', 'B', 'x', 'n'}
	acbx.Acbxcop = [8]byte{'I', 'J', 0, 0, 0, 0, 0xff, 0x1}
	acbx.Acbxisn = 1234543
	assert.Equal(t, "ACBX:\n CmdCode: CmdId: 41 42 78 6e [ABxn] [...>]\n Dbid: 1 Filenr: 0 Responsecode: 148 Subcode: 0\n Isn: 1234543 ISN Lower Limit: 0 ISN Quantity: 0\n CmdOption: 49 4a 00 00 00 00 ff 01 [IJ....ÿ.] [........]\n Add1: 20 20 20 20 20 20 20 20 [ ] [........]\n Add2: 20 20 20 20 [ ] [....]\n Add3: 00 00 00 00 00 00 00 00 [........] [........]\n Add4: 00 00 00 00 00 00 00 00 [........] [........]\n Add5: 00 00 00 00 00 00 00 00 [........] [........]\n Add6: 00 00 00 00 00 00 00 00 [........] [........]\n User Area: 00000000000000000000000000000000 [................] [................]\n", acbx.String())
	// resetCop clears only the command options; the ISN is preserved.
	acbx.resetCop()
	acbx.Acbxrsp = AdaSECUR
	assert.Equal(t, "ACBX:\n CmdCode: CmdId: 41 42 78 6e [ABxn] [...>]\n Dbid: 1 Filenr: 0 Responsecode: 200 Subcode: 0\n Isn: 1234543 ISN Lower Limit: 0 ISN Quantity: 0\n CmdOption: 20 20 20 20 20 20 20 20 [ ] [........]\n Add1: 20 20 20 20 20 20 20 20 [ ] [........]\n Add2: 20 20 20 20 [ ] [....]\n Add3: 00 00 00 00 00 00 00 00 [........] [........]\n Add4: 00 00 00 00 00 00 00 00 [........] [........]\n Add5: 00 00 00 00 00 00 00 00 [........] [........]\n Add6: 00 00 00 00 00 00 00 00 [........] [........]\n User Area: 00000000000000000000000000000000 [................] [................]\n", acbx.String())
	// resetAcbx returns the block to its initial state (ISN back to 0).
	acbx.resetAcbx()
	assert.Equal(t, "ACBX:\n CmdCode: CmdId: 41 42 78 6e [ABxn] [...>]\n Dbid: 1 Filenr: 0 Responsecode: 148 Subcode: 0\n Isn: 0 ISN Lower Limit: 0 ISN Quantity: 0\n CmdOption: 20 20 20 20 20 20 20 20 [ ] [........]\n Add1: 20 20 20 20 20 20 20 20 [ ] [........]\n Add2: 20 20 20 20 [ ] [....]\n Add3: 00 00 00 00 00 00 00 00 [........] [........]\n Add4: 00 00 00 00 00 00 00 00 [........] [........]\n Add5: 00 00 00 00 00 00 00 00 [........] [........]\n Add6: 00 00 00 00 00 00 00 00 [........] [........]\n User Area: 00000000000000000000000000000000 [................] [................]\n", acbx.String())
}

// TestAID verifies that AddCredential stores user and password on the
// Adabas ID.
func TestAID(t *testing.T) {
	aid := NewAdabasID()
	aid.AddCredential("abc", "def")
	assert.Equal(t, "abc", aid.user)
	assert.Equal(t, "def", aid.pwd)
	fmt.Println(aid)
	aid.isOpen("abc")
}

// TestAIDClone verifies that Clone copies credentials and user but assigns
// a distinct Pid and Timestamp.
// NOTE(review): the 10-second sleep makes the Timestamp inequality robust
// but is very slow for a unit test — consider shortening.
func TestAIDClone(t *testing.T) {
	aid := NewAdabasID()
	aid.AddCredential("abc", "def")
	time.Sleep(10 * time.Second)
	caid := aid.Clone()
	assert.Equal(t, caid.user, aid.user)
	assert.Equal(t, caid.pwd, aid.pwd)
	assert.Equal(t, caid.AdaID.User, aid.AdaID.User)
	assert.NotEqual(t, caid.AdaID.Pid, aid.AdaID.Pid)
	assert.NotEqual(t, caid.AdaID.Timestamp, aid.AdaID.Timestamp)
}

// TestAdabasOpenParameter checks version parsing from the packed integer
// form. NOTE(review): both cases use the same input value (101122305) —
// presumably the second was meant to exercise a different version; confirm.
func TestAdabasOpenParameter(t *testing.T) {
	isq := uint64(101122305)
	assert.Equal(t, "6.7.1.1", parseVersion(isq))
	isq = uint64(101122305)
	assert.Equal(t, "6.7.1.1", parseVersion(isq))
}
/* * @lc app=leetcode.cn id=171 lang=golang * * [171] Excel表列序号 */ package solution // @lc code=start func titleToNumber(s string) (acc int) { for _, b := range s { acc = acc*26 + int(byte(b)-64) } return } // @lc code=end
package autocert

import (
	"context"
	"errors"
	"io"
	"io/fs"

	"cloud.google.com/go/storage"
	"github.com/caddyserver/certmagic"
	"google.golang.org/api/iterator"
)

// gcsStorage implements certmagic's storage interface on top of a Google
// Cloud Storage bucket. Locking is delegated to the embedded locker, which
// reuses this type's Store/Load/Delete primitives.
type gcsStorage struct {
	client *storage.Client
	bucket string
	prefix string // NOTE(review): prefix is stored but not applied to keys in these methods — confirm whether callers pre-apply it.
	*locker
}

// newGCSStorage builds a gcsStorage and wires its own Store/Load/Delete
// into the lock implementation.
func newGCSStorage(client *storage.Client, bucket, prefix string) *gcsStorage {
	s := &gcsStorage{
		client: client,
		bucket: bucket,
		prefix: prefix,
	}
	s.locker = &locker{
		store:  s.Store,
		load:   s.Load,
		delete: s.Delete,
	}
	return s
}

// Store writes value to the object named key. On a write error the writer
// is closed with that error so no partial object is committed.
func (s *gcsStorage) Store(ctx context.Context, key string, value []byte) error {
	obj := s.client.
		Bucket(s.bucket).
		Object(key)
	w := obj.NewWriter(ctx)
	_, err := w.Write(value)
	if err != nil {
		_ = w.CloseWithError(err)
		return err
	}
	// Close finalizes the upload; errors here mean the object was not stored.
	err = w.Close()
	if err != nil {
		return err
	}
	return nil
}

// Load reads the full contents of the object named key, translating
// "object not found" into fs.ErrNotExist as certmagic expects.
func (s *gcsStorage) Load(ctx context.Context, key string) ([]byte, error) {
	r, err := s.client.
		Bucket(s.bucket).
		Object(key).
		NewReader(ctx)
	if errors.Is(err, storage.ErrObjectNotExist) {
		return nil, fs.ErrNotExist
	} else if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}

// Delete removes the object named key; deleting a missing object is not an
// error (idempotent delete).
func (s *gcsStorage) Delete(ctx context.Context, key string) error {
	err := s.client.
		Bucket(s.bucket).
		Object(key).
		Delete(ctx)
	if errors.Is(err, storage.ErrObjectNotExist) {
		return nil
	}
	return err
}

// Exists reports whether the object named key exists. Any Attrs error
// (including transient ones) is treated as "does not exist".
func (s *gcsStorage) Exists(ctx context.Context, key string) bool {
	_, err := s.client.
		Bucket(s.bucket).
		Object(key).
		Attrs(ctx)
	return err == nil
}

// List returns object keys under prefix. When recursive is false a "/"
// delimiter is used, so synthetic directory prefixes are returned in place
// of the objects below them.
func (s *gcsStorage) List(ctx context.Context, prefix string, recursive bool) ([]string, error) {
	var delimiter string
	if !recursive {
		delimiter = "/"
	}
	it := s.client.
		Bucket(s.bucket).
		Objects(ctx, &storage.Query{
			Delimiter: delimiter,
			Prefix:    prefix,
		})

	var keys []string
	for {
		attrs, err := it.Next()
		if errors.Is(err, iterator.Done) {
			break
		} else if err != nil {
			return nil, err
		}
		// A non-empty Prefix entry is a synthetic "directory"; otherwise the
		// entry is a concrete object name.
		if attrs.Prefix != "" {
			keys = append(keys, attrs.Prefix)
		} else {
			keys = append(keys, attrs.Name)
		}
	}
	return keys, nil
}

// Stat returns certmagic metadata for the object named key, mapping a
// missing object to fs.ErrNotExist.
func (s *gcsStorage) Stat(ctx context.Context, key string) (certmagic.KeyInfo, error) {
	attrs, err := s.client.
		Bucket(s.bucket).
		Object(key).
		Attrs(ctx)
	if errors.Is(err, storage.ErrObjectNotExist) {
		return certmagic.KeyInfo{}, fs.ErrNotExist
	} else if err != nil {
		return certmagic.KeyInfo{}, err
	}
	return certmagic.KeyInfo{
		Key:        key,
		Modified:   attrs.Updated,
		Size:       attrs.Size,
		IsTerminal: true,
	}, nil
}
package application

import (
	"log"
	"os"

	"github.com/cloudfoundry-incubator/notifications/cf"
	"github.com/cloudfoundry-incubator/notifications/config"
	"github.com/cloudfoundry-incubator/notifications/gobble"
	"github.com/cloudfoundry-incubator/notifications/models"
	"github.com/cloudfoundry-incubator/notifications/postal"
	"github.com/cloudfoundry-incubator/notifications/web/handlers"
	"github.com/cloudfoundry-incubator/notifications/web/middleware"
	"github.com/cloudfoundry-incubator/notifications/web/services"
	"github.com/nu7hatch/gouuid"
	"github.com/pivotal-cf/uaa-sso-golang/uaa"
	"github.com/ryanmoran/stack"
)

// Mother is the application's factory/composition root: each method builds
// one collaborator. Only the logger and queue are cached (lazily, on the
// pointer receiver); every other method constructs a fresh instance per call.
type Mother struct {
	logger *log.Logger
	queue  *gobble.Queue
}

// NewMother returns an empty factory; members are created on first use.
func NewMother() *Mother {
	return &Mother{}
}

// Logger lazily creates and caches the shared "[WEB]" stdout logger.
// NOTE(review): the lazy init is not synchronized — confirm single-threaded
// initialization is guaranteed by the callers.
func (mother *Mother) Logger() *log.Logger {
	if mother.logger == nil {
		mother.logger = log.New(os.Stdout, "[WEB] ", log.LstdFlags)
	}
	return mother.logger
}

// Queue lazily creates and caches the gobble job queue.
func (mother *Mother) Queue() *gobble.Queue {
	if mother.queue == nil {
		mother.queue = gobble.NewQueue()
	}
	return mother.queue
}

// NewUAARecipe wires up the UAA-backed notification recipe: UAA client,
// Cloud Controller, token/user/space/template loaders, mailer and receipts.
func (mother Mother) NewUAARecipe() postal.UAARecipe {
	env := config.NewEnvironment()
	uaaClient := uaa.NewUAA("", env.UAAHost, env.UAAClientID, env.UAAClientSecret, "")
	uaaClient.VerifySSL = env.VerifySSL
	cloudController := cf.NewCloudController(env.CCHost)

	tokenLoader := postal.NewTokenLoader(&uaaClient)
	userLoader := postal.NewUserLoader(&uaaClient, mother.Logger(), cloudController)
	spaceLoader := postal.NewSpaceLoader(cloudController)
	templateLoader := postal.NewTemplateLoader(postal.NewFileSystem())
	mailer := mother.Mailer()
	receiptsRepo := models.NewReceiptsRepo()

	return postal.NewUAARecipe(tokenLoader, userLoader, spaceLoader, templateLoader, mailer, receiptsRepo)
}

// EmailRecipe builds the plain-email recipe (no UAA lookup involved).
func (mother Mother) EmailRecipe() postal.MailRecipeInterface {
	return postal.NewEmailRecipe(mother.Mailer(), postal.NewTemplateLoader(postal.NewFileSystem()))
}

// NotificationFinder builds the finder over the clients and kinds repos.
func (mother Mother) NotificationFinder() services.NotificationFinder {
	clientsRepo, kindsRepo := mother.Repos()
	return services.NewNotificationFinder(clientsRepo, kindsRepo)
}

// Mailer builds a mailer backed by the shared queue, UUID generation and
// the unsubscribes repo.
func (mother Mother) Mailer() postal.Mailer {
	return postal.NewMailer(mother.Queue(), uuid.NewV4, mother.UnsubscribesRepo())
}

// Repos returns fresh clients and kinds repositories.
func (mother Mother) Repos() (models.ClientsRepo, models.KindsRepo) {
	return models.NewClientsRepo(), models.NewKindsRepo()
}

// Logging returns the request-logging middleware using the shared logger.
func (mother Mother) Logging() stack.Middleware {
	return stack.NewLogging(mother.Logger())
}

// ErrorWriter returns the HTTP error writer.
func (mother Mother) ErrorWriter() handlers.ErrorWriter {
	return handlers.NewErrorWriter()
}

// Authenticator returns token-auth middleware requiring the given scopes.
func (mother Mother) Authenticator(scopes []string) middleware.Authenticator {
	return middleware.NewAuthenticator(scopes, config.UAAPublicKey)
}

// Registrar builds the client/kind registrar.
func (mother Mother) Registrar() services.Registrar {
	clientsRepo, kindsRepo := mother.Repos()
	return services.NewRegistrar(clientsRepo, kindsRepo)
}

// PreferencesFinder builds the preferences finder.
func (mother Mother) PreferencesFinder() *services.PreferencesFinder {
	return services.NewPreferencesFinder(models.NewPreferencesRepo())
}

// PreferenceUpdater builds the preference updater over the unsubscribes repo.
func (mother Mother) PreferenceUpdater() services.PreferenceUpdater {
	return services.NewPreferenceUpdater(mother.UnsubscribesRepo())
}

// UnsubscribesRepo returns a fresh unsubscribes repository.
func (mother Mother) UnsubscribesRepo() models.UnsubscribesRepo {
	return models.NewUnsubscribesRepo()
}

// CORS returns CORS middleware configured from the environment's origin.
func (mother Mother) CORS() middleware.CORS {
	env := config.NewEnvironment()
	return middleware.NewCORS(env.CORSOrigin)
}
package array_test import ( "testing" "github.com/stretchr/testify/assert" "github.com/ywardhana/golib/array" ) func TestEqual(t *testing.T) { tests := []struct { arrA interface{} arrB interface{} expected bool }{ { arrA: []int{1, 2, 3, 4}, arrB: []int{1, 2, 3, 4}, expected: true, }, { arrA: []int{4, 2, 1, 3}, arrB: []int{1, 2, 3, 4}, expected: true, }, { arrA: []int{4, 2, 1}, arrB: []int{1, 2, 3, 4}, expected: false, }, { arrA: []int{4, 2, 1, 3, 1}, arrB: []int{1, 2, 3, 4}, expected: false, }, { arrA: []int{4, 2, 1, 3}, arrB: []int{1, 1, 2, 3, 4}, expected: false, }, } for _, tt := range tests { assert.Equal(t, tt.expected, array.Equal(tt.arrA, tt.arrB)) } }
package main import ( "time" "log" "encoding/json" "github.com/liujianping/consumer" ) type context struct{ stop chan bool } func (c *context) Do(req interface{}) error { r, _ := req.(*MyProduct) return r.Do(c) } func (c *context) Encode(request interface{}) ([]byte, error) { return json.Marshal(request) } func (c *context) Decode(data []byte) (interface{}, error) { var p MyProduct err := json.Unmarshal(data, &p) return &p, err } func (p *MyProduct) Do(c *context) error { log.Printf("product No(%d) do", p.No) time.Sleep(time.Millisecond * 250) log.Printf("product No(%d) Done", p.No) return nil } type MyProduct struct{ No int } func main() { core := &context{ stop: make(chan bool, 0)} consumer := consumer.NewPersistConsumer("sleepy", 10, "./", 10240, 8, time.Second) consumer.Resume(core, 2) //! uncomment and mod for your test // for i:= 31; i <= 60; i++ { // consumer.Put(&MyProduct{i}) // } log.Printf("consumer running %v", consumer.Running()) consumer.Close() log.Printf("consumer running %v", consumer.Running()) }
package modules

import (
	"encoding/csv"
	"encoding/xml"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync/atomic"
	"time"
)

// QuoteResponse is the markitondemand stock-quote XML payload.
type QuoteResponse struct {
	Status           string
	Name             string
	LastPrice        float32
	Change           float32
	ChangePercent    float32
	TimeStamp        string
	MSDate           float32
	MarketCap        int
	Volume           int
	ChangeYTD        float32
	ChangePercentYTD float32
	High             float32
	Low              float32
	Open             float32
}

// Invoice is one CSV record from a dropped file:
// number,amount,purchase-order-number,unix-timestamp.
type Invoice struct {
	Number              string
	Amount              float64
	PurchaseOrderNumber int
	InvoiceDate         time.Time
}

const watchedPath = "./source"

// Goroutines polls watchedPath forever, ingesting each dropped file as a
// CSV of invoices on its own goroutine and deleting the file afterwards.
// Errors on a single file are skipped so the watcher keeps running.
func Goroutines() {
	// filewatcher
	runtime.GOMAXPROCS(4)
	for {
		d, err := os.Open(watchedPath)
		if err != nil {
			continue
		}
		files, err := d.Readdir(-1)
		// FIX: the original never closed the directory handle, leaking one
		// descriptor per poll iteration.
		d.Close()
		if err != nil {
			continue
		}
		for _, fi := range files {
			filePath := watchedPath + "/" + fi.Name()
			f, err := os.Open(filePath)
			if err != nil {
				continue
			}
			data, err := ioutil.ReadAll(f)
			f.Close()
			if err != nil {
				continue
			}
			os.Remove(filePath)
			// Parse each file concurrently; data is passed by value so the
			// goroutine owns its own copy.
			go func(data string) {
				reader := csv.NewReader(strings.NewReader(data))
				records, err := reader.ReadAll()
				if err != nil {
					return
				}
				for _, r := range records {
					invoice := new(Invoice)
					invoice.Number = r[0]
					invoice.Amount, _ = strconv.ParseFloat(r[1], 64)
					invoice.PurchaseOrderNumber, _ = strconv.Atoi(r[2])
					unixTime, _ := strconv.ParseInt(r[3], 10, 64)
					invoice.InvoiceDate = time.Unix(unixTime, 0)
					// FIX: "Recieved" typo, malformed verb %2.f -> %.2f, and
					// missing trailing newline.
					fmt.Printf("Received invoice '%v' for $%.2f and submitted\n",
						invoice.Number, invoice.Amount)
				}
			}(string(data))
		}
	}
}

// Goroutines3 fetches quotes for several symbols concurrently and waits for
// all fetches to finish before reporting the elapsed time.
func Goroutines3() {
	runtime.GOMAXPROCS(4)
	start := time.Now()
	stockSymbols := []string{
		"googl", "msft", "aapl", "bbry", "hpq", "vz", "t", "tmus", "s"}
	// FIX: the original incremented a plain int from multiple goroutines (a
	// data race under -race) and skipped the increment entirely on HTTP
	// error, so the wait loop could spin forever. Use an atomic counter and
	// defer the increment so every goroutine is counted exactly once.
	var numComplete int32
	for _, symbol := range stockSymbols {
		go func(symbol string) {
			defer atomic.AddInt32(&numComplete, 1)
			resp, err := http.Get("http://dev.markitondemand.com/MODApis/Api/v2/Quote?symbol=" + symbol)
			if err != nil {
				return
			}
			defer resp.Body.Close()
			body, _ := ioutil.ReadAll(resp.Body)
			var quote QuoteResponse
			xml.Unmarshal(body, &quote)
			fmt.Printf("%s : %.2f\n", quote.Name, quote.LastPrice)
		}(symbol)
	}
	for atomic.LoadInt32(&numComplete) < int32(len(stockSymbols)) {
		time.Sleep(10 * time.Millisecond)
	}
	elapsed := time.Since(start)
	fmt.Printf("Execution time: %s", elapsed)
}

// Goroutines2 fetches a single quote synchronously and prints it.
func Goroutines2() {
	resp, err := http.Get("http://dev.markitondemand.com/MODApis/Api/v2/Quote?symbol=googl")
	if err != nil {
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	var quote QuoteResponse
	xml.Unmarshal(body, &quote)
	fmt.Printf("%s : %.2f", quote.Name, quote.LastPrice)
}

// Goroutines1 interleaves two printing goroutines for roughly one second to
// demonstrate the scheduler.
func Goroutines1() {
	godur, _ := time.ParseDuration("10ms")
	runtime.GOMAXPROCS(2)
	go func() {
		for index := 0; index < 100; index++ {
			println("Hello")
			time.Sleep(godur)
		}
	}()
	go func() {
		for index := 0; index < 100; index++ {
			println("Go")
			time.Sleep(godur)
		}
	}()
	dur, _ := time.ParseDuration("1s")
	time.Sleep(dur)
}

// http://dev.markitondemand.com/MODApis/Api/v2/Quote?symbol=googl
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/logging/alpha/logging_alpha_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/alpha"
)

// LogExclusionServer implements the gRPC interface for LogExclusion.
type LogExclusionServer struct{}

// ProtoToLogExclusion converts a LogExclusion resource from its proto representation.
func ProtoToLogExclusion(p *alphapb.LoggingAlphaLogExclusion) *alpha.LogExclusion {
	obj := &alpha.LogExclusion{
		Name:        dcl.StringOrNil(p.GetName()),
		Description: dcl.StringOrNil(p.GetDescription()),
		Filter:      dcl.StringOrNil(p.GetFilter()),
		Disabled:    dcl.Bool(p.GetDisabled()),
		CreateTime:  dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:  dcl.StringOrNil(p.GetUpdateTime()),
		Parent:      dcl.StringOrNil(p.GetParent()),
	}
	return obj
}

// LogExclusionToProto converts a LogExclusion resource to its proto representation.
func LogExclusionToProto(resource *alpha.LogExclusion) *alphapb.LoggingAlphaLogExclusion {
	p := &alphapb.LoggingAlphaLogExclusion{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetFilter(dcl.ValueOrEmptyString(resource.Filter))
	p.SetDisabled(dcl.ValueOrEmptyBool(resource.Disabled))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetParent(dcl.ValueOrEmptyString(resource.Parent))
	return p
}

// applyLogExclusion handles the gRPC request by passing it to the underlying LogExclusion Apply() method.
func (s *LogExclusionServer) applyLogExclusion(ctx context.Context, c *alpha.Client, request *alphapb.ApplyLoggingAlphaLogExclusionRequest) (*alphapb.LoggingAlphaLogExclusion, error) {
	p := ProtoToLogExclusion(request.GetResource())
	res, err := c.ApplyLogExclusion(ctx, p)
	if err != nil {
		return nil, err
	}
	r := LogExclusionToProto(res)
	return r, nil
}

// ApplyLoggingAlphaLogExclusion handles the gRPC request by passing it to the underlying LogExclusion Apply() method.
func (s *LogExclusionServer) ApplyLoggingAlphaLogExclusion(ctx context.Context, request *alphapb.ApplyLoggingAlphaLogExclusionRequest) (*alphapb.LoggingAlphaLogExclusion, error) {
	cl, err := createConfigLogExclusion(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyLogExclusion(ctx, cl, request)
}

// DeleteLoggingAlphaLogExclusion handles the gRPC request by passing it to the underlying LogExclusion Delete() method.
func (s *LogExclusionServer) DeleteLoggingAlphaLogExclusion(ctx context.Context, request *alphapb.DeleteLoggingAlphaLogExclusionRequest) (*emptypb.Empty, error) {
	cl, err := createConfigLogExclusion(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteLogExclusion(ctx, ProtoToLogExclusion(request.GetResource()))
}

// ListLoggingAlphaLogExclusion handles the gRPC request by passing it to the underlying LogExclusion List() method.
func (s *LogExclusionServer) ListLoggingAlphaLogExclusion(ctx context.Context, request *alphapb.ListLoggingAlphaLogExclusionRequest) (*alphapb.ListLoggingAlphaLogExclusionResponse, error) {
	cl, err := createConfigLogExclusion(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListLogExclusion(ctx, request.GetParent())
	if err != nil {
		return nil, err
	}
	var protos []*alphapb.LoggingAlphaLogExclusion
	for _, r := range resources.Items {
		rp := LogExclusionToProto(r)
		protos = append(protos, rp)
	}
	p := &alphapb.ListLoggingAlphaLogExclusionResponse{}
	p.SetItems(protos)
	return p, nil
}

// createConfigLogExclusion builds an alpha client authenticated with the
// given service account credentials file.
func createConfigLogExclusion(ctx context.Context, service_account_file string) (*alpha.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return alpha.NewClient(conf), nil
}
// This file was generated for SObject FeedLike, API Version v43.0 at 2018-07-30 03:47:18.743501415 -0400 EDT m=+5.086298705 package sobjects import ( "fmt" "strings" ) type FeedLike struct { BaseSObject CreatedById string `force:",omitempty"` CreatedDate string `force:",omitempty"` FeedEntityId string `force:",omitempty"` FeedItemId string `force:",omitempty"` Id string `force:",omitempty"` InsertedById string `force:",omitempty"` IsDeleted bool `force:",omitempty"` } func (t *FeedLike) ApiName() string { return "FeedLike" } func (t *FeedLike) String() string { builder := strings.Builder{} builder.WriteString(fmt.Sprintf("FeedLike #%s - %s\n", t.Id, t.Name)) builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById)) builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate)) builder.WriteString(fmt.Sprintf("\tFeedEntityId: %v\n", t.FeedEntityId)) builder.WriteString(fmt.Sprintf("\tFeedItemId: %v\n", t.FeedItemId)) builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id)) builder.WriteString(fmt.Sprintf("\tInsertedById: %v\n", t.InsertedById)) builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted)) return builder.String() } type FeedLikeQueryResponse struct { BaseQuery Records []FeedLike `json:"Records" force:"records"` }
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/midbel/sdp"
)

// main opens the SDP file named by the first command-line argument, parses it,
// and prints its dump to stdout. Exits 1 on open failure, 2 on parse failure.
func main() {
	flag.Parse()

	file, err := os.Open(flag.Arg(0))
	if err != nil {
		fmt.Fprintln(os.Stderr, "open:", err)
		os.Exit(1)
	}
	defer file.Close()

	session, err := sdp.Parse(file)
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse:", err)
		os.Exit(2)
	}

	fmt.Println(session.Dump())
}
// Package authorize is a pomerium service that is responsible for determining
// if a given request should be authorized (AuthZ).
package authorize

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/errgroup"

	"github.com/pomerium/pomerium/authorize/evaluator"
	"github.com/pomerium/pomerium/authorize/internal/store"
	"github.com/pomerium/pomerium/config"
	"github.com/pomerium/pomerium/internal/atomicutil"
	"github.com/pomerium/pomerium/internal/log"
	"github.com/pomerium/pomerium/internal/telemetry/metrics"
	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"github.com/pomerium/pomerium/pkg/cryptutil"
	"github.com/pomerium/pomerium/pkg/grpc"
	"github.com/pomerium/pomerium/pkg/grpc/databroker"
	"github.com/pomerium/pomerium/pkg/storage"
)

// Authorize holds the state and dependencies used to authorize requests.
type Authorize struct {
	state          *atomicutil.Value[*authorizeState] // swapped atomically on config change
	store          *store.Store
	currentOptions *atomicutil.Value[*config.Options]
	accessTracker  *AccessTracker
	globalCache    storage.Cache

	// The stateLock prevents updating the evaluator store simultaneously with an evaluation.
	// This should provide a consistent view of the data at a given server/record version and
	// avoid partial updates.
	stateLock sync.RWMutex
}

// New validates and creates a new Authorize service from a set of config options.
func New(cfg *config.Config) (*Authorize, error) {
	a := &Authorize{
		currentOptions: config.NewAtomicOptions(),
		store:          store.New(),
		globalCache:    storage.NewGlobalCache(time.Minute),
	}

	a.accessTracker = NewAccessTracker(a, accessTrackerMaxSize, accessTrackerDebouncePeriod)

	// Build the initial state eagerly so configuration errors surface at startup.
	state, err := newAuthorizeStateFromConfig(cfg, a.store)
	if err != nil {
		return nil, err
	}
	a.state = atomicutil.NewValue(state)

	return a, nil
}

// GetDataBrokerServiceClient returns the current DataBrokerServiceClient.
func (a *Authorize) GetDataBrokerServiceClient() databroker.DataBrokerServiceClient {
	return a.state.Load().dataBrokerClient
}

// Run runs the authorize service.
func (a *Authorize) Run(ctx context.Context) error {
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		a.accessTracker.Run(ctx)
		return nil
	})
	eg.Go(func() error {
		// Wait (up to 10s) for the databroker connection to become ready.
		// The error is deliberately discarded: startup proceeds either way.
		_ = grpc.WaitForReady(ctx, a.state.Load().dataBrokerClientConnection, time.Second*10)
		return nil
	})
	return eg.Wait()
}

// validateOptions checks that the configured shared secret is present and
// usable as an AEAD cipher key.
func validateOptions(o *config.Options) error {
	sharedKey, err := o.GetSharedKey()
	if err != nil {
		return fmt.Errorf("authorize: bad 'SHARED_SECRET': %w", err)
	}
	if _, err := cryptutil.NewAEADCipher(sharedKey); err != nil {
		return fmt.Errorf("authorize: bad 'SHARED_SECRET': %w", err)
	}
	return nil
}

// newPolicyEvaluator returns a policy evaluator built from the given options
// and store.
func newPolicyEvaluator(opts *config.Options, store *store.Store) (*evaluator.Evaluator, error) {
	metrics.AddPolicyCountCallback("pomerium-authorize", func() int64 {
		return int64(len(opts.GetAllPolicies()))
	})
	ctx := context.Background()
	ctx, span := trace.StartSpan(ctx, "authorize.newPolicyEvaluator")
	defer span.End()
	clientCA, err := opts.DownstreamMTLS.GetCA()
	if err != nil {
		return nil, fmt.Errorf("authorize: invalid client CA: %w", err)
	}
	clientCRL, err := opts.DownstreamMTLS.GetCRL()
	if err != nil {
		return nil, fmt.Errorf("authorize: invalid client CRL: %w", err)
	}
	authenticateURL, err := opts.GetInternalAuthenticateURL()
	if err != nil {
		return nil, fmt.Errorf("authorize: invalid authenticate url: %w", err)
	}
	signingKey, err := opts.GetSigningKey()
	if err != nil {
		return nil, fmt.Errorf("authorize: invalid signing key: %w", err)
	}
	// It is important to add an invalid_client_certificate rule even when the
	// mTLS enforcement behavior is set to reject connections at the listener
	// level, because of the per-route TLSDownstreamClientCA setting.
	addDefaultClientCertificateRule := opts.HasAnyDownstreamMTLSClientCA() &&
		opts.DownstreamMTLS.GetEnforcement() != config.MTLSEnforcementPolicy
	clientCertConstraints, err := evaluator.ClientCertConstraintsFromConfig(&opts.DownstreamMTLS)
	if err != nil {
		return nil, fmt.Errorf(
			"authorize: internal error: couldn't build client cert constraints: %w", err)
	}
	return evaluator.New(ctx, store,
		evaluator.WithPolicies(opts.GetAllPolicies()),
		evaluator.WithClientCA(clientCA),
		evaluator.WithAddDefaultClientCertificateRule(addDefaultClientCertificateRule),
		evaluator.WithClientCRL(clientCRL),
		evaluator.WithClientCertConstraints(clientCertConstraints),
		evaluator.WithSigningKey(signingKey),
		evaluator.WithAuthenticateURL(authenticateURL.String()),
		evaluator.WithGoogleCloudServerlessAuthenticationServiceAccount(opts.GetGoogleCloudServerlessAuthenticationServiceAccount()),
		evaluator.WithJWTClaimsHeaders(opts.JWTClaimsHeaders),
	)
}

// OnConfigChange updates internal structures based on config.Options.
// On error the previous state is kept and the failure is only logged.
func (a *Authorize) OnConfigChange(ctx context.Context, cfg *config.Config) {
	a.currentOptions.Store(cfg.Options)
	if state, err := newAuthorizeStateFromConfig(cfg, a.store); err != nil {
		log.Error(ctx).Err(err).Msg("authorize: error updating state")
	} else {
		a.state.Store(state)
	}
}