text
stringlengths
11
4.05M
package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Test drives the argosay CLI entry point (defined elsewhere in this
// package) through each of its sub-commands, covering both the happy
// path and argument-validation failures.
// NOTE(review): argosay and exitError are not visible in this chunk;
// the expectations below assume argosay(args...) runs the command and
// returns its error — confirm against the sibling file.
func Test(t *testing.T) {
	t.Run("root", func(t *testing.T) {
		// No arguments is valid; an unknown sub-command is rejected.
		assert.NoError(t, argosay())
		assert.Error(t, argosay("garbage"))
	})
	t.Run("assert_contains", func(t *testing.T) {
		// Seed /tmp/foo with known content first via echo.
		assert.NoError(t, argosay("echo", "foo", "/tmp/foo"))
		// Wrong arity, a missing file, and non-matching content all fail.
		assert.Error(t, argosay("assert_contains"))
		assert.Error(t, argosay("assert_contains", "/tmp/foo"))
		assert.Error(t, argosay("assert_contains", "/tmp/not-exists", "foo"))
		assert.NoError(t, argosay("assert_contains", "/tmp/foo", "foo"))
		assert.Error(t, argosay("assert_contains", "/tmp/foo", "bar"))
	})
	t.Run("echo", func(t *testing.T) {
		// echo accepts up to a message and a target file; a fourth
		// argument is rejected.
		assert.NoError(t, argosay("echo"))
		assert.NoError(t, argosay("echo", "foo"))
		assert.NoError(t, argosay("echo", "foo", "/tmp/foo"))
		assert.Error(t, argosay("echo", "foo", "/tmp/foo", "garbage"))
	})
	t.Run("cat", func(t *testing.T) {
		assert.NoError(t, argosay("cat", "/tmp/foo", "/tmp/foo"))
		assert.Error(t, argosay("cat", "/tmp/non"))
	})
	t.Run("sleep", func(t *testing.T) {
		// "1s" parses; "garbage" presumably fails duration parsing — TODO confirm.
		assert.NoError(t, argosay("sleep", "1s"))
		assert.Error(t, argosay("sleep", "garbage"))
	})
	t.Run("exit", func(t *testing.T) {
		// Exit code 0 (implicit or explicit) is success; a non-zero
		// code surfaces as an exitError carrying that code.
		assert.NoError(t, argosay("exit"))
		assert.NoError(t, argosay("exit", "0"))
		assert.Equal(t, exitError{1}, argosay("exit", "1"))
	})
}
package install

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestDeploymentStatusViewerStatus is a table-driven test for
// DeploymentStatus (defined elsewhere in this package), which reports
// a human-readable message, a "rollout done" flag, and an error for a
// Deployment's current state.
func TestDeploymentStatusViewerStatus(t *testing.T) {
	tests := []struct {
		generation int64                   // spec generation stamped on the Deployment
		status     appsv1.DeploymentStatus // observed status under test
		err        error                   // expected error (nil means no error)
		msg        string                  // expected status message
		done       bool                    // expected rollout-complete flag
	}{
		{
			// Progressing condition with TimedOutReason => progress deadline exceeded.
			status: appsv1.DeploymentStatus{
				Conditions: []appsv1.DeploymentCondition{
					{
						Type:   appsv1.DeploymentProgressing,
						Reason: TimedOutReason,
					},
				},
			},
			err:  fmt.Errorf("deployment \"foo\" exceeded its progress deadline"),
			done: false,
		},
		{
			// Progressing (not timed out) plus Available=True => done.
			status: appsv1.DeploymentStatus{
				Conditions: []appsv1.DeploymentCondition{
					{
						Type:   appsv1.DeploymentProgressing,
						Reason: "NotTimedOut",
					},
					{
						Type:   appsv1.DeploymentAvailable,
						Status: corev1.ConditionTrue,
					},
				},
			},
			msg:  "deployment \"foo\" is up-to-date and available",
			done: true,
		},
		{
			// Spec generation newer than ObservedGeneration => still waiting.
			generation: 1,
			status: appsv1.DeploymentStatus{
				ObservedGeneration: 0,
			},
			msg:  "waiting for spec update of deployment \"foo\" to be observed...",
			done: false,
		},
		{
			// 5 total replicas, only 3 updated => 2 outdated remain.
			status: appsv1.DeploymentStatus{
				Replicas:        5,
				UpdatedReplicas: 3,
			},
			msg:  "deployment \"foo\" waiting for 2 outdated replica(s) to be terminated",
			done: false,
		},
		{
			// No Available condition at all => reported as missing.
			status: appsv1.DeploymentStatus{},
			msg:    fmt.Sprintf("deployment \"foo\" not available: missing condition %q", appsv1.DeploymentAvailable),
			done:   false,
		},
		{
			// Available=False => its message is surfaced.
			status: appsv1.DeploymentStatus{
				Conditions: []appsv1.DeploymentCondition{
					{
						Type:    appsv1.DeploymentAvailable,
						Status:  corev1.ConditionFalse,
						Message: "test message",
					},
				},
			},
			msg:  "deployment \"foo\" not available: test message",
			done: false,
		},
		{
			// Available=Unknown is treated the same as False here.
			status: appsv1.DeploymentStatus{
				Conditions: []appsv1.DeploymentCondition{
					{
						Type:    appsv1.DeploymentAvailable,
						Status:  corev1.ConditionUnknown,
						Message: "test message",
					},
				},
			},
			msg:  "deployment \"foo\" not available: test message",
			done: false,
		},
		{
			// Available=True alone => done.
			status: appsv1.DeploymentStatus{
				Conditions: []appsv1.DeploymentCondition{
					{
						Type:   appsv1.DeploymentAvailable,
						Status: corev1.ConditionTrue,
					},
				},
			},
			msg:  "deployment \"foo\" is up-to-date and available",
			done: true,
		},
	}
	for i, test := range tests {
		t.Run(fmt.Sprintf("%d", i+1), func(t *testing.T) {
			d := &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:  "bar",
					Name:       "foo",
					Generation: test.generation,
				},
				Status: test.status,
			}
			msg, done, err := DeploymentStatus(d)
			// Shadow the package name with a *Assertions bound to t.
			assert := assert.New(t)
			if test.err == nil {
				assert.NoError(err)
			} else {
				assert.EqualError(err, test.err.Error())
			}
			assert.Equal(test.done, done)
			assert.Equal(test.msg, msg)
		})
	}
}
package commands

import (
	"errors"
	"fmt"
	"strings"

	"code.cloudfoundry.org/garden"
)

// List prints the handle of every container known to the garden
// server, optionally filtered by property name=value pairs. With
// --verbose it also prints each container's properties followed by
// the configured separator.
type List struct {
	Properties []string `short:"p" long:"properties" description:"filter by properties (name=value)"`
	Verbose    bool     `short:"v" long:"verbose" description:"print additional details about each container"`
	Separator  string   `long:"separator" description:"separator to print between containers in verbose mode"`
}

// Execute implements the flags-command interface: parses the property
// filters, queries the garden client, and prints the results to stdout.
func (command *List) Execute(args []string) error {
	// Build the garden property filter from repeated -p name=value flags.
	properties := garden.Properties{}
	for _, prop := range command.Properties {
		segs := strings.SplitN(prop, "=", 2)
		if len(segs) < 2 {
			// fail is a package helper not visible here; presumably it
			// terminates execution, since otherwise control would fall
			// through to the assignment below — TODO confirm.
			fail(errors.New("malformed property pair (must be name=value)"))
		}
		properties[segs[0]] = segs[1]
	}
	containers, err := globalClient().Containers(properties)
	// failIf is a package helper; presumably aborts on non-nil err — TODO confirm.
	failIf(err)
	for _, container := range containers {
		fmt.Println(container.Handle())
		if command.Verbose {
			// NOTE(review): the Properties() error is deliberately
			// dropped here (best-effort listing) — confirm intended.
			props, _ := container.Properties()
			for k, v := range props {
				fmt.Printf(" %s=%s\n", k, v)
			}
			fmt.Print(command.Separator)
		}
	}
	return nil
}
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net
// Use of this source code is governed by a license that can be found in the LICENSE file.

package id_test

import "testing"

// TestUpdateAllEqual feeds sources whose TRICE macros already carry
// fixed IDs (100 and 200) into doUpdate (test helper defined elsewhere
// in this package). The sources are expected to come back unchanged
// (sOri is passed as both input and expectation) and the generated ID
// list must contain exactly one entry per distinct ID.
// NOTE(review): the raw-string fixtures below are compared verbatim by
// doUpdate and must stay byte-identical.
func TestUpdateAllEqual(t *testing.T) {
	sOri := []string{
		` TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); `,
		` TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); `,
		` TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); TRICE_S( Id(200), "sig:generated=%s\n", x ); TRICE_S( Id(200), "sig:generated=%s\n", x ); TRICE_S( Id(200), "sig:generated=%s\n", x ); `,
		` TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); TRICE_S( Id(200), "sig:generated=%s\n", x ); TRICE_S( Id(200), "sig:generated=%s\n", x ); `,
	}
	// Expected JSON list: one record per unique ID, with escaped format strings.
	listExp := `[ { "id": 100, "fmtType": "TRICE32_2", "fmtStrg": "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\\r\\n", "created": 0, "removed": 0 }, { "id": 200, "fmtType": "TRICE_S", "fmtStrg": "sig:generated=%s\\n", "created": 0, "removed": 0 } ]`
	doUpdate(t, sOri, sOri, listExp)
}

// Currently a TRICE macro must be complete with Id(0) or Id(12345) or can be without parameter count specification but must not contain an Id(0) or Id(12345) in that case.
// Examples:
// TRICE8( "%d", v) // ok
// TRICE8_1( Id(0), "%d", v) // ok
// TRICE8_1( Id(7), "%d", v) // ok
// TRICE8_1( "%d", v ) // NOT ok: remove _1 or add Id(0)
// TRICE8( Id(0), "%d", v ) // NOT ok: add _1 or remove Id(0)

// TestUpdateWithIdAndNoParamCount verifies that doUpdate leaves a
// macro that already has an explicit Id untouched, and inserts a fresh
// generated Id into a count-less macro (TRICE_S) that lacks one.
// NOTE(review): 43274 is presumably the deterministic ID the generator
// produces for this fixture — confirm against the id package's seeding.
func TestUpdateWithIdAndNoParamCount(t *testing.T) {
	// Inputs: one macro with an Id, one TRICE_S without, two blanks.
	sOri := []string{
		` TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); // to do: remove _2 `,
		` TRICE_S( "wr:fmtSting %s", x ) `,
		` `,
		` `,
	}
	// Expected outputs: only the TRICE_S gains a generated Id.
	sExp := []string{
		` TRICE32_2( Id(100), "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\r\n", pAddress, Value ); // to do: remove _2 `,
		` TRICE_S( Id(43274), "wr:fmtSting %s", x ) `,
		` `,
		` `,
	}
	// Expected JSON list: the pre-existing ID plus the generated one.
	listExp := `[ { "id": 100, "fmtType": "TRICE32_2", "fmtStrg": "rd_: { (uint32_t*) 0x%08x, 0x%08xu },\\r\\n", "created": 0, "removed": 0 }, { "id": 43274, "fmtType": "TRICE_S", "fmtStrg": "wr:fmtSting %s", "created": 0, "removed": 0 } ]`
	doUpdate(t, sOri, sExp, listExp)
}
package database import ( "portal/model" ) var insertSql = "INSERT INTO portal_resource(`app_id`, `type`, `resource_id`) VALUES(?, ?, ?)" var menuSql = "SELECT" + " r1.id AS DetailId," + " r1.name," + " r1.parent," + " r3.app AS `group`," + " r2.type," + " r2.id AS RESID" + " FROM" + " portal_router AS r1" + " JOIN portal_resource AS r2 ON r1.id = r2.resource_id" + " JOIN portal_app AS r3 ON r2.app_id = r3.uuid" var interfaceSql = "SELECT" + " r1.id AS DetailId," + " r1.name," + " -1 AS `parent`," + " r3.app AS `group`," + " r2.type," + " r2.id AS RESID" + " FROM" + " portal_interface AS r1" + " JOIN portal_resource AS r2 ON r1.id = r2.resource_id" + " JOIN portal_app AS r3 ON r2.app_id = r3.uuid" // Insert record // menu,interface记录关联到资源管理表 func InsertRes(params model.Resource) error { tx, err := ConnDB().Begin() if err != nil { return err } defer tx.Rollback() _, err = tx.Exec(insertSql, params.AppId, params.ResType, params.ResId) if err != nil { return err } return tx.Commit() } // Search menu, interface, return menu, interface // data list func FindAllResource() (*model.MixResource, error) { var ( menu []model.ResCollection inter []model.ResCollection sqlList = []string{menuSql, interfaceSql} ) // 查询Menu, Interface for i := 0; i < len(sqlList); i++ { rows, err := ConnDB().Query(sqlList[i]) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var ele = model.ResCollection{} if err := rows.Scan( &ele.DetailId, &ele.Name, &ele.ParentId, &ele.Group, &ele.ResType, &ele.RESId, ); err != nil { return nil, err } else { switch i { case 0: menu = append(menu, ele) case 1: inter = append(inter, ele) } } } } result := &model.MixResource{Menu: menu, Inter: inter} return result, nil } // remove row func DelResourceRow(resourceId int) (int, error) { _, err := ConnDB().Exec(`DELETE FROM portal_resource WHERE resource_id = ?`, resourceId) if err != nil { return 1, err } return 0, nil }
// Copyright 2023 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package filestore import ( "context" "fmt" "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/alpha" "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured" ) type Instance struct{} func InstanceToUnstructured(r *dclService.Instance) *unstructured.Resource { u := &unstructured.Resource{ STV: unstructured.ServiceTypeVersion{ Service: "filestore", Version: "alpha", Type: "Instance", }, Object: make(map[string]interface{}), } if r.CreateTime != nil { u.Object["createTime"] = *r.CreateTime } if r.Description != nil { u.Object["description"] = *r.Description } if r.Etag != nil { u.Object["etag"] = *r.Etag } var rFileShares []interface{} for _, rFileSharesVal := range r.FileShares { rFileSharesObject := make(map[string]interface{}) if rFileSharesVal.CapacityGb != nil { rFileSharesObject["capacityGb"] = *rFileSharesVal.CapacityGb } if rFileSharesVal.Name != nil { rFileSharesObject["name"] = *rFileSharesVal.Name } var rFileSharesValNfsExportOptions []interface{} for _, rFileSharesValNfsExportOptionsVal := range rFileSharesVal.NfsExportOptions { rFileSharesValNfsExportOptionsObject := make(map[string]interface{}) if rFileSharesValNfsExportOptionsVal.AccessMode != nil { rFileSharesValNfsExportOptionsObject["accessMode"] = 
string(*rFileSharesValNfsExportOptionsVal.AccessMode) } if rFileSharesValNfsExportOptionsVal.AnonGid != nil { rFileSharesValNfsExportOptionsObject["anonGid"] = *rFileSharesValNfsExportOptionsVal.AnonGid } if rFileSharesValNfsExportOptionsVal.AnonUid != nil { rFileSharesValNfsExportOptionsObject["anonUid"] = *rFileSharesValNfsExportOptionsVal.AnonUid } var rFileSharesValNfsExportOptionsValIPRanges []interface{} for _, rFileSharesValNfsExportOptionsValIPRangesVal := range rFileSharesValNfsExportOptionsVal.IPRanges { rFileSharesValNfsExportOptionsValIPRanges = append(rFileSharesValNfsExportOptionsValIPRanges, rFileSharesValNfsExportOptionsValIPRangesVal) } rFileSharesValNfsExportOptionsObject["ipRanges"] = rFileSharesValNfsExportOptionsValIPRanges if rFileSharesValNfsExportOptionsVal.SquashMode != nil { rFileSharesValNfsExportOptionsObject["squashMode"] = string(*rFileSharesValNfsExportOptionsVal.SquashMode) } rFileSharesValNfsExportOptions = append(rFileSharesValNfsExportOptions, rFileSharesValNfsExportOptionsObject) } rFileSharesObject["nfsExportOptions"] = rFileSharesValNfsExportOptions if rFileSharesVal.SourceBackup != nil { rFileSharesObject["sourceBackup"] = *rFileSharesVal.SourceBackup } rFileShares = append(rFileShares, rFileSharesObject) } u.Object["fileShares"] = rFileShares if r.Labels != nil { rLabels := make(map[string]interface{}) for k, v := range r.Labels { rLabels[k] = v } u.Object["labels"] = rLabels } if r.Location != nil { u.Object["location"] = *r.Location } if r.Name != nil { u.Object["name"] = *r.Name } var rNetworks []interface{} for _, rNetworksVal := range r.Networks { rNetworksObject := make(map[string]interface{}) var rNetworksValIPAddresses []interface{} for _, rNetworksValIPAddressesVal := range rNetworksVal.IPAddresses { rNetworksValIPAddresses = append(rNetworksValIPAddresses, rNetworksValIPAddressesVal) } rNetworksObject["ipAddresses"] = rNetworksValIPAddresses var rNetworksValModes []interface{} for _, rNetworksValModesVal := range 
rNetworksVal.Modes { rNetworksValModes = append(rNetworksValModes, string(rNetworksValModesVal)) } rNetworksObject["modes"] = rNetworksValModes if rNetworksVal.Network != nil { rNetworksObject["network"] = *rNetworksVal.Network } if rNetworksVal.ReservedIPRange != nil { rNetworksObject["reservedIPRange"] = *rNetworksVal.ReservedIPRange } rNetworks = append(rNetworks, rNetworksObject) } u.Object["networks"] = rNetworks if r.Project != nil { u.Object["project"] = *r.Project } if r.State != nil { u.Object["state"] = string(*r.State) } if r.StatusMessage != nil { u.Object["statusMessage"] = *r.StatusMessage } if r.Tier != nil { u.Object["tier"] = string(*r.Tier) } return u } func UnstructuredToInstance(u *unstructured.Resource) (*dclService.Instance, error) { r := &dclService.Instance{} if _, ok := u.Object["createTime"]; ok { if s, ok := u.Object["createTime"].(string); ok { r.CreateTime = dcl.String(s) } else { return nil, fmt.Errorf("r.CreateTime: expected string") } } if _, ok := u.Object["description"]; ok { if s, ok := u.Object["description"].(string); ok { r.Description = dcl.String(s) } else { return nil, fmt.Errorf("r.Description: expected string") } } if _, ok := u.Object["etag"]; ok { if s, ok := u.Object["etag"].(string); ok { r.Etag = dcl.String(s) } else { return nil, fmt.Errorf("r.Etag: expected string") } } if _, ok := u.Object["fileShares"]; ok { if s, ok := u.Object["fileShares"].([]interface{}); ok { for _, o := range s { if objval, ok := o.(map[string]interface{}); ok { var rFileShares dclService.InstanceFileShares if _, ok := objval["capacityGb"]; ok { if i, ok := objval["capacityGb"].(int64); ok { rFileShares.CapacityGb = dcl.Int64(i) } else { return nil, fmt.Errorf("rFileShares.CapacityGb: expected int64") } } if _, ok := objval["name"]; ok { if s, ok := objval["name"].(string); ok { rFileShares.Name = dcl.String(s) } else { return nil, fmt.Errorf("rFileShares.Name: expected string") } } if _, ok := objval["nfsExportOptions"]; ok { if s, ok := 
objval["nfsExportOptions"].([]interface{}); ok { for _, o := range s { if objval, ok := o.(map[string]interface{}); ok { var rFileSharesNfsExportOptions dclService.InstanceFileSharesNfsExportOptions if _, ok := objval["accessMode"]; ok { if s, ok := objval["accessMode"].(string); ok { rFileSharesNfsExportOptions.AccessMode = dclService.InstanceFileSharesNfsExportOptionsAccessModeEnumRef(s) } else { return nil, fmt.Errorf("rFileSharesNfsExportOptions.AccessMode: expected string") } } if _, ok := objval["anonGid"]; ok { if i, ok := objval["anonGid"].(int64); ok { rFileSharesNfsExportOptions.AnonGid = dcl.Int64(i) } else { return nil, fmt.Errorf("rFileSharesNfsExportOptions.AnonGid: expected int64") } } if _, ok := objval["anonUid"]; ok { if i, ok := objval["anonUid"].(int64); ok { rFileSharesNfsExportOptions.AnonUid = dcl.Int64(i) } else { return nil, fmt.Errorf("rFileSharesNfsExportOptions.AnonUid: expected int64") } } if _, ok := objval["ipRanges"]; ok { if s, ok := objval["ipRanges"].([]interface{}); ok { for _, ss := range s { if strval, ok := ss.(string); ok { rFileSharesNfsExportOptions.IPRanges = append(rFileSharesNfsExportOptions.IPRanges, strval) } } } else { return nil, fmt.Errorf("rFileSharesNfsExportOptions.IPRanges: expected []interface{}") } } if _, ok := objval["squashMode"]; ok { if s, ok := objval["squashMode"].(string); ok { rFileSharesNfsExportOptions.SquashMode = dclService.InstanceFileSharesNfsExportOptionsSquashModeEnumRef(s) } else { return nil, fmt.Errorf("rFileSharesNfsExportOptions.SquashMode: expected string") } } rFileShares.NfsExportOptions = append(rFileShares.NfsExportOptions, rFileSharesNfsExportOptions) } } } else { return nil, fmt.Errorf("rFileShares.NfsExportOptions: expected []interface{}") } } if _, ok := objval["sourceBackup"]; ok { if s, ok := objval["sourceBackup"].(string); ok { rFileShares.SourceBackup = dcl.String(s) } else { return nil, fmt.Errorf("rFileShares.SourceBackup: expected string") } } r.FileShares = 
append(r.FileShares, rFileShares) } } } else { return nil, fmt.Errorf("r.FileShares: expected []interface{}") } } if _, ok := u.Object["labels"]; ok { if rLabels, ok := u.Object["labels"].(map[string]interface{}); ok { m := make(map[string]string) for k, v := range rLabels { if s, ok := v.(string); ok { m[k] = s } } r.Labels = m } else { return nil, fmt.Errorf("r.Labels: expected map[string]interface{}") } } if _, ok := u.Object["location"]; ok { if s, ok := u.Object["location"].(string); ok { r.Location = dcl.String(s) } else { return nil, fmt.Errorf("r.Location: expected string") } } if _, ok := u.Object["name"]; ok { if s, ok := u.Object["name"].(string); ok { r.Name = dcl.String(s) } else { return nil, fmt.Errorf("r.Name: expected string") } } if _, ok := u.Object["networks"]; ok { if s, ok := u.Object["networks"].([]interface{}); ok { for _, o := range s { if objval, ok := o.(map[string]interface{}); ok { var rNetworks dclService.InstanceNetworks if _, ok := objval["ipAddresses"]; ok { if s, ok := objval["ipAddresses"].([]interface{}); ok { for _, ss := range s { if strval, ok := ss.(string); ok { rNetworks.IPAddresses = append(rNetworks.IPAddresses, strval) } } } else { return nil, fmt.Errorf("rNetworks.IPAddresses: expected []interface{}") } } if _, ok := objval["modes"]; ok { if s, ok := objval["modes"].([]interface{}); ok { for _, ss := range s { if strval, ok := ss.(string); ok { rNetworks.Modes = append(rNetworks.Modes, dclService.InstanceNetworksModesEnum(strval)) } } } else { return nil, fmt.Errorf("rNetworks.Modes: expected []interface{}") } } if _, ok := objval["network"]; ok { if s, ok := objval["network"].(string); ok { rNetworks.Network = dcl.String(s) } else { return nil, fmt.Errorf("rNetworks.Network: expected string") } } if _, ok := objval["reservedIPRange"]; ok { if s, ok := objval["reservedIPRange"].(string); ok { rNetworks.ReservedIPRange = dcl.String(s) } else { return nil, fmt.Errorf("rNetworks.ReservedIPRange: expected string") } } 
r.Networks = append(r.Networks, rNetworks) } } } else { return nil, fmt.Errorf("r.Networks: expected []interface{}") } } if _, ok := u.Object["project"]; ok { if s, ok := u.Object["project"].(string); ok { r.Project = dcl.String(s) } else { return nil, fmt.Errorf("r.Project: expected string") } } if _, ok := u.Object["state"]; ok { if s, ok := u.Object["state"].(string); ok { r.State = dclService.InstanceStateEnumRef(s) } else { return nil, fmt.Errorf("r.State: expected string") } } if _, ok := u.Object["statusMessage"]; ok { if s, ok := u.Object["statusMessage"].(string); ok { r.StatusMessage = dcl.String(s) } else { return nil, fmt.Errorf("r.StatusMessage: expected string") } } if _, ok := u.Object["tier"]; ok { if s, ok := u.Object["tier"].(string); ok { r.Tier = dclService.InstanceTierEnumRef(s) } else { return nil, fmt.Errorf("r.Tier: expected string") } } return r, nil } func GetInstance(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) { c := dclService.NewClient(config) r, err := UnstructuredToInstance(u) if err != nil { return nil, err } r, err = c.GetInstance(ctx, r) if err != nil { return nil, err } return InstanceToUnstructured(r), nil } func ListInstance(ctx context.Context, config *dcl.Config, project string, location string) ([]*unstructured.Resource, error) { c := dclService.NewClient(config) l, err := c.ListInstance(ctx, project, location) if err != nil { return nil, err } var resources []*unstructured.Resource for { for _, r := range l.Items { resources = append(resources, InstanceToUnstructured(r)) } if !l.HasNext() { break } if err := l.Next(ctx, c); err != nil { return nil, err } } return resources, nil } func ApplyInstance(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) { c := dclService.NewClient(config) r, err := UnstructuredToInstance(u) if err != nil { return nil, err } if ush := unstructured.FetchStateHint(opts); 
ush != nil { sh, err := UnstructuredToInstance(ush) if err != nil { return nil, err } opts = append(opts, dcl.WithStateHint(sh)) } r, err = c.ApplyInstance(ctx, r, opts...) if err != nil { return nil, err } return InstanceToUnstructured(r), nil } func InstanceHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) { c := dclService.NewClient(config) r, err := UnstructuredToInstance(u) if err != nil { return false, err } if ush := unstructured.FetchStateHint(opts); ush != nil { sh, err := UnstructuredToInstance(ush) if err != nil { return false, err } opts = append(opts, dcl.WithStateHint(sh)) } opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification)) _, err = c.ApplyInstance(ctx, r, opts...) if err != nil { if _, ok := err.(dcl.ApplyInfeasibleError); ok { return true, nil } return false, err } return false, nil } func DeleteInstance(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error { c := dclService.NewClient(config) r, err := UnstructuredToInstance(u) if err != nil { return err } return c.DeleteInstance(ctx, r) } func InstanceID(u *unstructured.Resource) (string, error) { r, err := UnstructuredToInstance(u) if err != nil { return "", err } return r.ID() } func (r *Instance) STV() unstructured.ServiceTypeVersion { return unstructured.ServiceTypeVersion{ "filestore", "Instance", "alpha", } } func (r *Instance) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) { return nil, unstructured.ErrNoSuchMethod } func (r *Instance) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) { return nil, unstructured.ErrNoSuchMethod } func (r *Instance) DeletePolicyMember(ctx context.Context, config *dcl.Config, 
resource *unstructured.Resource, member *unstructured.Resource) error { return unstructured.ErrNoSuchMethod } func (r *Instance) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) { return nil, unstructured.ErrNoSuchMethod } func (r *Instance) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) { return nil, unstructured.ErrNoSuchMethod } func (r *Instance) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) { return nil, unstructured.ErrNoSuchMethod } func (r *Instance) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) { return GetInstance(ctx, config, resource) } func (r *Instance) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) { return ApplyInstance(ctx, config, resource, opts...) } func (r *Instance) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) { return InstanceHasDiff(ctx, config, resource, opts...) } func (r *Instance) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error { return DeleteInstance(ctx, config, resource) } func (r *Instance) ID(resource *unstructured.Resource) (string, error) { return InstanceID(resource) } func init() { unstructured.Register(&Instance{}) }
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package chain import ( "encoding/base64" "fmt" "strconv" "testing" "time" ) import ( "github.com/stretchr/testify/assert" ) import ( "github.com/apache/dubbo-go/cluster/router" "github.com/apache/dubbo-go/cluster/router/condition" "github.com/apache/dubbo-go/common" "github.com/apache/dubbo-go/common/config" "github.com/apache/dubbo-go/common/constant" "github.com/apache/dubbo-go/common/extension" _ "github.com/apache/dubbo-go/config_center/zookeeper" "github.com/apache/dubbo-go/protocol" "github.com/apache/dubbo-go/protocol/invocation" "github.com/apache/dubbo-go/remoting/zookeeper" ) func TestNewRouterChain(t *testing.T) { ts, z, _, err := zookeeper.NewMockZookeeperClient("test", 15*time.Second) assert.NoError(t, err) err = z.Create("/dubbo/config/dubbo/test-condition.condition-router") assert.NoError(t, err) testyml := `enabled: true force: true runtime: false conditions: - => host != 172.22.3.91 ` _, err = z.Conn.Set("/dubbo/config/dubbo/test-condition.condition-router", []byte(testyml), 0) assert.NoError(t, err) defer ts.Stop() defer z.Close() zkUrl, _ := common.NewURL("zookeeper://127.0.0.1:" + strconv.Itoa(ts.Servers[0].Port)) configuration, err := 
extension.GetConfigCenterFactory("zookeeper").GetDynamicConfiguration(&zkUrl) config.GetEnvInstance().SetDynamicConfiguration(configuration) assert.Nil(t, err) assert.NotNil(t, configuration) chain, err := NewRouterChain(getRouteUrl("test-condition")) assert.Nil(t, err) assert.Equal(t, 1, len(chain.routers)) appRouter := chain.routers[0].(*condition.AppRouter) assert.NotNil(t, appRouter) assert.NotNil(t, appRouter.RouterRule()) rule := appRouter.RouterRule() assert.Equal(t, "", rule.Scope) assert.True(t, rule.Force) assert.True(t, rule.Enabled) assert.True(t, rule.Valid) assert.Equal(t, testyml, rule.RawRule) assert.Equal(t, false, rule.Runtime) assert.Equal(t, false, rule.Dynamic) assert.Equal(t, "", rule.Key) } func TestNewRouterChainURLNil(t *testing.T) { chain, err := NewRouterChain(nil) assert.NoError(t, err) assert.NotNil(t, chain) } func TestRouterChain_AddRouters(t *testing.T) { ts, z, _, err := zookeeper.NewMockZookeeperClient("test", 15*time.Second) assert.NoError(t, err) err = z.Create("/dubbo/config/dubbo/test-condition.condition-router") assert.NoError(t, err) testyml := `enabled: true force: true runtime: false conditions: - => host != 172.22.3.91 ` _, err = z.Conn.Set("/dubbo/config/dubbo/test-condition.condition-router", []byte(testyml), 0) assert.NoError(t, err) defer ts.Stop() defer z.Close() zkUrl, _ := common.NewURL("zookeeper://127.0.0.1:" + strconv.Itoa(ts.Servers[0].Port)) configuration, err := extension.GetConfigCenterFactory("zookeeper").GetDynamicConfiguration(&zkUrl) config.GetEnvInstance().SetDynamicConfiguration(configuration) chain, err := NewRouterChain(getConditionRouteUrl("test-condition")) assert.Nil(t, err) assert.Equal(t, 2, len(chain.routers)) url := getConditionRouteUrl("test-condition") assert.NotNil(t, url) factory := extension.GetRouterFactory(url.Protocol) r, err := factory.NewRouter(url) assert.Nil(t, err) assert.NotNil(t, r) routers := make([]router.Router, 0) routers = append(routers, r) chain.AddRouters(routers) 
assert.Equal(t, 3, len(chain.routers)) } func TestRouterChain_Route(t *testing.T) { ts, z, _, err := zookeeper.NewMockZookeeperClient("test", 15*time.Second) defer ts.Stop() defer z.Close() zkUrl, _ := common.NewURL("zookeeper://127.0.0.1:" + strconv.Itoa(ts.Servers[0].Port)) configuration, err := extension.GetConfigCenterFactory("zookeeper").GetDynamicConfiguration(&zkUrl) config.GetEnvInstance().SetDynamicConfiguration(configuration) chain, err := NewRouterChain(getConditionRouteUrl("test-condition")) assert.Nil(t, err) assert.Equal(t, 1, len(chain.routers)) url := getConditionRouteUrl("test-condition") assert.NotNil(t, url) invokers := []protocol.Invoker{} dubboURL, _ := common.NewURL(fmt.Sprintf("dubbo://1.2.3.4:20000/com.foo.BarService")) invokers = append(invokers, protocol.NewBaseInvoker(dubboURL)) targetURL, _ := common.NewURL(fmt.Sprintf("consumer://1.1.1.1/com.foo.BarService")) inv := &invocation.RPCInvocation{} finalInvokers := chain.Route(invokers, &targetURL, inv) assert.Equal(t, 1, len(finalInvokers)) } func TestRouterChain_Route_AppRouter(t *testing.T) { ts, z, _, err := zookeeper.NewMockZookeeperClient("test", 15*time.Second) assert.NoError(t, err) err = z.Create("/dubbo/config/dubbo/test-condition.condition-router") assert.NoError(t, err) testyml := `enabled: true force: true runtime: false conditions: - => host = 1.1.1.1 => host != 1.2.3.4 ` _, err = z.Conn.Set("/dubbo/config/dubbo/test-condition.condition-router", []byte(testyml), 0) assert.NoError(t, err) defer ts.Stop() defer z.Close() zkUrl, _ := common.NewURL("zookeeper://127.0.0.1:" + strconv.Itoa(ts.Servers[0].Port)) configuration, err := extension.GetConfigCenterFactory("zookeeper").GetDynamicConfiguration(&zkUrl) config.GetEnvInstance().SetDynamicConfiguration(configuration) chain, err := NewRouterChain(getConditionRouteUrl("test-condition")) assert.Nil(t, err) assert.Equal(t, 2, len(chain.routers)) invokers := []protocol.Invoker{} dubboURL, _ := 
common.NewURL(fmt.Sprintf("dubbo://1.2.3.4:20000/com.foo.BarService")) invokers = append(invokers, protocol.NewBaseInvoker(dubboURL)) targetURL, _ := common.NewURL(fmt.Sprintf("consumer://1.1.1.1/com.foo.BarService")) inv := &invocation.RPCInvocation{} finalInvokers := chain.Route(invokers, &targetURL, inv) assert.Equal(t, 0, len(finalInvokers)) } func TestRouterChain_Route_NoRoute(t *testing.T) { ts, z, _, err := zookeeper.NewMockZookeeperClient("test", 15*time.Second) defer ts.Stop() defer z.Close() zkUrl, _ := common.NewURL("zookeeper://127.0.0.1:" + strconv.Itoa(ts.Servers[0].Port)) configuration, err := extension.GetConfigCenterFactory("zookeeper").GetDynamicConfiguration(&zkUrl) config.GetEnvInstance().SetDynamicConfiguration(configuration) chain, err := NewRouterChain(getConditionNoRouteUrl("test-condition")) assert.Nil(t, err) assert.Equal(t, 1, len(chain.routers)) url := getConditionRouteUrl("test-condition") assert.NotNil(t, url) invokers := []protocol.Invoker{} dubboURL, _ := common.NewURL(fmt.Sprintf("dubbo://1.2.3.4:20000/com.foo.BarService")) invokers = append(invokers, protocol.NewBaseInvoker(dubboURL)) targetURL, _ := common.NewURL(fmt.Sprintf("consumer://1.1.1.1/com.foo.BarService")) inv := &invocation.RPCInvocation{} finalInvokers := chain.Route(invokers, &targetURL, inv) assert.Equal(t, 0, len(finalInvokers)) } func getConditionNoRouteUrl(applicationKey string) *common.URL { url, _ := common.NewURL("condition://0.0.0.0/com.foo.BarService") url.AddParam("application", applicationKey) url.AddParam("force", "true") rule := base64.URLEncoding.EncodeToString([]byte("host = 1.1.1.1 => host != 1.2.3.4")) url.AddParam(constant.RULE_KEY, rule) return &url } func getConditionRouteUrl(applicationKey string) *common.URL { url, _ := common.NewURL("condition://0.0.0.0/com.foo.BarService") url.AddParam("application", applicationKey) url.AddParam("force", "true") rule := base64.URLEncoding.EncodeToString([]byte("host = 1.1.1.1 => host = 1.2.3.4")) 
url.AddParam(constant.RULE_KEY, rule) return &url } func getRouteUrl(applicationKey string) *common.URL { url, _ := common.NewURL("condition://0.0.0.0/com.foo.BarService") url.AddParam("application", applicationKey) url.AddParam("force", "true") return &url }
package msg

import (
	"github.com/name5566/leaf/network/protobuf"
)

// Processor is the module-wide message processor; this build speaks protobuf.
var Processor = protobuf.NewProcessor() // protobuf

// init registers every message type this module handles with the processor.
// Number itself is not defined in this file — presumably generated from the
// .proto definition elsewhere in the package; TODO confirm.
func init() {
	Processor.Register(&Number{})
	// Processor.Register(&Number{})
	// JSON protocol (alternative, currently disabled):
	// var Processor = json.NewProcessor() // json
}

// Number is a struct that defines the format of a JSON message; the message
// name is Number. (Kept commented out while the protobuf variant is in use.)
//type Number struct {
//	Num int
//}
// NOTE: Generated By hrp v4.3.4, DO NOT EDIT!

// Package main is an hrp (HttpRunner+) function plugin binary: it exposes the
// named Go functions to the test runner over the funplugin RPC protocol.
package main

import (
	"github.com/httprunner/funplugin/fungo"
)

// main registers each plugin function under its exported name, then blocks
// serving RPC requests via fungo.Serve. The registered functions are defined
// in sibling files of this generated package.
func main() {
	fungo.Register("SumTwoInt", SumTwoInt)
	fungo.Register("SumInts", SumInts)
	fungo.Register("Sum", Sum)
	fungo.Register("SetupHookExample", SetupHookExample)
	fungo.Register("TeardownHookExample", TeardownHookExample)
	fungo.Register("GetUserAgent", GetUserAgent)
	fungo.Serve()
}
package routers import ( "github.com/astaxie/beego" "github.com/astaxie/beego/context/param" ) func init() { beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"], beego.ControllerComments{ Method: "GetAll", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"], beego.ControllerComments{ Method: "GetOne", Router: "/:id", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"], beego.ControllerComments{ Method: "Analysis", Router: "/analysis", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:AlarmController"], beego.ControllerComments{ Method: "Export", Router: "/export", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"], beego.ControllerComments{ Method: "Login", Router: "/login", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"] = 
append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"], beego.ControllerComments{ Method: "Logout", Router: "/logout", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"], beego.ControllerComments{ Method: "Receive", Router: "/receive", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:BeforeUserController"], beego.ControllerComments{ Method: "Webhook", Router: "/webhook", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:EchartController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:EchartController"], beego.ControllerComments{ Method: "GetHistory", Router: "/history", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ExpController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ExpController"], beego.ControllerComments{ Method: "GetItemHistory", Router: "/history", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ExpController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ExpController"], beego.ControllerComments{ Method: "Inspect", Router: "/inspect", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), 
Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ExpController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ExpController"], beego.ControllerComments{ Method: "GetItemTrend", Router: "/trend", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:GraphController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:GraphController"], beego.ControllerComments{ Method: "Post", Router: "/:hostid", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:GraphController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:GraphController"], beego.ControllerComments{ Method: "Exp", Router: "/exp", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HistoryController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HistoryController"], beego.ControllerComments{ Method: "GetHistoryByItemID", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"], beego.ControllerComments{ Method: "Post", Router: "/", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"], beego.ControllerComments{ Method: "GetAll", Router: 
"/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"], beego.ControllerComments{ Method: "GetOne", Router: "/:id", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostController"], beego.ControllerComments{ Method: "GetApplication", Router: "/application/:hostid", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostGroupsController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostGroupsController"], beego.ControllerComments{ Method: "GetAll", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostGroupsController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostGroupsController"], beego.ControllerComments{ Method: "GetList", Router: "/list", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostGroupsController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:HostGroupsController"], beego.ControllerComments{ Method: "GetHostByGroupID", Router: "/list/:id", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ImagesController"] = 
append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ImagesController"], beego.ControllerComments{ Method: "GetOne", Router: "/:id", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:IndexController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:IndexController"], beego.ControllerComments{ Method: "GetInfo", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ItemController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ItemController"], beego.ControllerComments{ Method: "GetItemByKey", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ItemController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ItemController"], beego.ControllerComments{ Method: "GetAllItemByKey", Router: "/list", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ManagerController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ManagerController"], beego.ControllerComments{ Method: "GetOne", Router: "/:id", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ManagerController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ManagerController"], beego.ControllerComments{ Method: "Chpwd", Router: "/chpwd", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) 
beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ManagerController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ManagerController"], beego.ControllerComments{ Method: "Info", Router: "/info", AllowHTTPMethods: []string{"post"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ProblemsController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:ProblemsController"], beego.ControllerComments{ Method: "GetInfo", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TemplateController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TemplateController"], beego.ControllerComments{ Method: "GetInfo", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TemplateController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TemplateController"], beego.ControllerComments{ Method: "GetAll", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TrendController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TrendController"], beego.ControllerComments{ Method: "GetTrendByItemID", Router: "/", AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TriggersController"] = append(beego.GlobalControllerRouter["github.com/canghai908/zbxtable/controllers:TriggersController"], beego.ControllerComments{ Method: "GetInfo", Router: "/", 
AllowHTTPMethods: []string{"get"}, MethodParams: param.Make(), Filters: nil, Params: nil}) }
package data

import "github.com/aren55555/shepherd-backend/models"

// Store abstracts the persistence layer for forms and applications so that
// handlers can be tested against alternative implementations.
type Store interface {
	// GetForms returns every stored form.
	GetForms() []*models.Form
	// GetFormBy looks up a single form by its string key (presumably an
	// ID or slug — confirm with the concrete implementations).
	GetFormBy(string) *models.Form
	// GetApplicationBy looks up a single application by its string key.
	GetApplicationBy(string) *models.Application
	// CreateApplicationFrom creates a new application derived from the
	// given identifier (presumably a form ID — TODO confirm).
	CreateApplicationFrom(string) (*models.Application, error)
	// UpdateApplicationFormData stores the raw form-data payload for the
	// identified application and returns the updated record.
	UpdateApplicationFormData(string, []byte) (*models.Application, error)
	// CompleteApplication marks the identified application as complete.
	CompleteApplication(string) error
}
package trello

// Member represents a Trello member record as returned by the Trello REST
// API; field names mirror the API's JSON payload.
type Member struct {
	Id         string `json:"id"`
	AvatarHash string `json:"avatarHash"`
	// Bio carries the member's profile bio. Fixed: the tag previously read
	// `json:"string"`, which mapped this field to a nonexistent "string"
	// key and silently dropped the API's "bio" value on unmarshal.
	Bio     string `json:"bio"`
	BioData struct {
		Emoji struct {
		} `json:"emoji"`
	} `json:"bioData"`
	Confirmed       bool     `json:"confirmed"`
	FullName        string   `json:"fullName"`
	IdPremOrgsAdmin []string `json:"idPremOrgsAdmin"`
	Initials        string   `json:"initials"`
	MemberType      string   `json:"memberType"`
	Products        []int    `json:"products"`
	Status          string   `json:"status"`
	Url             string   `json:"url"`
	Username        string   `json:"username"`
}
package deployment

import (
	"github.com/cloudfoundry-incubator/candiedyaml"
	bosherr "github.com/cloudfoundry/bosh-agent/errors"
	boshsys "github.com/cloudfoundry/bosh-agent/system"
)

// boshDeploymentParser reads and parses a BOSH deployment manifest from the
// injected filesystem.
type boshDeploymentParser struct {
	fs boshsys.FileSystem
}

// NewBoshDeploymentParser returns a ManifestParser backed by the given
// filesystem.
func NewBoshDeploymentParser(fs boshsys.FileSystem) ManifestParser {
	return boshDeploymentParser{fs: fs}
}

// boshDeploymentManifest mirrors the YAML layout of a deployment manifest;
// only the fields consumed by Parse are declared.
type boshDeploymentManifest struct {
	Name          string
	Update        UpdateSpec
	Networks      []Network
	ResourcePools []ResourcePool `yaml:"resource_pools"`
	Jobs          []Job
}

// UpdateSpec holds the optional update_watch_time manifest entry; a nil
// pointer means the key was absent and defaults apply.
type UpdateSpec struct {
	UpdateWatchTime *string `yaml:"update_watch_time"`
}

// boshDeploymentDefaults seeds Parse's result; the 0–300000 watch-time range
// is presumably milliseconds — confirm against the WatchTime type.
var boshDeploymentDefaults = Deployment{
	Update: Update{
		UpdateWatchTime: WatchTime{
			Start: 0,
			End:   300000,
		},
	},
}

// Parse reads the manifest at path, unmarshals it as YAML, and returns a
// Deployment built from the defaults overlaid with the manifest's values.
// The update watch time is only overridden when the manifest supplies one.
func (p boshDeploymentParser) Parse(path string) (Deployment, error) {
	contents, err := p.fs.ReadFile(path)
	if err != nil {
		return Deployment{}, bosherr.WrapError(err, "Reading file %s", path)
	}
	depManifest := boshDeploymentManifest{}
	err = candiedyaml.Unmarshal(contents, &depManifest)
	if err != nil {
		return Deployment{}, bosherr.WrapError(err, "Unmarshalling BOSH deployment manifest")
	}
	// Start from the defaults so an omitted update section keeps the
	// 0–300000 watch time.
	deployment := boshDeploymentDefaults
	deployment.Name = depManifest.Name
	deployment.Networks = depManifest.Networks
	deployment.ResourcePools = depManifest.ResourcePools
	deployment.Jobs = depManifest.Jobs
	if depManifest.Update.UpdateWatchTime != nil {
		updateWatchTime, err := NewWatchTime(*depManifest.Update.UpdateWatchTime)
		if err != nil {
			return Deployment{}, bosherr.WrapError(err, "Parsing update watch time")
		}
		deployment.Update = Update{
			UpdateWatchTime: updateWatchTime,
		}
	}
	return deployment, nil
}
package problem0441 func arrangeCoins(n int) int { low := 1 high := n for low <= high{ mid := low + (high - low) / 2 value := (1+mid) * mid / 2 if value == n { return mid }else if value < n { low = mid + 1 }else{ high = mid - 1 } } return high }
package config

import (
	"fmt"
	"io/ioutil"
	"os"

	"gopkg.in/yaml.v2"
)

// Recognized application environment names, selected via GO_APP_ENV.
const Dev = "development"
const Test = "test"
const Prod = "production"

// AppConfig holds the per-environment settings loaded from
// config/settings.yml.
type AppConfig struct {
	DBUrl    string `yaml:"database_url"`
	Salt     string `yaml:"salt"`
	S3Bucket string `yaml:"bucket"`
}

// GetConfig loads the settings for the current environment.
func GetConfig() (*AppConfig, error) {
	return readSettings(GetEnv())
}

// GetEnv reports the current environment name, defaulting to Dev when
// GO_APP_ENV is unset or empty.
func GetEnv() string {
	if env := os.Getenv("GO_APP_ENV"); env != "" {
		return env
	}
	return Dev
}

// isEnv reports whether the current environment matches name.
func isEnv(name string) bool {
	return GetEnv() == name
}

// IsDev reports whether the app runs in the development environment.
func IsDev() bool {
	return isEnv(Dev)
}

// IsTest reports whether the app runs in the test environment.
func IsTest() bool {
	return isEnv(Test)
}

// IsProd reports whether the app runs in the production environment.
func IsProd() bool {
	return isEnv(Prod)
}

// readSettings parses config/settings.yml (with environment variables
// expanded) and returns the section keyed by env, or an error when the
// section is missing.
func readSettings(env string) (*AppConfig, error) {
	raw, err := ioutil.ReadFile("config/settings.yml")
	if err != nil {
		return nil, err
	}
	expanded := []byte(os.ExpandEnv(string(raw)))
	confs := make(map[string]*AppConfig)
	if err := yaml.Unmarshal(expanded, confs); err != nil {
		return nil, err
	}
	if conf, ok := confs[env]; ok && conf != nil {
		return conf, nil
	}
	return nil, fmt.Errorf("environment '%s' is not found on config/settings.yml", env)
}
package main

import (
	"fmt"
	"math/rand"
	"os"

	"github.com/gorilla/websocket"
)

// address is the host on which all three services (payserver :7000,
// matchmaker :8000, gameserver on a dynamic port) are expected to listen.
var address = "ws://10.64.221.117"

// main drives an end-to-end smoke test: authenticate and pay, matchmake for
// a game port, then play until the connection ends.
func main() {
	token := connectToPayserver()
	port := enterMatchmaker(token)
	outcome := enterGame(token, port)
	fmt.Println("Test concluded, game outcome: " + outcome)
}

// connectToPayserver dials the payserver, reads the wallet and token
// messages, waits for the payment to be confirmed, and returns the auth
// token. The process exits on dial failure or unpaid status.
// ReadJSON errors are deliberately ignored: this is a best-effort test
// client that fails via the final status check.
func connectToPayserver() string {
	fmt.Println("Connecting to payserver")
	conn, _, err := websocket.DefaultDialer.Dial(address+":7000/ws", nil)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// Fixed: Close was deferred before the error check, so a failed dial
	// left conn nil and the deferred Close panicked.
	defer conn.Close()

	// Get Wallet
	message := map[string]string{}
	conn.ReadJSON(&message)
	fmt.Println(message)

	// Get Token
	message = map[string]string{}
	conn.ReadJSON(&message)
	fmt.Println(message)
	token := message["token"]

	// Wait for status: paid
	message = map[string]string{}
	conn.ReadJSON(&message)
	status := message["status"]
	if status != "paid" {
		fmt.Println("FAIL")
		os.Exit(1)
		return ""
	}
	return token
}

// enterMatchmaker registers the token with the matchmaker and blocks until
// a message carrying the assigned gameserver port arrives, which it returns.
func enterMatchmaker(token string) string {
	fmt.Println("Connecting to matchmaker")
	conn, _, err := websocket.DefaultDialer.Dial(address+":8000/ws", nil)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// Fixed: defer moved after the error check (see connectToPayserver).
	defer conn.Close()
	conn.WriteJSON(map[string]string{
		"Token": token,
	})
	for {
		fmt.Println("Waiting for matchmaker port/status")
		// Status or Port
		message := map[string]string{}
		conn.ReadJSON(&message)
		if _, ok := message["Status"]; ok {
			fmt.Println(message)
		}
		if _, ok := message["Port"]; ok {
			fmt.Println(message)
			return message["Port"]
		}
	}
}

// enterGame connects to the gameserver on the given port, authenticates
// with the token, and answers each Leaderboard frame with randomized input.
// The receive loop never terminates on its own; the function only returns
// if the compiler-required fallthrough is ever reached.
func enterGame(token string, port string) string {
	fmt.Println("Connecting to gameserver")
	conn, _, err := websocket.DefaultDialer.Dial(address+":"+port+"/ws", nil)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// Fixed: defer moved after the error check (see connectToPayserver).
	defer conn.Close()
	conn.WriteJSON(map[string]string{
		"Token": token,
	})
	frames := 0
	for {
		message := map[string]string{}
		conn.ReadJSON(&message)
		fmt.Println(message)
		if _, ok := message["Leaderboard"]; ok {
			// NOTE(review): rand.Intn(1) always returns 0, so
			// CurrentSprint is always true — was Intn(2) intended?
			conn.WriteJSON(map[string]interface{}{
				"CurrentZoomLevel": rand.Intn(24),
				"CurrentSprint":    rand.Intn(1) == 0,
				"CurrentDirection": rand.Intn(4),
			})
			frames++
			fmt.Println(frames)
		}
	}
	return "unknown"
}
/* * Copyright (c) 2019. Alexey Shtepa <as.shtepa@gmail.com> LICENSE MIT * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. */ package uuid import ( "testing" "github.com/satori/go.uuid" ) func BenchmarkGenerateBytesUUID(b *testing.B) { for i := 1; i < b.N; i++ { _ = GenerateBytesUUID() } b.ReportAllocs() } func BenchmarkNewV4(b *testing.B) { for i := 1; i < b.N; i++ { _ = uuid.NewV4() } b.ReportAllocs() } func BenchmarkGenerateIntUUID(b *testing.B) { for i := 1; i < b.N; i++ { _ = GenerateIntUUID() } b.ReportAllocs() } func BenchmarkGenerateUUID(b *testing.B) { for i := 1; i < b.N; i++ { _ = GenerateUUID() } b.ReportAllocs() } func TestGenerateBytesUUID(t *testing.T) { u := GenerateBytesUUID() t.Logf("Generated bytes UUID: %v", u) } func TestGenerateIntUUID(t *testing.T) { ui := GenerateIntUUID() t.Logf("Generated big.Int UUID: %v", ui) } func TestGenerateUUID(t *testing.T) { us := GenerateUUID() t.Logf("Generated string UUID: %v", us) } func TestUUIDFromString(t *testing.T) { u1 := GenerateBytesUUID() us := idBytesToStr(u1) u2, err := UUIDFromString(us) if err != nil { t.Fatalf("Failed to convert string to UUID: %v", err) } t.Logf("Generated: %v -> %s -> %v", u1, us, u2) }
package main

import "fmt"

// main demonstrates exchange on two sample slices.
func main() {
	fmt.Println(exchange([]int{1, 2, 3, 4}))
	fmt.Println(exchange([]int{2, 16, 3, 5, 13, 1, 16, 1, 12, 18, 11, 8, 11, 11, 5, 1}))
}

// exchange reorders nums in place so that every odd value ends up before
// every even value, and returns the same slice. Two cursors walk the slice:
// oddEnd marks the next slot that should hold an odd value, scan reads ahead;
// an odd value at scan is swapped back to oddEnd, and oddEnd advances only
// while it sits on an odd value.
func exchange(nums []int) []int {
	oddEnd, scan := 0, 1
	for oddEnd < len(nums) && scan < len(nums) {
		if nums[scan]%2 != 0 {
			nums[oddEnd], nums[scan] = nums[scan], nums[oddEnd]
		}
		scan++
		if nums[oddEnd]%2 == 1 {
			oddEnd++
		}
	}
	return nums
}
package msgtypetype

import (
	"encoding/xml"
	"os"
	"time"
)

// CDATAText wraps raw inner XML so string payloads can be emitted as CDATA.
type CDATAText struct {
	Text string `xml:",innerxml"`
}

// MenErrorResponse is the JSON format WeChat returns when creating a menu.
type MenErrorResponse struct {
	ErrorCode string
	ErrMsg    string
}

// msgBase holds the fields common to all incoming WeChat messages.
// NOTE(review): CreateTime is typed time.Duration but presumably carries a
// WeChat Unix timestamp — confirm before doing arithmetic on it.
type msgBase struct {
	ToUserName   string
	FromUserName string
	CreateTime   time.Duration
	MsgType      string
	Content      string
}

// RequestBody is the format of an ordinary (plain) request message.
type RequestBody struct {
	XMLName xml.Name `xml:"xml"`
	msgBase
	MsgId    int
	Event    string
	EventKey string
}

// repMsgBase mirrors msgBase with CDATA-wrapped fields for responses.
type repMsgBase struct {
	ToUserName   CDATAText
	FromUserName CDATAText
	CreateTime   time.Duration
	MsgType      CDATAText
	Content      CDATAText
}

// TextReponseBody is the format of an ordinary (plain text) response message.
// (The "Reponse" misspelling is preserved — callers depend on the name.)
type TextReponseBody struct {
	XMLName xml.Name `xml:"xml"`
	repMsgBase
}

// ClickResponse is the response format for menu-click events.
type ClickResponse struct {
	XMLName xml.Name `xml:"xml"`
	repMsgBase
	Event    CDATAText
	EventKey CDATAText
}

// Articles is the news (rich-media) material format.
type Articles struct {
	Title            string `json:"title"`              // title
	ThumbMediaId     string `json:"thumb_media_id"`     // media ID of the news item's cover image
	Author           string `json:"author"`             // author
	Digest           string `json:"digest"`             // summary of the news message
	ShowCoverPic     int    `json:"show_cover_pic"`     // whether to show the cover (presumably 0/1 — confirm with the WeChat API)
	Content          string `json:"content"`            // body content of the news message
	ContentSourceUrl string `json:"content_source_url"` // original-article URL opened by the "read original" link
}

// ArticlesReq is the request payload for uploading news material.
type ArticlesReq struct {
	Articles []Articles `json:"articles"`
}

// ArticlesResp is the response payload after uploading news material.
type ArticlesResp struct {
	MediaId    string `json:"media_id"`
	NewsType   string `json:"type"`
	Created_at int64  `json:"created_at"`
}

// Material pairs an open file handle with its upload filename.
type Material struct {
	File     *os.File
	Filename string
}

// MaterialReq is the request payload for uploading a binary material.
type MaterialReq struct {
	Media    []byte `json:"media"`
	Filename string `json:"filename"`
}
package main

import (
	"fmt"
)

// Demonstrates package-level constant declaration order: unlike local
// variables, package-level consts may reference identifiers declared later
// in the file, so HELLO4 = HELLO3 before HELLO3's declaration is legal Go.
const (
	HELLO1 = "123"
	HELLO2 = HELLO1
	HELLO4 = HELLO3
	HELLO3 = "456"
)

func main() {
	fmt.Println("hello world!", HELLO3, HELLO2)
	// NOTE(review): `aa` is not declared anywhere in this file; it must be
	// defined in a sibling file of package main, otherwise this does not
	// compile — verify. -5/2 is Go integer division truncating toward
	// zero, i.e. -2.
	fmt.Println("IMY********", aa, -5/2)
}
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ast_test import ( "fmt" "testing" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" "github.com/stretchr/testify/require" ) type visitor struct{} func (v visitor) Enter(in ast.Node) (ast.Node, bool) { return in, false } func (v visitor) Leave(in ast.Node) (ast.Node, bool) { return in, true } type visitor1 struct { visitor } func (visitor1) Enter(in ast.Node) (ast.Node, bool) { return in, true } func TestMiscVisitorCover(t *testing.T) { valueExpr := ast.NewValueExpr(42, mysql.DefaultCharset, mysql.DefaultCollationName) stmts := []ast.Node{ &ast.AdminStmt{}, &ast.AlterUserStmt{}, &ast.BeginStmt{}, &ast.BinlogStmt{}, &ast.CommitStmt{}, &ast.CompactTableStmt{Table: &ast.TableName{}}, &ast.CreateUserStmt{}, &ast.DeallocateStmt{}, &ast.DoStmt{}, &ast.ExecuteStmt{UsingVars: []ast.ExprNode{valueExpr}}, &ast.ExplainStmt{Stmt: &ast.ShowStmt{}}, &ast.GrantStmt{}, &ast.PrepareStmt{SQLVar: &ast.VariableExpr{Value: valueExpr}}, &ast.RollbackStmt{}, &ast.SetPwdStmt{}, &ast.SetStmt{Variables: []*ast.VariableAssignment{ { Value: valueExpr, }, }}, &ast.UseStmt{}, &ast.AnalyzeTableStmt{ TableNames: []*ast.TableName{ {}, }, }, &ast.FlushStmt{}, &ast.PrivElem{}, &ast.VariableAssignment{Value: valueExpr}, &ast.KillStmt{}, &ast.DropStatsStmt{ Tables: []*ast.TableName{ {}, }, }, &ast.ShutdownStmt{}, } for _, v := range stmts { v.Accept(visitor{}) v.Accept(visitor1{}) } } func 
TestDDLVisitorCoverMisc(t *testing.T) { sql := ` create table t (c1 smallint unsigned, c2 int unsigned); alter table t add column a smallint unsigned after b; alter table t add column (a int, constraint check (a > 0)); create index t_i on t (id); create database test character set utf8; drop database test; drop index t_i on t; drop table t; truncate t; create table t ( jobAbbr char(4) not null, constraint foreign key (jobabbr) references ffxi_jobtype (jobabbr) on delete cascade on update cascade ); ` parse := parser.New() stmts, _, err := parse.Parse(sql, "", "") require.NoError(t, err) for _, stmt := range stmts { stmt.Accept(visitor{}) stmt.Accept(visitor1{}) } } func TestDMLVistorCover(t *testing.T) { sql := `delete from somelog where user = 'jcole' order by timestamp_column limit 1; delete t1, t2 from t1 inner join t2 inner join t3 where t1.id=t2.id and t2.id=t3.id; select * from t where exists(select * from t k where t.c = k.c having sum(c) = 1); insert into t_copy select * from t where t.x > 5; (select /*+ TIDB_INLJ(t1) */ a from t1 where a=10 and b=1) union (select /*+ TIDB_SMJ(t2) */ a from t2 where a=11 and b=2) order by a limit 10; update t1 set col1 = col1 + 1, col2 = col1; show create table t; load data infile '/tmp/t.csv' into table t fields terminated by 'ab' enclosed by 'b'; import into t from '/file.csv'` p := parser.New() stmts, _, err := p.Parse(sql, "", "") require.NoError(t, err) for _, stmt := range stmts { stmt.Accept(visitor{}) stmt.Accept(visitor1{}) } } // test Change Pump or drainer status sql parser func TestChangeStmt(t *testing.T) { sql := `change pump to node_state='paused' for node_id '127.0.0.1:8249'; change drainer to node_state='paused' for node_id '127.0.0.1:8249'; shutdown;` p := parser.New() stmts, _, err := p.Parse(sql, "", "") require.NoError(t, err) for _, stmt := range stmts { stmt.Accept(visitor{}) stmt.Accept(visitor1{}) } } func TestSensitiveStatement(t *testing.T) { positive := []ast.StmtNode{ &ast.SetPwdStmt{}, 
&ast.CreateUserStmt{}, &ast.AlterUserStmt{}, &ast.GrantStmt{}, } for i, stmt := range positive { _, ok := stmt.(ast.SensitiveStmtNode) require.Truef(t, ok, "%d, %#v fail", i, stmt) } negative := []ast.StmtNode{ &ast.DropUserStmt{}, &ast.RevokeStmt{}, &ast.AlterTableStmt{}, &ast.CreateDatabaseStmt{}, &ast.CreateIndexStmt{}, &ast.CreateTableStmt{}, &ast.DropDatabaseStmt{}, &ast.DropIndexStmt{}, &ast.DropTableStmt{}, &ast.RenameTableStmt{}, &ast.TruncateTableStmt{}, } for _, stmt := range negative { _, ok := stmt.(ast.SensitiveStmtNode) require.False(t, ok) } } func TestUserSpec(t *testing.T) { hashString := "*3D56A309CD04FA2EEF181462E59011F075C89548" u := ast.UserSpec{ User: &auth.UserIdentity{ Username: "test", }, AuthOpt: &ast.AuthOption{ ByAuthString: false, AuthString: "xxx", HashString: hashString, }, } pwd, ok := u.EncodedPassword() require.True(t, ok) require.Equal(t, u.AuthOpt.HashString, pwd) u.AuthOpt.HashString = "not-good-password-format" _, ok = u.EncodedPassword() require.False(t, ok) u.AuthOpt.ByAuthString = true pwd, ok = u.EncodedPassword() require.True(t, ok) require.Equal(t, hashString, pwd) u.AuthOpt.AuthString = "" pwd, ok = u.EncodedPassword() require.True(t, ok) require.Equal(t, "", pwd) } func TestTableOptimizerHintRestore(t *testing.T) { testCases := []NodeRestoreTestCase{ {"USE_INDEX(t1 c1)", "USE_INDEX(`t1` `c1`)"}, {"USE_INDEX(test.t1 c1)", "USE_INDEX(`test`.`t1` `c1`)"}, {"USE_INDEX(@sel_1 t1 c1)", "USE_INDEX(@`sel_1` `t1` `c1`)"}, {"USE_INDEX(t1@sel_1 c1)", "USE_INDEX(`t1`@`sel_1` `c1`)"}, {"USE_INDEX(test.t1@sel_1 c1)", "USE_INDEX(`test`.`t1`@`sel_1` `c1`)"}, {"USE_INDEX(test.t1@sel_1 partition(p0) c1)", "USE_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"}, {"FORCE_INDEX(t1 c1)", "FORCE_INDEX(`t1` `c1`)"}, {"FORCE_INDEX(test.t1 c1)", "FORCE_INDEX(`test`.`t1` `c1`)"}, {"FORCE_INDEX(@sel_1 t1 c1)", "FORCE_INDEX(@`sel_1` `t1` `c1`)"}, {"FORCE_INDEX(t1@sel_1 c1)", "FORCE_INDEX(`t1`@`sel_1` `c1`)"}, {"FORCE_INDEX(test.t1@sel_1 c1)", 
"FORCE_INDEX(`test`.`t1`@`sel_1` `c1`)"}, {"FORCE_INDEX(test.t1@sel_1 partition(p0) c1)", "FORCE_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"}, {"IGNORE_INDEX(t1 c1)", "IGNORE_INDEX(`t1` `c1`)"}, {"IGNORE_INDEX(@sel_1 t1 c1)", "IGNORE_INDEX(@`sel_1` `t1` `c1`)"}, {"IGNORE_INDEX(t1@sel_1 c1)", "IGNORE_INDEX(`t1`@`sel_1` `c1`)"}, {"IGNORE_INDEX(t1@sel_1 partition(p0, p1) c1)", "IGNORE_INDEX(`t1`@`sel_1` PARTITION(`p0`, `p1`) `c1`)"}, {"ORDER_INDEX(t1 c1)", "ORDER_INDEX(`t1` `c1`)"}, {"ORDER_INDEX(test.t1 c1)", "ORDER_INDEX(`test`.`t1` `c1`)"}, {"ORDER_INDEX(@sel_1 t1 c1)", "ORDER_INDEX(@`sel_1` `t1` `c1`)"}, {"ORDER_INDEX(t1@sel_1 c1)", "ORDER_INDEX(`t1`@`sel_1` `c1`)"}, {"ORDER_INDEX(test.t1@sel_1 c1)", "ORDER_INDEX(`test`.`t1`@`sel_1` `c1`)"}, {"ORDER_INDEX(test.t1@sel_1 partition(p0) c1)", "ORDER_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"}, {"NO_ORDER_INDEX(t1 c1)", "NO_ORDER_INDEX(`t1` `c1`)"}, {"NO_ORDER_INDEX(test.t1 c1)", "NO_ORDER_INDEX(`test`.`t1` `c1`)"}, {"NO_ORDER_INDEX(@sel_1 t1 c1)", "NO_ORDER_INDEX(@`sel_1` `t1` `c1`)"}, {"NO_ORDER_INDEX(t1@sel_1 c1)", "NO_ORDER_INDEX(`t1`@`sel_1` `c1`)"}, {"NO_ORDER_INDEX(test.t1@sel_1 c1)", "NO_ORDER_INDEX(`test`.`t1`@`sel_1` `c1`)"}, {"NO_ORDER_INDEX(test.t1@sel_1 partition(p0) c1)", "NO_ORDER_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"}, {"TIDB_SMJ(`t1`)", "TIDB_SMJ(`t1`)"}, {"TIDB_SMJ(t1)", "TIDB_SMJ(`t1`)"}, {"TIDB_SMJ(t1,t2)", "TIDB_SMJ(`t1`, `t2`)"}, {"TIDB_SMJ(@sel1 t1,t2)", "TIDB_SMJ(@`sel1` `t1`, `t2`)"}, {"TIDB_SMJ(t1@sel1,t2@sel2)", "TIDB_SMJ(`t1`@`sel1`, `t2`@`sel2`)"}, {"TIDB_INLJ(t1,t2)", "TIDB_INLJ(`t1`, `t2`)"}, {"TIDB_INLJ(@sel1 t1,t2)", "TIDB_INLJ(@`sel1` `t1`, `t2`)"}, {"TIDB_INLJ(t1@sel1,t2@sel2)", "TIDB_INLJ(`t1`@`sel1`, `t2`@`sel2`)"}, {"TIDB_HJ(t1,t2)", "TIDB_HJ(`t1`, `t2`)"}, {"TIDB_HJ(@sel1 t1,t2)", "TIDB_HJ(@`sel1` `t1`, `t2`)"}, {"TIDB_HJ(t1@sel1,t2@sel2)", "TIDB_HJ(`t1`@`sel1`, `t2`@`sel2`)"}, {"MERGE_JOIN(t1,t2)", "MERGE_JOIN(`t1`, `t2`)"}, {"BROADCAST_JOIN(t1,t2)", 
"BROADCAST_JOIN(`t1`, `t2`)"}, {"INL_HASH_JOIN(t1,t2)", "INL_HASH_JOIN(`t1`, `t2`)"}, {"INL_MERGE_JOIN(t1,t2)", "INL_MERGE_JOIN(`t1`, `t2`)"}, {"INL_JOIN(t1,t2)", "INL_JOIN(`t1`, `t2`)"}, {"HASH_JOIN(t1,t2)", "HASH_JOIN(`t1`, `t2`)"}, {"HASH_JOIN_BUILD(t1)", "HASH_JOIN_BUILD(`t1`)"}, {"HASH_JOIN_PROBE(t1)", "HASH_JOIN_PROBE(`t1`)"}, {"LEADING(t1)", "LEADING(`t1`)"}, {"LEADING(t1, c1)", "LEADING(`t1`, `c1`)"}, {"LEADING(t1, c1, t2)", "LEADING(`t1`, `c1`, `t2`)"}, {"LEADING(@sel1 t1, c1)", "LEADING(@`sel1` `t1`, `c1`)"}, {"LEADING(@sel1 t1)", "LEADING(@`sel1` `t1`)"}, {"LEADING(@sel1 t1, c1, t2)", "LEADING(@`sel1` `t1`, `c1`, `t2`)"}, {"LEADING(t1@sel1)", "LEADING(`t1`@`sel1`)"}, {"LEADING(t1@sel1, c1)", "LEADING(`t1`@`sel1`, `c1`)"}, {"LEADING(t1@sel1, c1, t2)", "LEADING(`t1`@`sel1`, `c1`, `t2`)"}, {"MAX_EXECUTION_TIME(3000)", "MAX_EXECUTION_TIME(3000)"}, {"MAX_EXECUTION_TIME(@sel1 3000)", "MAX_EXECUTION_TIME(@`sel1` 3000)"}, {"TIDB_KV_READ_TIMEOUT(3000)", "TIDB_KV_READ_TIMEOUT(3000)"}, {"TIDB_KV_READ_TIMEOUT(@sel1 3000)", "TIDB_KV_READ_TIMEOUT(@`sel1` 3000)"}, {"USE_INDEX_MERGE(t1 c1)", "USE_INDEX_MERGE(`t1` `c1`)"}, {"USE_INDEX_MERGE(@sel1 t1 c1)", "USE_INDEX_MERGE(@`sel1` `t1` `c1`)"}, {"USE_INDEX_MERGE(t1@sel1 c1)", "USE_INDEX_MERGE(`t1`@`sel1` `c1`)"}, {"USE_TOJA(TRUE)", "USE_TOJA(TRUE)"}, {"USE_TOJA(FALSE)", "USE_TOJA(FALSE)"}, {"USE_TOJA(@sel1 TRUE)", "USE_TOJA(@`sel1` TRUE)"}, {"USE_CASCADES(TRUE)", "USE_CASCADES(TRUE)"}, {"USE_CASCADES(FALSE)", "USE_CASCADES(FALSE)"}, {"USE_CASCADES(@sel1 TRUE)", "USE_CASCADES(@`sel1` TRUE)"}, {"QUERY_TYPE(OLAP)", "QUERY_TYPE(OLAP)"}, {"QUERY_TYPE(OLTP)", "QUERY_TYPE(OLTP)"}, {"QUERY_TYPE(@sel1 OLTP)", "QUERY_TYPE(@`sel1` OLTP)"}, {"NTH_PLAN(10)", "NTH_PLAN(10)"}, {"NTH_PLAN(@sel1 30)", "NTH_PLAN(@`sel1` 30)"}, {"MEMORY_QUOTA(1 GB)", "MEMORY_QUOTA(1024 MB)"}, {"MEMORY_QUOTA(@sel1 1 GB)", "MEMORY_QUOTA(@`sel1` 1024 MB)"}, {"HASH_AGG()", "HASH_AGG()"}, {"HASH_AGG(@sel1)", "HASH_AGG(@`sel1`)"}, {"STREAM_AGG()", 
"STREAM_AGG()"}, {"STREAM_AGG(@sel1)", "STREAM_AGG(@`sel1`)"}, {"AGG_TO_COP()", "AGG_TO_COP()"}, {"AGG_TO_COP(@sel_1)", "AGG_TO_COP(@`sel_1`)"}, {"LIMIT_TO_COP()", "LIMIT_TO_COP()"}, {"MERGE()", "MERGE()"}, {"STRAIGHT_JOIN()", "STRAIGHT_JOIN()"}, {"NO_INDEX_MERGE()", "NO_INDEX_MERGE()"}, {"NO_INDEX_MERGE(@sel1)", "NO_INDEX_MERGE(@`sel1`)"}, {"READ_CONSISTENT_REPLICA()", "READ_CONSISTENT_REPLICA()"}, {"READ_CONSISTENT_REPLICA(@sel1)", "READ_CONSISTENT_REPLICA(@`sel1`)"}, {"QB_NAME(sel1)", "QB_NAME(`sel1`)"}, {"READ_FROM_STORAGE(@sel TIFLASH[t1, t2])", "READ_FROM_STORAGE(@`sel` TIFLASH[`t1`, `t2`])"}, {"READ_FROM_STORAGE(@sel TIFLASH[t1 partition(p0)])", "READ_FROM_STORAGE(@`sel` TIFLASH[`t1` PARTITION(`p0`)])"}, {"TIME_RANGE('2020-02-02 10:10:10','2020-02-02 11:10:10')", "TIME_RANGE('2020-02-02 10:10:10', '2020-02-02 11:10:10')"}, {"RESOURCE_GROUP(rg1)", "RESOURCE_GROUP(`rg1`)"}, {"RESOURCE_GROUP(`default`)", "RESOURCE_GROUP(`default`)"}, } extractNodeFunc := func(node ast.Node) ast.Node { return node.(*ast.SelectStmt).TableHints[0] } runNodeRestoreTest(t, testCases, "select /*+ %s */ * from t1 join t2", extractNodeFunc) } func TestChangeStmtRestore(t *testing.T) { testCases := []NodeRestoreTestCase{ {"CHANGE PUMP TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'", "CHANGE PUMP TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'"}, {"CHANGE DRAINER TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'", "CHANGE DRAINER TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'"}, } extractNodeFunc := func(node ast.Node) ast.Node { return node.(*ast.ChangeStmt) } runNodeRestoreTest(t, testCases, "%s", extractNodeFunc) } func TestBRIESecureText(t *testing.T) { testCases := []struct { input string secured string }{ { input: "restore database * from 'local:///tmp/br01' snapshot = 23333", secured: `^\QRESTORE DATABASE * FROM 'local:///tmp/br01' SNAPSHOT = 23333\E$`, }, { input: "backup database * to 's3://bucket/prefix?region=us-west-2'", secured: `^\QBACKUP DATABASE * TO 
's3://bucket/prefix?region=us-west-2'\E$`, }, { // we need to use regexp to match to avoid the random ordering since a map was used. // unfortunately Go's regexp doesn't support lookahead assertion, so the test case below // has false positives. input: "backup database * to 's3://bucket/prefix?access-key=abcdefghi&secret-access-key=123&force-path-style=true'", secured: `^\QBACKUP DATABASE * TO 's3://bucket/prefix?\E((access-key=xxxxxx|force-path-style=true|secret-access-key=xxxxxx)(&|'$)){3}`, }, { input: "backup database * to 'gcs://bucket/prefix?access-key=irrelevant&credentials-file=/home/user/secrets.txt'", secured: `^\QBACKUP DATABASE * TO 'gcs://bucket/prefix?\E((access-key=irrelevant|credentials-file=/home/user/secrets\.txt)(&|'$)){2}`, }, } p := parser.New() for _, tc := range testCases { comment := fmt.Sprintf("input = %s", tc.input) node, err := p.ParseOneStmt(tc.input, "", "") require.NoError(t, err, comment) n, ok := node.(ast.SensitiveStmtNode) require.True(t, ok, comment) require.Regexp(t, tc.secured, n.SecureText(), comment) } } func TestCompactTableStmtRestore(t *testing.T) { testCases := []NodeRestoreTestCase{ {"alter table abc compact tiflash replica", "ALTER TABLE `abc` COMPACT TIFLASH REPLICA"}, {"alter table abc compact", "ALTER TABLE `abc` COMPACT"}, {"alter table test.abc compact", "ALTER TABLE `test`.`abc` COMPACT"}, } extractNodeFunc := func(node ast.Node) ast.Node { return node.(*ast.CompactTableStmt) } runNodeRestoreTest(t, testCases, "%s", extractNodeFunc) } func TestPlanReplayerStmtRestore(t *testing.T) { testCases := []NodeRestoreTestCase{ {"plan replayer dump with stats as of timestamp '2023-06-28 12:34:00' explain select * from t where a > 10", "PLAN REPLAYER DUMP WITH STATS AS OF TIMESTAMP _UTF8MB4'2023-06-28 12:34:00' EXPLAIN SELECT * FROM `t` WHERE `a`>10"}, {"plan replayer dump explain analyze select * from t where a > 10", "PLAN REPLAYER DUMP EXPLAIN ANALYZE SELECT * FROM `t` WHERE `a`>10"}, {"plan replayer dump with stats as of 
timestamp 12345 explain analyze select * from t where a > 10", "PLAN REPLAYER DUMP WITH STATS AS OF TIMESTAMP 12345 EXPLAIN ANALYZE SELECT * FROM `t` WHERE `a`>10"}, {"plan replayer dump explain analyze 'test'", "PLAN REPLAYER DUMP EXPLAIN ANALYZE 'test'"}, {"plan replayer dump with stats as of timestamp '12345' explain analyze 'test2'", "PLAN REPLAYER DUMP WITH STATS AS OF TIMESTAMP _UTF8MB4'12345' EXPLAIN ANALYZE 'test2'"}, } extractNodeFunc := func(node ast.Node) ast.Node { return node.(*ast.PlanReplayerStmt) } runNodeRestoreTest(t, testCases, "%s", extractNodeFunc) } func TestRedactURL(t *testing.T) { type args struct { str string } tests := []struct { args args want string }{ {args{""}, ""}, {args{":"}, ":"}, {args{"~/file"}, "~/file"}, {args{"gs://bucket/file"}, "gs://bucket/file"}, // gs don't have access-key/secret-access-key, so it will NOT be redacted {args{"gs://bucket/file?access-key=123"}, "gs://bucket/file?access-key=123"}, {args{"gs://bucket/file?secret-access-key=123"}, "gs://bucket/file?secret-access-key=123"}, {args{"s3://bucket/file"}, "s3://bucket/file"}, {args{"s3://bucket/file?other-key=123"}, "s3://bucket/file?other-key=123"}, {args{"s3://bucket/file?access-key=123"}, "s3://bucket/file?access-key=xxxxxx"}, {args{"s3://bucket/file?secret-access-key=123"}, "s3://bucket/file?secret-access-key=xxxxxx"}, // underline {args{"s3://bucket/file?access_key=123"}, "s3://bucket/file?access_key=xxxxxx"}, {args{"s3://bucket/file?secret_access_key=123"}, "s3://bucket/file?secret_access_key=xxxxxx"}, } for _, tt := range tests { t.Run(tt.args.str, func(t *testing.T) { got := ast.RedactURL(tt.args.str) if got != tt.want { t.Errorf("RedactURL() got = %v, want %v", got, tt.want) } }) } }
// go build -race

// Sample program to show how to use a read/write mutex to define critical
// sections of code that needs synchronous access.
package main

import (
	"fmt"
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

var (
	// data is a slice that will be shared between the writer and all readers.
	data []string

	// wg is used to wait for the writer goroutine to finish.
	wg sync.WaitGroup

	// rwMutex is used to define the critical section of code: one writer OR
	// many readers at a time.
	rwMutex sync.RWMutex

	// readCount is the number of reads occurring at any given time.
	// It is maintained atomically and exists purely for display purposes.
	readCount int64
)

// init is called before main is executed. It seeds the package-level RNG so
// each run interleaves reads and writes differently.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// main is the entry point for all Go programs.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Add one to the WaitGroup for the single writer goroutine.
	wg.Add(1)

	// Create the writer goroutine.
	go writer()

	// Create seven reader goroutines.
	for i := 1; i <= 7; i++ {
		go reader(i)
	}

	// Wait for the writer goroutine to finish.
	wg.Wait()
	fmt.Println("Program Complete")

	// To keep the sample simple we are allowing the runtime to
	// kill the reader goroutines. This is something we should
	// control before allowing main to exit.
}

// writer adds 10 new strings to the slice, sleeping a random interval
// (0-99ms) between writes.
func writer() {
	for i := 1; i <= 10; i++ {
		// Only allow one goroutine to read/write to the
		// slice at a time.
		rwMutex.Lock()
		{
			// Capture the current read count. The write lock already
			// excludes readers, so this should be 0; the atomic load is kept
			// for consistency with the readers' updates.
			rc := atomic.LoadInt64(&readCount)

			// Perform some work since we have a full lock.
			fmt.Printf("****> : Performing Write : RCount[%d]\n", rc)
			data = append(data, fmt.Sprintf("String: %d", i))
		}
		rwMutex.Unlock()
		// The lock is released here, allowing any waiting goroutines
		// to continue using the slice.

		// Sleep a random amount of time before the next write.
		time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
	}

	// Tell main we are done.
	wg.Done()
}

// reader loops forever, repeatedly taking a read lock and iterating over the
// data slice. Multiple readers can hold the read lock simultaneously.
func reader(id int) {
	for {
		// Any goroutine can read when no write
		// operation is taking place.
		rwMutex.RLock()
		{
			// Increment the read count value by 1.
			rc := atomic.AddInt64(&readCount, 1)

			// Perform some read work and display values.
			time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
			fmt.Printf("%d : Performing Read : Length[%d] RCount[%d]\n", id, len(data), rc)

			// Decrement the read count value by 1.
			atomic.AddInt64(&readCount, -1)
		}
		rwMutex.RUnlock()
		// The read lock is released here.
	}
}
package seev

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document00200105 is the XML envelope for an ISO 20022 seev.002.001.05
// (MeetingCancellation) message.
type Document00200105 struct {
	XMLName xml.Name                `xml:"urn:iso:std:iso:20022:tech:xsd:seev.002.001.05 Document"`
	Message *MeetingCancellationV05 `xml:"MtgCxl"`
}

// AddMessage allocates the embedded MeetingCancellationV05 payload and
// returns it so the caller can populate it.
func (d *Document00200105) AddMessage() *MeetingCancellationV05 {
	d.Message = new(MeetingCancellationV05)
	return d.Message
}

// Scope
// The MeetingCancellation message is sent by the party that sent the MeetingNotification message to the original receiver. It is sent to cancel the previous MeetingNotification message or to advise the cancellation of a meeting.
// Usage
// The MeetingCancellation message is used in two different situations.
// First, it is used to cancel a previously sent MeetingNotification message. In this case, the MessageCancellation, the MeetingReference and the Reason building blocks need to be present.
// Second, it is used to advise that the meeting is cancelled. In this case, only the MeetingReference and Reason building blocks need to be present.
// This message definition is intended for use with the Business Application Header (head.001.001.01).
type MeetingCancellationV05 struct {

	// Series of elements which allow to identify a meeting.
	MeetingReference *iso20022.MeetingReference6 `xml:"MtgRef"`

	// Identifies the security for which the meeting was organised.
	Security []*iso20022.SecurityPosition8 `xml:"Scty,omitempty"`

	// Defines the justification for the cancellation.
	Reason *iso20022.MeetingCancellationReason2 `xml:"Rsn"`

	// Additional information that can not be captured in the structured fields and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}

// AddMeetingReference allocates and returns the MeetingReference building
// block so the caller can populate it.
func (m *MeetingCancellationV05) AddMeetingReference() *iso20022.MeetingReference6 {
	m.MeetingReference = new(iso20022.MeetingReference6)
	return m.MeetingReference
}

// AddSecurity appends a new, empty SecurityPosition8 to the Security list and
// returns it so the caller can populate it.
func (m *MeetingCancellationV05) AddSecurity() *iso20022.SecurityPosition8 {
	newValue := new(iso20022.SecurityPosition8)
	m.Security = append(m.Security, newValue)
	return newValue
}

// AddReason allocates and returns the Reason building block so the caller can
// populate it.
func (m *MeetingCancellationV05) AddReason() *iso20022.MeetingCancellationReason2 {
	m.Reason = new(iso20022.MeetingCancellationReason2)
	return m.Reason
}

// AddSupplementaryData appends a new, empty SupplementaryData1 entry and
// returns it so the caller can populate it.
func (m *MeetingCancellationV05) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	m.SupplementaryData = append(m.SupplementaryData, newValue)
	return newValue
}
package remote

import (
	"context"
	"net/http"
	"strings"
	"time"

	"github.com/pterodactyl/wings/api"
)

// Client describes the calls this daemon makes against the Panel's internal
// "remote" API: fetching server/installation data and reporting statuses back.
type Client interface {
	GetBackupRemoteUploadURLs(ctx context.Context, backup string, size int64) (api.BackupRemoteUploadResponse, error)
	GetInstallationScript(ctx context.Context, uuid string) (api.InstallationScript, error)
	GetServerConfiguration(ctx context.Context, uuid string) (api.ServerConfigurationResponse, error)
	// Fixed: this parameter was named "context", shadowing the context
	// package; renamed to the conventional "ctx" (parameter names in an
	// interface are documentation only, so callers are unaffected).
	GetServers(ctx context.Context, perPage int) ([]api.RawServerData, error)
	SetArchiveStatus(ctx context.Context, uuid string, successful bool) error
	SetBackupStatus(ctx context.Context, backup string, data api.BackupRequest) error
	SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
	SetTransferStatus(ctx context.Context, uuid string, successful bool) error
	ValidateSftpCredentials(ctx context.Context, request api.SftpAuthRequest) (api.SftpAuthResponse, error)
}

// client is the default HTTP-backed implementation of Client.
type client struct {
	httpClient *http.Client
	baseUrl    string
	tokenId    string
	token      string
	retries    int
}

// ClientOption mutates the client during construction.
type ClientOption func(c *client)

// CreateClient returns a Client pointed at base + "/api/remote",
// authenticating with the given token ID and token. Defaults (15s request
// timeout, 3 retries) may be overridden via options, which are applied last.
func CreateClient(base, tokenId, token string, opts ...ClientOption) Client {
	httpClient := &http.Client{
		Timeout: time.Second * 15,
	}
	// Normalize the base URL so joining "/api/remote" never doubles a slash.
	base = strings.TrimSuffix(base, "/")
	c := &client{
		baseUrl:    base + "/api/remote",
		tokenId:    tokenId,
		token:      token,
		httpClient: httpClient,
		retries:    3,
	}
	for _, o := range opts {
		o(c)
	}
	return c
}

// WithTimeout overrides the default 15 second HTTP request timeout.
func WithTimeout(timeout time.Duration) ClientOption {
	return func(c *client) {
		c.httpClient.Timeout = timeout
	}
}
package main

import (
	"fmt"
	"log"
	"net/http"

	"ocg-be/database"
	"ocg-be/routes"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

// main connects the database, registers the application routes, wraps them in
// a CORS middleware for the local frontend, and serves HTTP on port 3000.
func main() {
	database.Connect()

	router := mux.NewRouter()
	routes.Setup(router)

	cors := handlers.CORS(
		handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization", "Origin", "Accept"}),
		handlers.AllowedMethods([]string{"GET", "POST", "PUT", "DELETE", "PATCH"}),
		handlers.AllowedOrigins([]string{"http://localhost:8080"}),
		handlers.AllowCredentials(),
	)

	addr := fmt.Sprintf(":%d", 3000) // config
	fmt.Println("http://localhost" + addr)
	log.Fatal(http.ListenAndServe(addr, cors(router)))
}
package main

import (
	"log"

	mc "github.com/ikascrew/core/multicast"
)

// main creates a multicast server of type ikasbox named "ikasbox" and dials
// it; any failure is fatal.
func main() {
	server, err := mc.NewServer(
		mc.ServerName("ikasbox"),
		mc.Type(mc.TypeIkasbox),
	)
	if err != nil {
		log.Fatal(err)
	}

	if err := server.Dial(); err != nil {
		log.Fatal(err)
	}
}
package postgres

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"gorm.io/datatypes"

	"github.com/odpf/optimus/store"

	"github.com/google/uuid"
	"github.com/odpf/optimus/models"
	"github.com/pkg/errors"
	"gorm.io/gorm"
)

// Resource is the gorm model persisting a datastore resource specification,
// including its serialized spec and JSON-encoded assets/labels.
type Resource struct {
	ID          uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"`
	ProjectID   uuid.UUID
	Project     Project `gorm:"foreignKey:ProjectID"`
	NamespaceID uuid.UUID
	Namespace   Namespace `gorm:"foreignKey:NamespaceID"`

	Version   int
	Name      string `gorm:"not null"`
	Type      string `gorm:"not null"`
	Datastore string `gorm:"not null"`
	URN       string `gorm:"not null"`

	// Spec is the resource spec serialized to the datastore's wire format.
	Spec   []byte
	Assets datatypes.JSON
	Labels datatypes.JSON

	CreatedAt time.Time `gorm:"not null" json:"created_at"`
	UpdatedAt time.Time `gorm:"not null" json:"updated_at"`
	DeletedAt gorm.DeletedAt
}

// FromSpec converts a models.ResourceSpec into its database representation.
// Assets and labels are JSON-encoded separately; the remaining spec is
// serialized through the datastore-type-specific adapter.
func (r Resource) FromSpec(resourceSpec models.ResourceSpec) (Resource, error) {
	assetBytes, err := json.Marshal(resourceSpec.Assets)
	if err != nil {
		return Resource{}, err
	}
	labelBytes, err := json.Marshal(resourceSpec.Labels)
	if err != nil {
		return Resource{}, err
	}

	// serialize resource spec without assets to one of the datastore provided wire format
	controller, ok := resourceSpec.Datastore.Types()[resourceSpec.Type]
	if !ok {
		return Resource{}, fmt.Errorf("unknown type of datastore %s", resourceSpec.Type)
	}
	// Strip assets/labels before serializing; they are stored in their own columns.
	binaryReadySpec := resourceSpec
	binaryReadySpec.Assets = nil
	binaryReadySpec.Labels = nil
	serializedSpec, err := controller.Adapter().ToYaml(binaryReadySpec)
	if err != nil {
		return Resource{}, errors.Wrapf(err, "controller.Adapter().ToYaml: %v", binaryReadySpec)
	}

	urn, err := controller.GenerateURN(resourceSpec.Spec)
	if err != nil {
		return Resource{}, err
	}

	return Resource{
		ID:        resourceSpec.ID,
		Version:   resourceSpec.Version,
		Name:      resourceSpec.Name,
		Type:      resourceSpec.Type.String(),
		Datastore: resourceSpec.Datastore.Name(),
		URN:       urn,
		Spec:      serializedSpec,
		Assets:    assetBytes,
		Labels:    labelBytes,
	}, nil
}

// FromSpecWithNamespace is FromSpec plus the namespace and project foreign
// keys resolved from the given namespace spec.
func (r Resource) FromSpecWithNamespace(resourceSpec models.ResourceSpec, namespace models.NamespaceSpec) (Resource, error) {
	adaptResource, err := r.FromSpec(resourceSpec)
	if err != nil {
		return Resource{}, err
	}

	// namespace
	adaptNamespace, err := Namespace{}.FromSpecWithProject(namespace, namespace.ProjectSpec)
	if err != nil {
		return Resource{}, err
	}
	adaptResource.NamespaceID = adaptNamespace.ID
	adaptResource.Namespace = adaptNamespace

	// project
	adaptProject, err := Project{}.FromSpec(namespace.ProjectSpec)
	if err != nil {
		return Resource{}, err
	}
	adaptResource.ProjectID = adaptProject.ID
	adaptResource.Project = adaptProject

	return adaptResource, nil
}

// ToSpec converts the database representation back into a models.ResourceSpec
// using the given datastore's adapter for the serialized spec column.
func (r Resource) ToSpec(ds models.Datastorer) (models.ResourceSpec, error) {
	resourceType := models.ResourceType(r.Type)

	// deserialize resource spec without assets to one of the datastore provided wire format
	controller, ok := ds.Types()[resourceType]
	if !ok {
		return models.ResourceSpec{}, fmt.Errorf("unknown type of datastore %s", resourceType)
	}
	deserializedSpec, err := controller.Adapter().FromYaml(r.Spec)
	if err != nil {
		return models.ResourceSpec{}, errors.Wrapf(err, "controller.Adapter().FromYaml: %s", string(r.Spec))
	}
	var assets map[string]string
	if err := json.Unmarshal(r.Assets, &assets); err != nil {
		return models.ResourceSpec{}, err
	}
	var labels map[string]string
	if err := json.Unmarshal(r.Labels, &labels); err != nil {
		return models.ResourceSpec{}, err
	}
	return models.ResourceSpec{
		ID:        r.ID,
		Version:   r.Version,
		Name:      r.Name,
		Type:      resourceType,
		Datastore: ds,
		URN:       r.URN,
		Spec:      deserializedSpec.Spec,
		Assets:    assets,
		Labels:    labels,
	}, nil
}

// projectResourceSpecRepository reads resources scoped by project + datastore.
type projectResourceSpecRepository struct {
	db        *gorm.DB
	project   models.ProjectSpec
	datastore models.Datastorer
}

// GetByName returns the resource with the given name within the repository's
// project/datastore scope, along with the namespace it belongs to. Returns
// store.ErrResourceNotFound when no row matches.
func (repo *projectResourceSpecRepository) GetByName(ctx context.Context, name string) (models.ResourceSpec, models.NamespaceSpec, error) {
	var r Resource
	if err := repo.db.WithContext(ctx).Preload("Namespace").Where("project_id = ? AND datastore = ? AND name = ?", repo.project.ID, repo.datastore.Name(), name).First(&r).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return models.ResourceSpec{}, models.NamespaceSpec{}, store.ErrResourceNotFound
		}
		return models.ResourceSpec{}, models.NamespaceSpec{}, err
	}

	resourceSpec, err := r.ToSpec(repo.datastore)
	if err != nil {
		return models.ResourceSpec{}, models.NamespaceSpec{}, err
	}
	namespaceSpec, err := r.Namespace.ToSpec(repo.project)
	if err != nil {
		return models.ResourceSpec{}, models.NamespaceSpec{}, err
	}
	return resourceSpec, namespaceSpec, nil
}

// GetAll returns every resource in the repository's project/datastore scope.
func (repo *projectResourceSpecRepository) GetAll(ctx context.Context) ([]models.ResourceSpec, error) {
	specs := []models.ResourceSpec{}
	resources := []Resource{}
	if err := repo.db.WithContext(ctx).Where("project_id = ? AND datastore = ?", repo.project.ID, repo.datastore.Name()).Find(&resources).Error; err != nil {
		return specs, err
	}
	for _, r := range resources {
		adapted, err := r.ToSpec(repo.datastore)
		if err != nil {
			return specs, errors.Wrap(err, "failed to adapt resource")
		}
		specs = append(specs, adapted)
	}
	return specs, nil
}

// NewProjectResourceSpecRepository constructs a project-scoped resource repository.
func NewProjectResourceSpecRepository(db *gorm.DB, project models.ProjectSpec, ds models.Datastorer) *projectResourceSpecRepository {
	return &projectResourceSpecRepository{
		db:        db,
		project:   project,
		datastore: ds,
	}
}

// resourceSpecRepository reads and writes resources scoped by namespace + datastore.
type resourceSpecRepository struct {
	db        *gorm.DB
	namespace models.NamespaceSpec
	datastore models.Datastorer

	projectResourceSpecRepo store.ProjectResourceSpecRepository
}

// Insert creates a new resource row, first hard-deleting any soft-deleted row
// with the same name so the unique constraints do not collide.
func (repo *resourceSpecRepository) Insert(ctx context.Context, resource models.ResourceSpec) error {
	if len(resource.Name) == 0 {
		return errors.New("name cannot be empty")
	}
	p, err := Resource{}.FromSpecWithNamespace(resource, repo.namespace)
	if err != nil {
		return err
	}
	// if soft deleted earlier
	if err := repo.HardDelete(ctx, resource.Name); err != nil {
		return err
	}
	return repo.db.WithContext(ctx).Create(&p).Error
}

// Save inserts the resource when it does not exist yet, or updates the
// existing row in place. It refuses to update a resource that belongs to a
// different namespace within the same project.
func (repo *resourceSpecRepository) Save(ctx context.Context, spec models.ResourceSpec) error {
	existingResource, namespaceSpec, err := repo.projectResourceSpecRepo.GetByName(ctx, spec.Name)
	if errors.Is(err, store.ErrResourceNotFound) {
		return repo.Insert(ctx, spec)
	} else if err != nil {
		return errors.Wrap(err, "unable to find resource by name")
	}
	if namespaceSpec.ID != repo.namespace.ID {
		// Fixed: was errors.New(fmt.Sprintf(...)); errors.Errorf is the
		// equivalent single-call form (staticcheck S1028) with identical
		// pkg/errors stack-capturing behavior.
		return errors.Errorf("resource %s already exists for the project %s", spec.Name, repo.namespace.ProjectSpec.Name)
	}
	resource, err := Resource{}.FromSpec(spec)
	if err != nil {
		return err
	}
	// Preserve the existing primary key so gorm updates rather than inserts.
	resource.ID = existingResource.ID
	return repo.db.WithContext(ctx).Model(&resource).Updates(&resource).Error
}

// GetByName returns the named resource within this namespace/datastore scope,
// or store.ErrResourceNotFound.
func (repo *resourceSpecRepository) GetByName(ctx context.Context, name string) (models.ResourceSpec, error) {
	var r Resource
	if err := repo.db.WithContext(ctx).Where("namespace_id = ? AND datastore = ? AND name = ?", repo.namespace.ID, repo.datastore.Name(), name).First(&r).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return models.ResourceSpec{}, store.ErrResourceNotFound
		}
		return models.ResourceSpec{}, err
	}
	return r.ToSpec(repo.datastore)
}

// GetByID returns the resource with the given ID within this namespace, or
// store.ErrResourceNotFound.
func (repo *resourceSpecRepository) GetByID(ctx context.Context, id uuid.UUID) (models.ResourceSpec, error) {
	var r Resource
	if err := repo.db.WithContext(ctx).Where("namespace_id = ? AND id = ?", repo.namespace.ID, id).First(&r).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return models.ResourceSpec{}, store.ErrResourceNotFound
		}
		return models.ResourceSpec{}, err
	}
	return r.ToSpec(repo.datastore)
}

// GetByURN returns the resource with the given URN within this
// namespace/datastore scope, or store.ErrResourceNotFound.
func (repo *resourceSpecRepository) GetByURN(ctx context.Context, urn string) (models.ResourceSpec, error) {
	var r Resource
	if err := repo.db.WithContext(ctx).Where("namespace_id = ? AND datastore = ? AND urn = ?", repo.namespace.ID, repo.datastore.Name(), urn).First(&r).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return models.ResourceSpec{}, store.ErrResourceNotFound
		}
		return models.ResourceSpec{}, err
	}
	return r.ToSpec(repo.datastore)
}

// GetAll returns every resource within this namespace/datastore scope.
func (repo *resourceSpecRepository) GetAll(ctx context.Context) ([]models.ResourceSpec, error) {
	specs := []models.ResourceSpec{}
	resources := []Resource{}
	if err := repo.db.WithContext(ctx).Where("namespace_id = ? AND datastore = ?", repo.namespace.ID, repo.datastore.Name()).Find(&resources).Error; err != nil {
		return specs, err
	}
	for _, r := range resources {
		adapted, err := r.ToSpec(repo.datastore)
		if err != nil {
			return specs, errors.Wrap(err, "failed to adapt resource")
		}
		specs = append(specs, adapted)
	}
	return specs, nil
}

// Delete soft-deletes the named resource (gorm.DeletedAt).
func (repo *resourceSpecRepository) Delete(ctx context.Context, name string) error {
	return repo.db.WithContext(ctx).Where("namespace_id = ? AND datastore = ? AND name = ? ", repo.namespace.ID, repo.datastore.Name(), name).Delete(&Resource{}).Error
}

// HardDelete permanently removes the named resource, including soft-deleted rows.
func (repo *resourceSpecRepository) HardDelete(ctx context.Context, name string) error {
	return repo.db.WithContext(ctx).Unscoped().Where("namespace_id = ? AND datastore = ? AND name = ? ", repo.namespace.ID, repo.datastore.Name(), name).Delete(&Resource{}).Error
}

// NewResourceSpecRepository constructs a namespace-scoped resource repository.
func NewResourceSpecRepository(db *gorm.DB, namespace models.NamespaceSpec, ds models.Datastorer, projectResourceSpecRepo store.ProjectResourceSpecRepository) *resourceSpecRepository {
	return &resourceSpecRepository{
		db:                      db,
		namespace:               namespace,
		datastore:               ds,
		projectResourceSpecRepo: projectResourceSpecRepo,
	}
}
package main

import (
	"fmt"
	"time"
)

// fibonacci sends the first cap(ch) Fibonacci numbers (starting 1, 1) into
// ch, then closes it and prints a completion message.
func fibonacci(ch chan int) {
	x, y := 1, 1
	for remaining := cap(ch); remaining > 0; remaining-- {
		ch <- x
		x, y = y, x+y
	}
	close(ch)
	fmt.Println("end, close mychan")
}

// main fills a buffered channel with Fibonacci numbers in the background and
// drains it one value per second, printing each value with the channel-open
// flag; a closed-and-drained channel (ok == false) ends the loop.
// (A `for v := range pipeline` loop would also work but would not show the
// zero value / false flag after close.)
func main() {
	pipeline := make(chan int, 10)
	go fibonacci(pipeline)

	for {
		v, ok := <-pipeline
		fmt.Println(v, ok)
		time.Sleep(time.Second)
		if !ok {
			break
		}
	}
}
package main

import (
	"tetra/lib/gui"
	"tetra/lib/store"
)

// Window wraps the operating system's window object, adding persistence of
// window state across sessions via the store package.
type Window struct {
	gui.Window // super
}

// OnCreate runs the embedded handler, then restores this window's previously
// saved state keyed by its object ID; when that fails (e.g. no saved state
// yet) it falls back to the default layout.
func (w *Window) OnCreate() {
	w.Window.OnCreate()
	id := w.ObjID()
	if id != "" {
		if err := store.LoadState("state", id, w); err != nil {
			// NOTE(review): the fallback's error is silently ignored —
			// presumably a missing default layout is acceptable; confirm.
			store.LoadState("layout", "default", w)
		}
	}
}

// OnDestroy saves the window state under its object ID (when it has one)
// before delegating to the embedded handler for teardown.
func (w *Window) OnDestroy() {
	id := w.ObjID()
	if id != "" {
		store.SaveState("state", id, w)
	}
	w.Window.OnDestroy()
}
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"fmt"
	"log/slog"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/sacloud/sakuracloud_exporter/platform"
)

// CouponCollector collects metrics about the account's coupons (discount
// balance, remaining validity and usability).
type CouponCollector struct {
	ctx    context.Context
	logger *slog.Logger
	// errors counts collection failures under the "coupon" label.
	errors *prometheus.CounterVec
	client platform.CouponClient

	Discount      *prometheus.Desc
	RemainingDays *prometheus.Desc
	ExpDate       *prometheus.Desc
	Usable        *prometheus.Desc
}

// NewCouponCollector returns a new CouponCollector. It pre-registers the
// "coupon" error label at zero so the error series exists before any failure.
func NewCouponCollector(ctx context.Context, logger *slog.Logger, errors *prometheus.CounterVec, client platform.CouponClient) *CouponCollector {
	errors.WithLabelValues("coupon").Add(0)

	labels := []string{"id", "member_id", "contract_id"}

	return &CouponCollector{
		ctx:    ctx,
		logger: logger,
		errors: errors,
		client: client,
		Discount: prometheus.NewDesc(
			"sakuracloud_coupon_discount",
			"The balance of coupon",
			labels, nil,
		),
		RemainingDays: prometheus.NewDesc(
			"sakuracloud_coupon_remaining_days",
			"The count of coupon's remaining days",
			labels, nil,
		),
		ExpDate: prometheus.NewDesc(
			"sakuracloud_coupon_exp_date",
			"Coupon expiration date in seconds since epoch (1970)",
			labels, nil,
		),
		Usable: prometheus.NewDesc(
			"sakuracloud_coupon_usable",
			"1 if your coupon is usable",
			labels, nil,
		),
	}
}

// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector.
func (c *CouponCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.Discount
	ch <- c.RemainingDays
	ch <- c.ExpDate
	ch <- c.Usable
}

// Collect is called by the Prometheus registry when collecting metrics. A
// failed Find increments the error counter and logs a warning instead of
// emitting partial metrics.
func (c *CouponCollector) Collect(ch chan<- prometheus.Metric) {
	coupons, err := c.client.Find(c.ctx)
	if err != nil {
		c.errors.WithLabelValues("coupon").Add(1)
		c.logger.Warn(
			"can't get coupon",
			slog.Any("err", err),
		)
		return
	}

	for _, coupon := range coupons {
		labels := []string{
			coupon.ID.String(),
			coupon.MemberID,
			fmt.Sprintf("%d", coupon.ContractID),
		}
		now := time.Now()

		// Discount balance.
		ch <- prometheus.MustNewConstMetric(
			c.Discount,
			prometheus.GaugeValue,
			float64(coupon.Discount),
			labels...,
		)

		// RemainingDays: whole days until expiry, clamped at zero for
		// already-expired coupons.
		remainingDays := int(coupon.UntilAt.Sub(now).Hours() / 24)
		if remainingDays < 0 {
			remainingDays = 0
		}
		ch <- prometheus.MustNewConstMetric(
			c.RemainingDays,
			prometheus.GaugeValue,
			float64(remainingDays),
			labels...,
		)

		// Expiration date.
		// NOTE(review): the metric help text says "seconds since epoch", but
		// the exported value is Unix()*1000, i.e. milliseconds — confirm
		// which is intended before changing either (dashboards may depend on
		// the millisecond scale).
		ch <- prometheus.MustNewConstMetric(
			c.ExpDate,
			prometheus.GaugeValue,
			float64(coupon.UntilAt.Unix())*1000,
			labels...,
		)

		// Usable: 1 while the coupon has balance and "now" falls inside its
		// AppliedAt..UntilAt validity window, otherwise 0.
		var usable float64
		if coupon.Discount > 0 && coupon.AppliedAt.Before(now) && coupon.UntilAt.After(now) {
			usable = 1
		}
		ch <- prometheus.MustNewConstMetric(
			c.Usable,
			prometheus.GaugeValue,
			usable,
			labels...,
		)
	}
}
package session

import (
	"crypto/x509"
	"fmt"

	"github.com/fasthttp/session/v2"

	"github.com/authelia/authelia/v4/internal/configuration/schema"
	"github.com/authelia/authelia/v4/internal/logging"
)

// Provider contains a list of domain sessions.
type Provider struct {
	sessions map[string]*Session
}

// NewProvider instantiates a session provider given a configuration, building
// one Session per configured cookie domain. Configuration errors are fatal.
func NewProvider(config schema.Session, certPool *x509.CertPool) *Provider {
	log := logging.Logger()

	name, p, s, err := NewSessionProvider(config, certPool)
	if err != nil {
		log.Fatal(err)
	}

	provider := &Provider{
		sessions: make(map[string]*Session, len(config.Cookies)),
	}

	for _, cookieConfig := range config.Cookies {
		var holder *session.Session

		if _, holder, err = NewProviderConfigAndSession(cookieConfig, name, s, p); err != nil {
			log.Fatal(err)
		}

		provider.sessions[cookieConfig.Domain] = &Session{
			Config:        cookieConfig,
			sessionHolder: holder,
		}
	}

	return provider
}

// Get returns session information for specified domain.
func (p *Provider) Get(domain string) (*Session, error) {
	if domain == "" {
		return nil, fmt.Errorf("can not get session from an undefined domain")
	}

	sess, found := p.sessions[domain]
	if !found {
		return nil, fmt.Errorf("no session found for domain '%s'", domain)
	}

	return sess, nil
}
package method_interface

import "fmt"

// Do prints the dynamic type and value of i for the supported types
// (int, string, byte). Values of any other type are silently ignored.
func Do(i interface{}) {
	switch i.(type) {
	case int:
		fmt.Printf("int , value: %d\n", i)
	case string:
		fmt.Printf("string, value: %s\n", i)
	case byte:
		// Fixed: this case was missing the trailing newline the other cases
		// print, which glued its output onto the next line.
		fmt.Printf("byte, value: %d\n", i)
	}
}
package parse

import (
	"fmt"
	"time"
)

const lessThanMin = "less than a minute"

// PrettyDuration returns a human-readable duration that should fit the
// phrase "X ago", e.g., "less than a minute ago", "2 minutes ago", etc.
// Durations under a minute render as "less than a minute", under an hour as
// minutes, under 48 hours as hours, and anything longer as whole days.
func PrettyDuration(d time.Duration) string {
	switch {
	case d < time.Minute:
		return lessThanMin
	case d < time.Hour:
		return pluralize(int64(d.Minutes()), "minute")
	case d < 48*time.Hour:
		return pluralize(int64(d.Hours()), "hour")
	default:
		return fmt.Sprintf("%d days", int64(d.Hours())/24)
	}
}

// pluralize renders "<n> <unit>", appending "s" when n is greater than one.
func pluralize(n int64, unit string) string {
	if n > 1 {
		return fmt.Sprintf("%d %ss", n, unit)
	}
	return fmt.Sprintf("%d %s", n, unit)
}
package cmd

import (
	"fmt"

	"amru.in/cli/db"
	"github.com/spf13/cobra"
)

// listCmd represents the list command. It prints every stored to-do task with
// a 1-based index, or a short notice when there are none.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "Lists all the to-do tasks",
	Run: func(cmd *cobra.Command, args []string) {
		taskList, err := db.ListTaskItems()
		if err != nil {
			// Fixed typo in the user-facing message: "occured" -> "occurred".
			fmt.Println("Some error occurred", err)
			return
		}
		if len(taskList) == 0 {
			fmt.Println("No tasks exist")
			return
		}
		fmt.Println("All tasks: ")
		for i, task := range taskList {
			fmt.Println((i + 1), task.Value)
		}
	},
}

// init registers the list command on the root command.
func init() {
	RootCmd.AddCommand(listCmd)
}
package audit

import (
	"encoding/json"
	"reflect"
	"testing"

	pc_fields "github.com/square/p2/pkg/pc/fields"
	"github.com/square/p2/pkg/types"
)

// TestRCRetargetingEventDetails verifies that RC retargeting details survive
// a round trip through NewRCRetargetingEventDetails and JSON unmarshalling.
func TestRCRetargetingEventDetails(t *testing.T) {
	var (
		podID       = types.PodID("some_pod_id")
		clusterName = pc_fields.ClusterName("some_cluster_name")
		az          = pc_fields.AvailabilityZone("some_availability_zone")
		nodes       = []types.NodeName{"node1", "node2"}
	)

	raw, err := NewRCRetargetingEventDetails(podID, az, clusterName, nodes)
	if err != nil {
		t.Fatal(err)
	}

	var details RCRetargetingDetails
	if err := json.Unmarshal(raw, &details); err != nil {
		t.Fatal(err)
	}

	if details.PodID != podID {
		t.Errorf("expected pod id to be %s but was %s", podID, details.PodID)
	}
	if details.AvailabilityZone != az {
		t.Errorf("expected availability zone to be %s but was %s", az, details.AvailabilityZone)
	}
	if details.ClusterName != clusterName {
		t.Errorf("expected cluster name to be %s but was %s", clusterName, details.ClusterName)
	}
	if !reflect.DeepEqual(details.Nodes, nodes) {
		t.Errorf("expected node list to be %s but was %s", nodes, details.Nodes)
	}
}
// Copyright 2021 PingCAP, Inc. Licensed under Apache-2.0. package utils import ( "os" "testing" "github.com/stretchr/testify/require" ) func TestProxyFields(t *testing.T) { revIndex := map[string]int{ "http_proxy": 0, "https_proxy": 1, "no_proxy": 2, } envs := [...]string{"http_proxy", "https_proxy", "no_proxy"} envPreset := [...]string{"http://127.0.0.1:8080", "https://127.0.0.1:8443", "localhost,127.0.0.1"} // Exhaust all combinations of those environment variables' selection. // Each bit of the mask decided whether this index of `envs` would be set. for mask := 0; mask <= 0b111; mask++ { for _, env := range envs { require.NoError(t, os.Unsetenv(env)) } for i := 0; i < 3; i++ { if (1<<i)&mask != 0 { require.NoError(t, os.Setenv(envs[i], envPreset[i])) } } for _, field := range proxyFields() { idx, ok := revIndex[field.Key] require.True(t, ok) require.NotZero(t, (1<<idx)&mask) require.Equal(t, envPreset[idx], field.String) } } }
package structs

import (
	"fmt"
	"reflect"
)

// Naming:
// Just like variables, an identifier whose first letter is capital (PascalCase)
// is exported for use outside this package, while a lowercase first letter
// (camelCase) keeps it package-private. This applies not only to the struct
// name but also to the fields inside the struct.

// Vehicle demonstrates struct declaration, field tags and embedding.
type Vehicle struct {
	wheels    int `required:"true" max:"100"` // this is called a tag — see how it is read via reflection below
	motorType string
}

// Structs walks through creating, reading and mutating a struct, reading
// field tags via reflection, and composing structs by embedding.
func Structs() {
	// use the struct as a type, initializing fields by name
	var v Vehicle = Vehicle{
		wheels:    4,
		motorType: "IDK",
	}

	// notice: you can also create a zero-valued struct and fill it in later:
	//v := Vehicle{}
	//v.wheels = 4
	//v.motorType = "IDK"

	// positional syntax (not recommended — breaks silently if field order changes):
	//v := Vehicle{4, "IDK"}

	// anonymous struct (type declared inline):
	//v := struct{wheels int}{3}

	// print the whole struct
	fmt.Println(v)

	// read a specific field
	fmt.Println(v.wheels)

	// change a field value
	v.wheels = 2
	fmt.Println(v.wheels)

	// read field tags via reflection (common use case: validating data)
	t := reflect.TypeOf(Vehicle{})
	field, _ := t.FieldByName("wheels")
	fmt.Println(field.Tag)

	// Go's take on "inheritance" is embedding: Car embeds Vehicle, so
	// Vehicle's fields are promoted and accessible directly on Car.
	type Car struct {
		Vehicle
		doors int
	}

	c := Car{}
	c.wheels = 4
	c.motorType = "IDK"
	c.doors = 3
	fmt.Println(c)
}
package mysql import ( "project/app/admin/models" orm "project/common/global" ) func migrateModel() error { err := orm.Eloquent.AutoMigrate(&models.SysUser{}) return err }
// Copyright (c) 2021 Alexey Khan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package main

import (
	"github.com/spf13/cobra"
)

// assistCmd is the top-level command of the personal finance planning
// assistant. It has no behavior of its own: running it just prints its help
// text, which points users at the `decompose` and `calculate` subcommands.
//
// NOTE(review): Use is "assistCmd" while the examples invoke the binary as
// `./bin/assist ...` — this looks like a scaffolding leftover; confirm the
// intended command name before changing it, since Use defines the CLI surface.
var assistCmd = &cobra.Command{
	Use:          "assistCmd",
	SilenceUsage: true,
	Run: func(cmd *cobra.Command, args []string) {
		// Help()'s error is deliberately ignored; there is no meaningful recovery.
		_ = cmd.Help()
	},
	Example: commandOverview(
		"Персональный ассистент для планирования личных финансов",
		"Используйте команду `decompose`, чтобы декомпозировать финансовую цель, например, "+
			"узнать минимальные необходимые условия для достижения вашей цели к конкретному сроку. "+
			"Подход: от желаемого результата.\n\n"+
			"Используйте команду `calculate`, чтобы посмотреть, каких результатов можно достигнуть "+
			"за указанный период, если соблюдать конкретные условия. Подход: от текущей ситуации.",
		[]string{
			"./bin/assist decompose --help",
			"./bin/assist calculate --help",
			"./bin/assist --help",
		},
	),
}
package model /* type Student struct { Name string Age int } */ //当Student变成小写student的时候,没法向外调用(其他包没法调用),使用工厂模式来实现跨包创建结构体实例 type student struct { Name string age int } //提供一个函数对外获取到student对象 func NewStudent(name string, age int) *student { return &student{ Name: name, age: age, } } //如果age属性首字母小写,在其他包不能直接获取,提供一个对完的方法去获取 func (s *student) GetAge() int { return s.age }
package ring import ( "go.skia.org/infra/go/skerr" ) // StringRing stores the last N strings passed to Put(). It is not thread-safe. type StringRing struct { len int content []string } // NewStringRing returns a StringRing with the given capacity. func NewStringRing(capacity int) (*StringRing, error) { if capacity < 1 { return nil, skerr.Fmt("Invalid ring capacity, must be > 0: %d", capacity) } return &StringRing{ content: make([]string, capacity), }, nil } // GetAll returns all values stored in the ring. func (r *StringRing) GetAll() []string { rv := make([]string, 0, r.len%cap(r.content)) start := r.len - cap(r.content) if start < 0 { start = 0 } for i := start; i < r.len; i++ { rv = append(rv, r.content[i%cap(r.content)]) } return rv } // Put appends the given value to the ring, possibly overwriting a previous // value. func (r *StringRing) Put(s string) { r.content[r.len%cap(r.content)] = s r.len++ }
package travis func hehe() int { return 1 }
package convexhull

import (
	"github.com/ivanterekh/qt-go-examples/internal/geometry"
)

// SolveJarvis computes a convex hull of points using the Jarvis march
// (gift-wrapping) approach, built from four directed sweeps between the
// extreme points: left→top, top→right, right→down, down→left.
// getBounds (defined elsewhere in this package) supplies those extremes.
// Each sweep gets its own tangent formula, a candidate filter restricting the
// search to that quadrant, and an "isNext" shortcut for axis-aligned steps.
func SolveJarvis(points []geometry.Point) []geometry.Point {
	var res []geometry.Point
	left, right, top, down := getBounds(points)
	// Sector 1: left → top (up-right quadrant).
	appendSector(
		&res, left, top, points,
		func(curr, next geometry.Point) float64 {
			return float64(next.Y-curr.Y) / float64(next.X-curr.X)
		},
		func(curr, next geometry.Point) bool {
			return curr != next && curr.X <= next.X && curr.Y <= next.Y
		},
		func(curr, cand geometry.Point) bool { return curr.X == cand.X },
	)
	// Sector 2: top → right (down-right quadrant).
	appendSector(
		&res, top, right, points,
		func(curr, next geometry.Point) float64 {
			return float64(next.X-curr.X) / float64(curr.Y-next.Y)
		},
		func(curr, next geometry.Point) bool {
			return curr != next && curr.X <= next.X && curr.Y >= next.Y
		},
		func(curr, cand geometry.Point) bool { return curr.Y == cand.Y },
	)
	// Sector 3: right → down (down-left quadrant).
	appendSector(
		&res, right, down, points,
		func(curr, next geometry.Point) float64 {
			return float64(curr.Y-next.Y) / float64(curr.X-next.X)
		},
		func(curr, next geometry.Point) bool {
			return curr != next && curr.X >= next.X && curr.Y >= next.Y
		},
		func(curr, cand geometry.Point) bool { return curr.X == cand.X },
	)
	// Sector 4: down → left (up-left quadrant) closes the hull.
	appendSector(
		&res, down, left, points,
		func(curr, next geometry.Point) float64 {
			return float64(curr.X-next.X) / float64(next.Y-curr.Y)
		},
		func(curr, next geometry.Point) bool {
			return curr != next && curr.X >= next.X && curr.Y <= next.Y
		},
		func(curr, cand geometry.Point) bool { return curr.Y == cand.Y },
	)
	return res
}

// appendSector walks from start to end, at each step picking the candidate
// with the largest tangent (per getTg) among points passing check, and
// appends each chosen point to res. isNext short-circuits the scan when a
// point lies directly along the current axis.
//
// NOTE(review): tg starts at 0 each iteration, so a candidate whose tangent
// is negative can never beat the initial value and cand stays `end` —
// presumably the quadrant filters in check() guarantee non-negative
// tangents; confirm against getBounds/geometry semantics.
func appendSector(
	res *[]geometry.Point,
	start, end geometry.Point,
	points []geometry.Point,
	getTg func(curr, next geometry.Point) float64,
	check func(curr, next geometry.Point) bool,
	isNext func(curr, cand geometry.Point) bool,
) {
	curr := start
	for curr != end {
		var tg float64
		var cand = end
		for _, p := range points {
			if !check(curr, p) {
				continue
			}
			if isNext(curr, p) {
				cand = p
				break
			}
			tgp := getTg(curr, p)
			if tg < tgp {
				tg = tgp
				cand = p
			}
		}
		*res = append(*res, cand)
		curr = cand
	}
}
package nationstatdb

import (
	"stockdb"

	ns "entity/nsentity"
	//"util"

	"fmt"
)

// SQL templates; %s is replaced by the table name via getSql.
const (
	IndexInsert = "insert %s set id=?, parent=?, name=?, ename=?, unit=?, eunit=?, note=?, enote=?, readid=?"
	IndexDelete = "delete from %s where id=?"
	// BUG FIX: the update statement previously had no WHERE clause, so a
	// single Update() rewrote every row in the table.
	IndexUpdate    = "update %s set parent=?, name=?, ename=?, unit=?, eunit=?, note=?, enote=?, readid=? where id=?"
	IndexSelect    = "select id, parent, name, ename, unit, eunit, note, enote, readid from %s where id=?"
	IndexSelectAll = "select id, parent, name, ename, unit, eunit, note, enote, readid from %s"
)

// IndexDB provides CRUD access to a national-statistics index table.
type IndexDB struct {
	stockdb.DBBase
	dbtable string
}

// getSql substitutes the configured table name into a SQL template.
func (s *IndexDB) getSql(sql string) string {
	return fmt.Sprintf(sql, s.dbtable)
}

// Insert stores one index row. Returns 0 on success, -1 on any error.
func (s *IndexDB) Insert(idx ns.NSDBIndex) int {
	db := s.Open()
	defer db.Close()
	sql := s.getSql(IndexInsert)
	stmt, err := db.Prepare(sql)
	if err != nil {
		// Check the error before deferring Close: deferring on a nil
		// statement would panic when the deferred call ran.
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	defer stmt.Close()
	res, err := stmt.Exec(idx.Id, idx.Parent, idx.Name, idx.EName, idx.Unit, idx.Eunit, idx.Note, idx.Enote, idx.Readid)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	_, reserr := res.LastInsertId()
	if reserr != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, reserr)
		return -1
	}
	return 0
}

// Delete removes the row with the given id. Returns 0 on success, -1 on error.
func (s *IndexDB) Delete(id string) int {
	db := s.Open()
	defer db.Close()
	sql := s.getSql(IndexDelete)
	stmt, err := db.Prepare(sql)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	defer stmt.Close()
	res, err := stmt.Exec(id)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	_, reserr := res.RowsAffected()
	if reserr != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, reserr)
		return -1
	}
	return 0
}

// Update rewrites the row identified by idx.Id. Returns 0 on success, -1 on error.
func (s *IndexDB) Update(idx ns.NSDBIndex) int {
	db := s.Open()
	defer db.Close()
	sql := s.getSql(IndexUpdate)
	stmt, err := db.Prepare(sql)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	defer stmt.Close()
	// BUG FIX: idx.Id is now passed as the WHERE parameter.
	res, err := stmt.Exec(idx.Parent, idx.Name, idx.EName, idx.Unit, idx.Eunit, idx.Note, idx.Enote, idx.Readid, idx.Id)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	_, reserr := res.RowsAffected()
	if reserr != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, reserr)
		return -1
	}
	return 0
}

// Query fetches a single row by id; on any error the zero NSDBIndex is returned.
func (s *IndexDB) Query(id string) ns.NSDBIndex {
	db := s.Open()
	defer db.Close()
	idx := ns.NSDBIndex{}
	sql := s.getSql(IndexSelect)
	stmt, err := db.Prepare(sql)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return idx
	}
	defer stmt.Close()
	err = stmt.QueryRow(id).Scan(&idx.Id, &idx.Parent, &idx.Name, &idx.EName, &idx.Unit, &idx.Eunit, &idx.Note, &idx.Enote, &idx.Readid)
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return idx
	}
	return idx
}

// TranInsert inserts a batch of rows inside a single transaction. Individual
// row failures are logged and skipped (best-effort, matching the original
// behavior); only Begin/Commit failures return -1.
func (s *IndexDB) TranInsert(idxes []ns.NSDBIndex) int {
	db := s.Open()
	defer db.Close()
	tx, err := db.Begin()
	if err != nil {
		s.Logger.Error("Database error: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	sql := s.getSql(IndexInsert)
	// Prepare once instead of once per row; the old loop also deferred
	// every Close to function exit, holding all statements open.
	stmt, err := tx.Prepare(sql)
	if err != nil {
		s.Logger.Error("Database error in transaction insert index: ", s.Dbtype, s.Dbcon, err, sql)
		return -1
	}
	defer stmt.Close()
	for _, idx := range idxes {
		_, reserr := stmt.Exec(idx.Id, idx.Parent, idx.Name, idx.EName, idx.Unit, idx.Eunit, idx.Note, idx.Enote, idx.Readid)
		if reserr != nil {
			s.Logger.Error("Database error in transaction insert index: ", s.Dbtype, s.Dbcon, reserr, idx)
			continue
		}
	}
	err = tx.Commit()
	if err != nil {
		s.Logger.Error("Database error - cannot commit in transaction insert index: ", s.Dbtype, s.Dbcon, err)
		return -1
	}
	return 0
}

// NewIndexDB builds an IndexDB bound to the given database and table.
func NewIndexDB(dbname, dbtable string) *IndexDB {
	db := new(IndexDB)
	db.Init(dbname)
	db.dbtable = dbtable
	return db
}
package config

import "os"

const (
	// apiGithubAccessToken names the environment variable holding the
	// GitHub API access token.
	apiGithubAccessToken = "SECRET_GITHUB_ACCESS_TOKEN"
)

var (
	// githubAccessToken is read once at package initialization.
	githubAccessToken = os.Getenv(apiGithubAccessToken)
)

// GetGithubAccessToken returns the GitHub access token taken from the
// SECRET_GITHUB_ACCESS_TOKEN environment variable.
//
// SECURITY FIX: the previous implementation returned a hard-coded token,
// leaking a credential in source control and ignoring the environment.
// That token must be revoked and rotated.
func GetGithubAccessToken() string {
	return githubAccessToken
}
package cmdopts

import (
	"fmt"
	"reflect"
	"strings"
)

// Options renders a map or struct (or pointer to struct) as a sequence of
// long-named command-line parameters: ["--name", "value", ...].
//
// Maps must be map[string]T; each key becomes an option name. Struct fields
// must be exported and tagged `param:"name"`; unexported fields are skipped
// and anonymous (embedded) fields are flattened recursively. Note that for
// maps the option order is unspecified (Go map iteration order).
func Options(src interface{}) ([]string, error) {
	return generateOptions(src, src)
}

// generateOptions performs the reflective walk. orig is threaded through
// unchanged so error messages report the caller's original value type rather
// than an embedded or dereferenced one.
func generateOptions(orig interface{}, src interface{}) ([]string, error) {
	// Guard: reflect.TypeOf(nil) is nil and t.Kind() below would panic.
	if src == nil {
		return nil, fmt.Errorf("map[string]T or struct (pointer to struct) expected, got %T", orig)
	}
	value := reflect.ValueOf(src)
	switch t := reflect.TypeOf(src); t.Kind() {
	case reflect.Map:
		if t.Key().Kind() != reflect.String {
			return nil, fmt.Errorf("only map[string]T is accepted, got %T", orig)
		}
		var res []string
		for _, key := range value.MapKeys() {
			v := value.MapIndex(key)
			res = append(res, "--"+key.String(), fmt.Sprintf("%v", v.Interface()))
		}
		return res, nil
	case reflect.Struct:
		var res []string
		for i := 0; i < t.NumField(); i++ {
			field := value.Field(i)
			fieldType := t.Field(i)
			if fieldType.Anonymous {
				// Embedded field: flatten its options into ours.
				sub, err := generateOptions(field.Interface(), field.Interface())
				if err != nil {
					return nil, err
				}
				res = append(res, sub...)
				continue
			}
			if !isPublic(fieldType.Name) {
				continue
			}
			paramName, ok := fieldType.Tag.Lookup("param")
			if !ok {
				return nil, fmt.Errorf("public field %s must have param tag which is to be used for param name", fieldType.Name)
			}
			res = append(res, "--"+paramName, fmt.Sprintf("%v", field.Interface()))
		}
		return res, nil
	case reflect.Ptr:
		// Guard: Elem().Interface() on a nil pointer would panic.
		if value.IsNil() {
			return nil, fmt.Errorf("map[string]T or struct (pointer to struct) expected, got nil %T", orig)
		}
		return generateOptions(orig, value.Elem().Interface())
	default:
		return nil, fmt.Errorf("map[string]T or struct (pointer to struct) expected, got %T", orig)
	}
}

// isPublic reports whether the field name is exported (its first byte is
// upper-case; sufficient for ASCII identifiers).
func isPublic(s string) bool {
	if len(s) == 0 {
		return false
	}
	return strings.ToUpper(s)[0] == s[0]
}
package leetcode /*We are given an array A of N lowercase letter strings, all of the same length. Now, we may choose any set of deletion indices, and for each string, we delete all the characters in those indices. For example, if we have an array A = ["abcdef","uvwxyz"] and deletion indices {0, 2, 3}, then the final array after deletions is ["bef", "vyz"],  and the remaining columns of A are ["b","v"], ["e","y"], and ["f","z"].  (Formally, the c-th column is [A[0][c], A[1][c], ..., A[A.length-1][c]].) Suppose we chose a set of deletion indices D such that after deletions, each remaining column in A is in non-decreasing sorted order. Return the minimum possible value of D.length. 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/delete-columns-to-make-sorted 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/ func minDeletionSize(A []string) int { rlt := 0 m := len(A) n := len(A[0]) for i := 0; i < n; i++ { for j := 0; j < m-1; j++ { if A[j][i] > A[j+1][i] { rlt++ break } } } return rlt }
package main

import (
	"flag"
	"fmt"
	"net/url"
	"os"
	"os/signal"
	"time"

	"github.com/gorilla/websocket"
	log "github.com/sirupsen/logrus"
)

var (
	addr     = flag.String("addr", "localhost:8080", "http service address")
	deviceID = flag.String("id", "device0", "device id")
)

// Device is a simulated IoT device talking to a websocket hub.
type Device struct {
	ID   string          // device identifier, prefixed onto every message
	send chan string     // buffered outbound queue, drained by Run's write loop
	conn *websocket.Conn // underlying websocket connection
}

// NewDevice dials the websocket endpoint and immediately enqueues a
// "register" message. The returned *Device is not running yet — callers must
// invoke Run() to start the read/write loops.
func NewDevice(id string, u url.URL) (*Device, error) {
	c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
	if err != nil {
		return &Device{}, err
	}
	log.Info(fmt.Sprintf("Connected: %s", u.String()))
	d := &Device{
		ID:   id,
		send: make(chan string, 10),
		conn: c,
	}
	// Registration is only queued here; it is actually written by Run().
	d.Send("register", "")
	return d, nil
}

// Close closes the underlying websocket connection.
func (d Device) Close() {
	d.conn.Close()
}

// Send enqueues a space-separated "<id> <action> <data>" message.
// NOTE(review): this blocks once the 10-slot buffer is full and no writer is
// draining it (e.g. before Run is called) — confirm that is acceptable.
func (d Device) Send(action string, data string) {
	message := fmt.Sprintf("%s %s %s", d.ID, action, data)
	d.send <- message
}

// Run drives the device: a ticker goroutine enqueues an "event" every second,
// a reader goroutine logs inbound messages (closing `done` on read failure),
// and the main loop writes queued messages until the reader stops or an
// OS interrupt triggers a graceful websocket close handshake.
func (d Device) Run() {
	log.Info(fmt.Sprintf("Running: %s", d.ID))

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	done := make(chan struct{})

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	// ticker
	// NOTE(review): this goroutine has no exit path; after Run returns and
	// the deferred Stop() fires, it blocks forever on <-ticker.C (goroutine
	// leak). Harmless here because main exits right after Run, but worth
	// wiring to `done` if Run is ever reused.
	go func() {
		for {
			select {
			case t := <-ticker.C:
				d.Send("event", t.String())
			}
		}
	}()

	// read
	go func() {
		defer close(done)
		for {
			_, message, err := d.conn.ReadMessage()
			if err != nil {
				log.Error(err)
				return
			}
			log.Info(fmt.Sprintf("Message: %s", message))
		}
	}()

	// write
	for {
		select {
		case <-done:
			return
		case t := <-d.send:
			err := d.conn.WriteMessage(websocket.TextMessage, []byte(t))
			if err != nil {
				log.Error(err)
				return
			}
		case <-interrupt:
			log.Info("Interrupt sent")

			// Cleanly close the connection by sending a close message and then
			// waiting (with timeout) for the server to close the connection.
			err := d.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
			if err != nil {
				log.Error(err)
				return
			}
			select {
			case <-done:
			case <-time.After(time.Second):
			}
			return
		}
	}
}

// main parses flags, connects one simulated device to ws://<addr>/ws, and
// runs it until the connection drops or the process is interrupted.
func main() {
	log.SetReportCaller(true)
	flag.Parse()

	u := url.URL{Scheme: "ws", Host: *addr, Path: "/ws"}

	d, err := NewDevice(*deviceID, u)
	if err != nil {
		log.Fatal(fmt.Sprintf("Error creating the device: %v", err))
	}
	d.Run()
}
package req /* ToUserName 开发者微信号 FromUserName 发送方帐号(一个OpenID) CreateTime 消息创建时间 (整型) MsgType 消息类型,文本为text Content 文本消息内容 MsgId 消息id,64位整型 */ type Text struct { ToUserName string `json:"to_user_name"` FromUserName string `json:"from_user_name"` CreateTime int64 `json:"create_time"` MsgType string `json:"msg_type"` Content string `json:"content"` MsgId int64 `json:"msg_id"` } func NewText() *Text { return &Text{ ToUserName: "", FromUserName: "", CreateTime: 0, MsgType: "", Content: "", MsgId: 0, } }
package createplayerusecase

import (
	"backend/internal/adapters/brokenrepo"
	"backend/internal/adapters/inmemoryrepo"
	"backend/internal/domain"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
)

// Test_Create_player covers the happy path: with a stubbed id generator and
// an in-memory repository, Execute must return the stubbed id and persist a
// player equal to the expected one.
func Test_Create_player(t *testing.T) {
	// Arrange
	stubbedPlayerId := domain.NewFakePlayerId("some player id")
	playerIdGenerator := domain.NewFakePlayerIdGenerator(stubbedPlayerId)
	expectedPlayer := domain.NewPlayer(stubbedPlayerId, "John Doe")
	playerRepository := inmemoryrepo.NewPlayerRepository()
	sut := New(playerIdGenerator, playerRepository)

	// Act
	result, err := sut.Execute("John Doe")

	// Assert: the use case reports the generated id...
	assert.NoError(t, err)
	assert.Equal(t, stubbedPlayerId, result.id)
	// ...and the player is actually stored in the repository.
	createdPlayer, err := playerRepository.FindById(expectedPlayer.Id())
	require.NoError(t, err)
	assert.Equal(t, expectedPlayer, createdPlayer)
}

// Test_Create_player_with_broken_repository covers the failure path: when
// the repository errors, Execute must surface the error and return an empty
// result.
func Test_Create_player_with_broken_repository(t *testing.T) {
	// Arrange
	playerIdGenerator := domain.NewUUIDPlayerIdGenerator()
	playerRepository := brokenrepo.NewPlayerRepository()
	sut := New(playerIdGenerator, playerRepository)

	// Act
	createdUserId, err := sut.Execute("some")

	// Assert
	assert.Empty(t, createdUserId)
	assert.NotNil(t, err)
}
package router

import (
	"neosmemo/backend/handler"
	"neosmemo/backend/handler/memo"
	"neosmemo/backend/handler/user"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

// Router is the application-wide httprouter instance, populated in init.
var Router *httprouter.Router = nil

// NOTE: all routes are registered here. Every handler is wrapped by
// handler.Middleware with CORS and JSON responses enabled.
func init() {
	Router = httprouter.New()

	// User routes: session info, sign-up/in/out, username availability.
	Router.GET("/api/user/me", handler.Middleware(user.GetMyUserInfo, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/user/signup", handler.Middleware(user.DoSignUp, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/user/signin", handler.Middleware(user.DoSignIn, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/user/signout", handler.Middleware(user.DoSignOut, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/user/check", handler.Middleware(user.CheckUsernameUsed, handler.MiddlewareConfig{Cors: true, JSON: true}))
	// just for test
	Router.GET("/api/user/all", handler.Middleware(user.GetAllUser, handler.MiddlewareConfig{Cors: true, JSON: true}))
	// Router.POST("/api/user/update", handler.Middleware(user.UpdateInfo, handler.MiddlewareConfig{Cors: true, JSON: true}))

	// Memo routes: CRUD over memos.
	// Router.GET("/api/:id/", memo.GetMemoByID)
	Router.GET("/api/memo/all", handler.Middleware(memo.GetAllMemos, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/memo/new", handler.Middleware(memo.CreateMemo, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/memo/update", handler.Middleware(memo.UpdateMemo, handler.MiddlewareConfig{Cors: true, JSON: true}))
	Router.POST("/api/memo/delete", handler.Middleware(memo.DeleteMemo, handler.MiddlewareConfig{Cors: true, JSON: true}))

	// Everything else falls through to the custom 404 handler.
	Router.NotFound = http.HandlerFunc(handler.NotFoundHandler)
}
package config

import (
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"os"

	cfenv "github.com/cloudfoundry-community/go-cfenv"
)

// WebConfig holds the configuration for the web (publisher) process.
type WebConfig struct {
	commonConfig
	TopicID string
}

// WorkerConfig holds the configuration for the worker (subscriber) process.
type WorkerConfig struct {
	commonConfig
	SubscriptionID string
	VisionURL      string
	VisionAPIKey   string
}

// commonConfig is shared by web and worker: DB connection, GCP project, and
// the temp file holding the GCP key (nil when credentials came from the env).
type commonConfig struct {
	ConnectionString string
	ProjectID        string
	// BUG FIX: this was a value `os.File` that was never assigned (the
	// temp file created in newCommonConfig was shadowed by `:=`), so
	// RemoveTmpFile always removed the empty path. It is now a pointer,
	// nil when no temp file was written.
	gcpKey *os.File
}

const (
	Port                     = "8080"
	cloudPubSubServiceName   = "cloud-pubsub"
	gcpAppCredentialsEnvName = "GOOGLE_APPLICATION_CREDENTIALS"
	gcpProjectEnvName        = "GOOGLE_CLOUD_PROJECT"
	pubsubSubEnvName         = "PUBSUB_SUBSCRIPTION"
	pubsubTopicEnvName       = "PUBSUB_TOPIC"
)

// NewWebConfig builds the publisher configuration from env vars (k8s) or
// bound Cloud Foundry services.
func NewWebConfig() (WebConfig, error) {
	common, err := newCommonConfig()
	if err != nil {
		return WebConfig{}, fmt.Errorf("could not parse common config env %+v", err)
	}
	topicID, err := parsePubEnv()
	if err != nil {
		return WebConfig{}, fmt.Errorf("could not parse pub env %+v", err)
	}
	return WebConfig{common, topicID}, nil
}

// NewWorkerConfig builds the subscriber configuration, including the Watson
// Vision service credentials.
func NewWorkerConfig() (WorkerConfig, error) {
	common, err := newCommonConfig()
	if err != nil {
		return WorkerConfig{}, fmt.Errorf("could not parse common config env %+v", err)
	}
	subscriptionID, err := parseSubEnv()
	if err != nil {
		return WorkerConfig{}, fmt.Errorf("could not parse sub env %+v", err)
	}
	apiKey, url, err := parseVisionEnv()
	if err != nil {
		log.Fatalf("Could not parse vision service env")
	}
	return WorkerConfig{common, subscriptionID, url, apiKey}, nil
}

// RemoveTmpFile deletes the temporary GCP key file, if one was written.
func (c commonConfig) RemoveTmpFile() {
	if c.gcpKey != nil {
		os.Remove(c.gcpKey.Name())
	}
}

// newCommonConfig resolves the DB connection string and GCP project, writing
// the GCP key to a temp file when GOOGLE_APPLICATION_CREDENTIALS is unset.
func newCommonConfig() (commonConfig, error) {
	key, projectID, err := parseKeyAndProjectIDFromEnv()
	if err != nil {
		return commonConfig{}, fmt.Errorf("could not parse pubsub env %+v", err)
	}
	var tmpFile *os.File
	if _, ok := os.LookupEnv(gcpAppCredentialsEnvName); !ok {
		// BUG FIX: use plain assignment here — the old `tmpFile, err :=`
		// shadowed the outer tmpFile, so the returned config never saw it.
		tmpFile, err = writeGCPKeyfile(key)
		if err != nil {
			return commonConfig{}, fmt.Errorf("could not write gcp file")
		}
		os.Setenv(gcpAppCredentialsEnvName, tmpFile.Name())
	}
	conn, err := parsePostgresEnv()
	if err != nil {
		return commonConfig{}, fmt.Errorf("could not parse postgres env %+v", err)
	}
	return commonConfig{conn, projectID, tmpFile}, nil
}

// parsePostgresEnv prefers POSTGRESQL_URI (k8s), falling back to the bound
// Cloud Foundry postgres service's "uri" credential.
func parsePostgresEnv() (conn string, err error) {
	if connectionString, ok := os.LookupEnv("POSTGRESQL_URI"); ok {
		// in k8s
		return connectionString, nil
	}
	service, err := readFirstServiceWithLabel("azure-postgresql-9-6")
	if err != nil {
		return conn, err
	}
	conn, ok := service.CredentialString("uri")
	if !ok {
		return conn, errors.New("could not load postgres uri")
	}
	return conn, err
}

// parseKeyAndProjectIDFromEnv returns the GCP key material and project id.
// On k8s (GOOGLE_CLOUD_PROJECT set) the key is empty — credentials come from
// GOOGLE_APPLICATION_CREDENTIALS instead.
func parseKeyAndProjectIDFromEnv() (key, projectID string, err error) {
	if projectID, ok := os.LookupEnv(gcpProjectEnvName); ok {
		// k8s
		return key, projectID, nil
	}
	service, err := readFirstServiceWithLabel(cloudPubSubServiceName)
	if err != nil {
		return key, projectID, err
	}
	key, ok := service.CredentialString("privateKeyData")
	if !ok {
		return key, projectID, fmt.Errorf("could not load privatekey")
	}
	projectID, ok = service.CredentialString("projectId")
	if !ok {
		return key, projectID, fmt.Errorf("could not load projectId")
	}
	return key, projectID, nil
}

// parsePubEnv resolves the Pub/Sub topic id from env (k8s) or CF service.
func parsePubEnv() (topicID string, err error) {
	if topicID, ok := os.LookupEnv(pubsubTopicEnvName); ok {
		// k8s
		return topicID, nil
	}
	// CF
	service, err := readFirstServiceWithLabel(cloudPubSubServiceName)
	if err != nil {
		return topicID, err
	}
	topicID, ok := service.CredentialString("topicId")
	if !ok {
		return topicID, errors.New("Could not find topicId")
	}
	return topicID, nil
}

// parseSubEnv resolves the Pub/Sub subscription id from env (k8s) or CF service.
func parseSubEnv() (subscriptionID string, err error) {
	if subscriptionID, ok := os.LookupEnv(pubsubSubEnvName); ok {
		return subscriptionID, nil
	}
	service, err := readFirstServiceWithLabel(cloudPubSubServiceName)
	if err != nil {
		return subscriptionID, err
	}
	subscriptionID, ok := service.CredentialString("subscriptionId")
	if !ok {
		return subscriptionID, errors.New("Could not find subscriptionId")
	}
	return subscriptionID, nil
}

// parseVisionEnv resolves the Watson Vision credentials from env (k8s) or a
// bound CF service. Note: if VISION_APIKEY is set but VISION_URL is not, it
// falls through to the CF lookup (matching the original behavior).
func parseVisionEnv() (apiKey, url string, err error) {
	if apiKey, ok := os.LookupEnv("VISION_APIKEY"); ok {
		// k8s
		if url, ok := os.LookupEnv("VISION_URL"); ok {
			return apiKey, url, nil
		}
	}
	// CF
	service, err := readFirstServiceWithLabel("watson-vision-combined")
	if err != nil {
		return apiKey, url, err
	}
	apiKey, ok := service.CredentialString("apikey")
	if !ok {
		return apiKey, url, errors.New("Could not find apikey")
	}
	url, ok = service.CredentialString("url")
	if !ok {
		return apiKey, url, errors.New("Could not find url")
	}
	return apiKey, url, nil
}

// readFirstServiceWithLabel returns the single bound CF service with the
// given label, erroring when zero or multiple are bound.
func readFirstServiceWithLabel(label string) (service cfenv.Service, err error) {
	appEnv, err := cfenv.Current()
	if err != nil {
		return service, err
	}
	services, err := appEnv.Services.WithLabel(label)
	if err != nil {
		return service, err
	}
	if len(services) != 1 {
		return service, fmt.Errorf("Unexpected number of %s services %d", label, len(services))
	}
	return services[0], nil
}

// writeGCPKeyfile writes the key material to a new temp file and returns it
// (closed); callers use only its Name().
func writeGCPKeyfile(key string) (*os.File, error) {
	content := []byte(key)
	tmpFile, err := ioutil.TempFile("", "key")
	if err != nil {
		return nil, err
	}
	if _, err := tmpFile.Write(content); err != nil {
		return nil, err
	}
	if err := tmpFile.Close(); err != nil {
		return nil, err
	}
	return tmpFile, nil
}
package cmd

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/instructure-bridge/muss/proc"
)

// TestDcCommand verifies the "dc" subcommand forwards every argument,
// untouched and in order, to docker-compose. withTestPath/runTestCommand are
// test helpers defined elsewhere in this package; proc.LastExecArgv records
// the argv of the last exec performed.
func TestDcCommand(t *testing.T) {
	withTestPath(t, func(t *testing.T) {
		t.Run("all args pass through", func(t *testing.T) {
			_, _, err := runTestCommand(nil, []string{
				"dc",
				"--no-ansi",
				"down",
				"-v",
			})
			assert.Nil(t, err)

			assert.Equal(t,
				[]string{"docker-compose", "--no-ansi", "down", "-v"},
				proc.LastExecArgv,
				"exec")
		})
	})
}
package pack

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/ryex/dungeondraft-gopackager/internal/structures"
	"github.com/ryex/dungeondraft-gopackager/internal/utils"

	"github.com/sirupsen/logrus"
)

// Packer packs up a folder into a dungeodraft_pack file
// set the Overwrite field if you wish pack operations to overwrite an exsisting file
type Packer struct {
	log       logrus.FieldLogger
	name      string
	id        string
	path      string
	Overwrite bool
	FileList  []structures.FileInfo
	ValidExts []string
}

// DefaultValidExt returns a slice of valid file extentions for inclusion in a .dungeondraft_pack
func DefaultValidExt() []string {
	return []string{
		".png", ".jpg", ".webp",
		".dungeondraft_wall", ".dungeondraft_tileset",
		".dungeondraft_tags", ".json",
	}
}

// GenPackID generates a random 8-character alphanumeric pack id.
func GenPackID() string {
	var seededRand *rand.Rand = rand.New(
		rand.NewSource(time.Now().UnixNano()))
	charset := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	b := make([]byte, 8)
	for i := range b {
		b[i] = charset[seededRand.Intn(len(charset))]
	}
	return string(b)
}

// NewPackerFolder creates a fresh pack.json (with a newly generated id) in
// the folder and returns a Packer for it. Fails if a pack.json already
// exists and overwrite is false, or if name/version are empty.
func NewPackerFolder(log logrus.FieldLogger, folderPath string, name string, author string, version string, overwrite bool) (p *Packer, err error) {
	folderPath, err = filepath.Abs(folderPath)
	if err != nil {
		return
	}

	dirExists := utils.DirExists(folderPath)
	if !dirExists {
		err = errors.New("directory does not exists")
		log.WithError(err).WithField("path", folderPath).Error("can't package a non existent folder")
		return
	}

	packJSONPath := filepath.Join(folderPath, `pack.json`)
	packExists := utils.FileExists(packJSONPath)
	if packExists {
		if !overwrite {
			err = errors.New("a pack.json already exists and overwrite is not enabled")
			log.WithError(err).WithField("path", folderPath).Error("a pack.json already exists")
			return
		}
		log.WithField("path", folderPath).Warn("Overwriting pack.json")
	}

	if name == "" {
		err = errors.New("name field can not be empty")
		log.WithError(err).Error("invalid pack info")
		return
	}
	if version == "" {
		err = errors.New("version field can not be empty")
		log.WithError(err).Error("invalid pack info")
		return
	}

	pack := structures.Package{
		Name:    name,
		Author:  author,
		Version: version,
		ID:      GenPackID(),
	}

	packJSONBytes, err := json.MarshalIndent(&pack, "", "	")
	if err != nil {
		log.WithError(err).WithField("path", folderPath).WithField("packJSONPath", packJSONPath).Error("can't create pack.json")
		return
	}

	err = ioutil.WriteFile(packJSONPath, packJSONBytes, 0644)
	if err != nil {
		log.WithError(err).WithField("path", folderPath).WithField("packJSONPath", packJSONPath).Error("can't write pack.json")
		return
	}

	p = NewPacker(log.WithField("path", folderPath).WithField("id", pack.ID).WithField("name", pack.Name), pack.Name, pack.ID, folderPath)
	return
}

// NewPackerFromFolder builds a new Packer from a folder with a valid pack.json
func NewPackerFromFolder(log logrus.FieldLogger, folderPath string) (p *Packer, err error) {
	folderPath, err = filepath.Abs(folderPath)
	if err != nil {
		return
	}

	dirExists := utils.DirExists(folderPath)
	if !dirExists {
		err = errors.New("directory does not exists")
		log.WithError(err).WithField("path", folderPath).Error("can't package a non existent folder")
		return
	}

	packJSONPath := filepath.Join(folderPath, `pack.json`)
	packExists := utils.FileExists(packJSONPath)
	if !packExists {
		err = errors.New("no pack.json in directory, generate one first.")
		log.WithError(err).WithField("path", folderPath).Error("can't package without a pack.json")
		return
	}

	packJSONBytes, err := ioutil.ReadFile(packJSONPath)
	if err != nil {
		log.WithError(err).WithField("path", folderPath).WithField("packJSONPath", packJSONPath).Error("can't read pack.json")
		return
	}

	var pack structures.Package
	err = json.Unmarshal(packJSONBytes, &pack)
	if err != nil {
		log.WithError(err).WithField("path", folderPath).WithField("packJSONPath", packJSONPath).Error("can't parse pack.json")
		return
	}

	if pack.Name == "" {
		err = errors.New("pack.json's name field can not be empty")
		log.WithError(err).WithField("path", folderPath).WithField("packJSONPath", packJSONPath).Error("invalid pack.json")
		return
	}
	if pack.ID == "" {
		err = errors.New("pack.json's id field can not be empty")
		log.WithError(err).WithField("path", folderPath).WithField("packJSONPath", packJSONPath).Error("invalid pack.json")
		return
	}

	p = NewPacker(log.WithField("path", folderPath).WithField("id", pack.ID).WithField("name", pack.Name), pack.Name, pack.ID, folderPath)
	return
}

// NewPacker makes a new Packer, it does no validation so the subsiquent pack operations may fail badly
func NewPacker(log logrus.FieldLogger, name string, id string, path string) *Packer {
	return &Packer{
		log:       log,
		name:      name,
		id:        id,
		path:      path,
		ValidExts: DefaultValidExt(),
	}
}

// PackPackage packs up a directory into a .dungeondraft_pack file
// assumes BuildFileList has been called first
func (p *Packer) PackPackage(outDir string) (err error) {
	outDirPath, err := filepath.Abs(outDir)
	if err != nil {
		return
	}

	fileExists := utils.FileExists(outDirPath)
	if fileExists {
		err = errors.New("out folder already exists as a file")
		return
	}
	dirExists := utils.DirExists(outDirPath)
	if !dirExists {
		err = os.MkdirAll(outDirPath, 0777)
		if err != nil {
			return
		}
	}

	outPackagePath := filepath.Join(outDirPath, p.name+".dungeondraft_pack")
	l := p.log.WithField("outPackagePath", outPackagePath)

	packageExists := utils.FileExists(outPackagePath)
	if packageExists {
		if p.Overwrite {
			l.Warn("overwriting file")
		} else {
			err = errors.New("file exists")
			l.WithError(err).Error("package file already exists at destination and Overwrite not enabled")
			return
		}
	}

	l.Debug("writing package")
	var out *os.File
	out, err = os.Create(outPackagePath)
	if err != nil {
		l.WithError(err).Error("can not open package file for writing")
		return
	}
	err = p.write(l, out)
	if err != nil {
		// BUG FIX: the old code returned here without closing the file,
		// leaking the descriptor on a failed write.
		out.Close()
		l.WithError(err).Error("failed to write package file")
		return
	}
	err = out.Close()
	if err != nil {
		l.WithError(err).Error("failed to close package file")
		return
	}

	l.Info("packing complete")
	return
}

// BuildFileList builds a list of files at the target directory for inclusion in a .dungeondraft_pack file
func (p *Packer) BuildFileList() (err error) {
	p.log.Debug("beginning directory traversal to collect file list")
	err = filepath.Walk(p.path, p.fileListWalkFunc)
	if err != nil {
		p.log.WithError(err).Error("failed to walk directory")
		return
	}

	// inject <GUID>.json — the pack.json exposed under the pack id.
	packJSONPath := filepath.Join(p.path, `pack.json`)
	packJSONRelPath, err := filepath.Rel(p.path, filepath.Join(p.path, fmt.Sprintf(`%s.json`, p.id)))
	if err != nil {
		p.log.Error("can not get path relative to package root")
		return err
	}
	pathJSONResPath := "res://" + filepath.Join("packs", packJSONRelPath)
	if runtime.GOOS == "windows" { // windows path seperators.....
		pathJSONResPath = strings.ReplaceAll(pathJSONResPath, "\\", "/")
	}

	packJSONInfo, err := os.Stat(packJSONPath)
	if err != nil {
		// BUG FIX: the old code logged this error but fell through and
		// dereferenced the nil FileInfo below; propagate it instead.
		p.log.WithError(err).Error("can't stat pack.json")
		return err
	}

	GUIDJSONInfo := structures.FileInfo{
		Path:    packJSONPath,
		Size:    packJSONInfo.Size(),
		ResPath: pathJSONResPath,
	}

	// prepend the GUID json so it is the first entry in the package.
	p.FileList = append([]structures.FileInfo{GUIDJSONInfo}, p.FileList...)

	return
}

// makeResPath converts an on-disk path to its "res://packs/<id>/..." form.
func (p *Packer) makeResPath(l logrus.FieldLogger, path string) (string, error) {
	relPath, err := filepath.Rel(p.path, path)
	if err != nil {
		l.Error("can not get path relative to package root")
		return "", err
	}
	resPath := "res://" + filepath.Join("packs", p.id, relPath)
	if runtime.GOOS == "windows" { // windows path seperators.....
		resPath = strings.ReplaceAll(resPath, "\\", "/")
	}
	return resPath, nil
}

// fileListWalkFunc collects regular files with whitelisted extensions into
// p.FileList during the filepath.Walk traversal.
func (p *Packer) fileListWalkFunc(path string, info os.FileInfo, err error) error {
	l := p.log.WithField("filePath", path)
	if err != nil {
		l.WithError(err).Error("can't access file")
		return err
	}

	if info.IsDir() {
		l.Debug("is directory, decending into...")
	} else {
		ext := strings.ToLower(filepath.Ext(path))
		if utils.StringInSlice(ext, p.ValidExts) {
			if info.Mode().IsRegular() {
				resPath, err := p.makeResPath(l, path)
				if err != nil {
					return err
				}
				fInfo := structures.FileInfo{
					Path:    path,
					Size:    info.Size(),
					ResPath: resPath,
				}
				l.Info("including")
				p.FileList = append(p.FileList, fInfo)
			}
		} else {
			l.WithField("ext", ext).WithField("validExts", p.ValidExts).Debug("Invalid file ext, not including.")
		}
	}
	return nil
}

// write emits the package headers followed by the file table and contents.
func (p *Packer) write(l logrus.FieldLogger, out io.WriteSeeker) (err error) {
	headers := structures.DefaultPackageHeaderBytes()
	headers.FileCount = uint32(len(p.FileList))

	fileInfoList := structures.NewFileInfoList(p.FileList)

	l.Debug("writing package headers...")
	// write file header
	err = headers.Write(out)
	if !utils.CheckErrorWrite(l, err) {
		return
	}
	err = fileInfoList.Write(l, out, headers.SizeOf())
	if !utils.CheckErrorWrite(l, err) {
		return
	}
	return
}
package mmap

import (
	"hash/fnv"
	"sync"
)

// SEGMENT_NUM is the shard count: keys are hashed across this many
// independently locked segments.
var SEGMENT_NUM = 32

// ConcurrentMap is a segment-locked (sharded) string-keyed map.
type ConcurrentMap []*ConcurrentMapSegment

// ConcurrentMapSegment is one shard: a plain map guarded by its own RWMutex.
type ConcurrentMapSegment struct {
	mu   sync.RWMutex
	data map[string]interface{}
}

// NewConcurrentMap allocates a map with SEGMENT_NUM initialized segments.
func NewConcurrentMap() ConcurrentMap {
	// BUG FIX: the previous version used make(ConcurrentMap, 0), leaving a
	// zero-length slice, so cmap[i] below panicked with index out of range.
	cmap := make(ConcurrentMap, SEGMENT_NUM)
	for i := 0; i < SEGMENT_NUM; i++ {
		cmap[i] = &ConcurrentMapSegment{
			data: make(map[string]interface{}),
		}
	}
	return cmap
}

// GetSegment returns the shard responsible for key (FNV-1 32-bit hash).
func (cmap ConcurrentMap) GetSegment(key string) *ConcurrentMapSegment {
	hasher := fnv.New32()
	hasher.Write([]byte(key))
	return cmap[hasher.Sum32()%uint32(SEGMENT_NUM)]
}

// IsEmpty reports whether no key is stored in any segment.
func (cmap ConcurrentMap) IsEmpty() bool {
	return cmap.Size() == 0
}

// Size returns the total number of keys across all segments. The count is
// not a consistent snapshot under concurrent writes.
func (cmap ConcurrentMap) Size() int {
	size := 0
	for i := 0; i < SEGMENT_NUM; i++ {
		segment := cmap[i]
		segment.mu.RLock()
		size += len(segment.data)
		segment.mu.RUnlock()
	}
	return size
}

// Contain reports whether key is present.
func (cmap ConcurrentMap) Contain(key string) bool {
	segment := cmap.GetSegment(key)
	segment.mu.RLock()
	// BUG FIX: this previously deferred Unlock() after an RLock(), which
	// faults at runtime ("unlock of unlocked mutex"); pair RLock/RUnlock.
	defer segment.mu.RUnlock()
	_, ok := segment.data[key]
	return ok
}

// Get returns the value for key and whether it was present.
func (cmap ConcurrentMap) Get(key string) (interface{}, bool) {
	segment := cmap.GetSegment(key)
	segment.mu.RLock()
	defer segment.mu.RUnlock()
	v, ok := segment.data[key]
	return v, ok
}

// Set stores value under key, inserting or replacing.
// (Receiver normalized to a value receiver for consistency: no method
// mutates the slice header itself, and value methods remain callable
// through pointers.)
func (cmap ConcurrentMap) Set(key string, value interface{}) {
	segment := cmap.GetSegment(key)
	segment.mu.Lock()
	defer segment.mu.Unlock()
	segment.data[key] = value
}

// Update replaces the value only if key already exists, reporting success.
func (cmap ConcurrentMap) Update(key string, value interface{}) bool {
	segment := cmap.GetSegment(key)
	segment.mu.Lock()
	defer segment.mu.Unlock()
	if _, ok := segment.data[key]; ok {
		segment.data[key] = value
		return true
	}
	return false
}

// Remove deletes key; deleting an absent key is a no-op.
func (cmap ConcurrentMap) Remove(key string) {
	segment := cmap.GetSegment(key)
	segment.mu.Lock()
	defer segment.mu.Unlock()
	delete(segment.data, key)
}
package fuse // Compilation test for DummyFuse and DummyPathFuse import ( "testing" ) func TestDummy(t *testing.T) { fs := new(DefaultRawFuseFileSystem) NewMountState(fs) pathFs := new(DefaultPathFilesystem) NewPathFileSystemConnector(pathFs) } func TestDummyFile(t *testing.T) { d := new(DefaultRawFuseFile) var filePtr RawFuseFile = d d2 := new(DefaultRawFuseDir) var fileDir RawFuseDir = d2 _ = fileDir _ = filePtr }
package bitbucket_v2

import (
	"errors"

	"github.com/DaoCloud/go-bitbucket/bitbucket"
)

var (
	// ErrNilClient signals that an operation was attempted on a nil client.
	ErrNilClient = errors.New("client is nil")
)

// New creates an instance of the Bitbucket Client from the given OAuth
// credentials.
func New(consumerKey, consumerSecret, accessToken, tokenSecret string) *Client {
	c := &Client{
		ConsumerKey:    consumerKey,
		ConsumerSecret: consumerSecret,
		AccessToken:    accessToken,
		TokenSecret:    tokenSecret,
	}

	// Resource accessors (Keys, Repos, Users, Emails, Brokers, Teams,
	// RepoKeys, Sources) are currently disabled; re-enable here when the
	// corresponding resource types are ported.
	/* c.Keys = &KeyResource{c}*/
	//c.Repos = &RepoResource{c}
	//c.Users = &UserResource{c}
	//c.Emails = &EmailResource{c}
	/*c.Brokers = &BrokerResource{c}*/
	/* c.Teams = &TeamResource{c}*/
	//c.RepoKeys = &RepoKeyResource{c}
	/*c.Sources = &SourceResource{c}*/

	return c
}

// Client holds the OAuth 1.0a credentials used to talk to the Bitbucket API.
type Client struct {
	ConsumerKey    string
	ConsumerSecret string
	AccessToken    string
	TokenSecret    string

	// Disabled resource accessors, kept for reference:
	/* Repos *RepoResource*/
	//Users *UserResource
	//Emails *EmailResource
	//Keys *KeyResource
	//Brokers *BrokerResource
	//Teams *TeamResource
	//Sources *SourceResource
	/*RepoKeys *RepoKeyResource*/
}

// Guest Client that can be used to access
// public APIs that do not require authentication.
var Guest = New("", "", "", "")

// UpgradeClient builds a v2 Client from a v1 bitbucket.Client, carrying
// over its credentials unchanged.
func UpgradeClient(c *bitbucket.Client) *Client {
	return New(c.ConsumerKey, c.ConsumerSecret, c.AccessToken, c.TokenSecret)
}
package handler

import (
	"github.com/gin-gonic/gin"
	"github.com/go-playground/validator/v10"
	_ "github.com/koind/cacher/docs"
	"github.com/koind/cacher/internal/domain/repository"
	"github.com/koind/cacher/internal/domain/service"
	"github.com/pkg/errors"
	"github.com/swaggo/files"
	"github.com/swaggo/gin-swagger"
	"io/ioutil"
	"net/http"
)

var (
	// CacheKeyCannotBeEmptyErr is returned when a request omits the cache key.
	// (The message text is user-facing and intentionally kept in Russian.)
	CacheKeyCannotBeEmptyErr = errors.New("ключ кэша не может быть пустым")
)

// HTTPServer is the HTTP server exposing the cache service API.
type HTTPServer struct {
	http.Server
	router       *gin.Engine           // gin engine with all API routes registered
	domain       string                // listen address passed to ListenAndServe
	cacheService *service.CacheService // domain service backing the handlers
}

// NewHTTPServer returns a new HTTP server with the API and swagger routes
// registered on a fresh gin engine.
func NewHTTPServer(cacheService *service.CacheService, domain string) *HTTPServer {
	// Release mode + discarded default writer: suppress gin's own logging.
	gin.SetMode(gin.ReleaseMode)
	gin.DefaultWriter = ioutil.Discard

	r := gin.New()
	hs := HTTPServer{router: r, domain: domain, cacheService: cacheService}

	hs.router.GET("/get/:key", hs.GetOneHandle)
	hs.router.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
	hs.router.GET("/list", hs.GetAllHandle)
	hs.router.POST("/upsert", hs.UpsertHandle)
	hs.router.DELETE("/delete/:key", hs.DeleteHandle)

	http.Handle("/", r)

	return &hs
}

// Start runs the HTTP server; it blocks until the server stops.
func (s *HTTPServer) Start() error {
	return http.ListenAndServe(s.domain, s.router)
}

// GetOneHandle returns a single cache record by its key.
// @Summary Возвращет одну запись по ключу
// @Description Возвращет одну запись по ключу
// @Tags api
// @ID get-one-handle
// @Produce json
// @Param key path string true "ключ кэша"
// @Success 200 {object} repository.Cache
// @Failure 400,404 {object} handler.response
// @Failure 500 {object} handler.response
// @Router /get/{key} [get]
func (s *HTTPServer) GetOneHandle(c *gin.Context) {
	key := c.Param("key")
	if len(key) == 0 {
		c.JSON(http.StatusBadRequest, responseError(CacheKeyCannotBeEmptyErr))
		return
	}

	cache, err := s.cacheService.GetOneByKey(c, key)
	if err != nil {
		c.JSON(http.StatusBadRequest, responseError(err))
		return
	}

	c.JSON(http.StatusOK, responseSuccess(cache))
}

// GetAllHandle returns all cache records.
// @Summary Возвращет все записи
// @Description Возвращет все записи
// @Tags api
// @ID get-all-handle
// @Produce json
// @Success 200 {array} repository.Cache
// @Failure 400,404 {object} handler.response
// @Failure 500 {object} handler.response
// @Router /list [get]
func (s *HTTPServer) GetAllHandle(c *gin.Context) {
	list, err := s.cacheService.GetAll(c)
	if err != nil {
		c.JSON(http.StatusInternalServerError, responseError(err))
		return
	}

	c.JSON(http.StatusOK, responseSuccess(list))
}

// UpsertHandle updates the record if it exists and creates it otherwise.
// @Summary Обновить запись, если существует, и создает, если нет
// @Description Обновить запись, если существует, и создает, если нет
// @Tags api
// @ID upsert-Handle
// @Accept json
// @Produce json
// @Param data body repository.Cache true "данные кэша"
// @Success 200 {object} repository.Cache
// @Failure 400,404 {object} handler.response
// @Failure 500 {object} handler.response
// @Router /upsert [post]
func (s *HTTPServer) UpsertHandle(c *gin.Context) {
	cache := repository.Cache{}
	if err := c.ShouldBindJSON(&cache); err != nil {
		c.JSON(http.StatusBadRequest, responseError(err))
		return
	}

	// Validate the bound payload against its struct validation tags before
	// handing it to the service.
	if err := validator.New().StructCtx(c, cache); err != nil {
		c.JSON(http.StatusBadRequest, responseError(err))
		return
	}

	newCache, err := s.cacheService.Upsert(c, cache)
	if err != nil {
		c.JSON(http.StatusInternalServerError, responseError(err))
		return
	}

	c.JSON(http.StatusOK, responseSuccess(newCache))
}

// DeleteHandle deletes a single cache record by its key.
// @Summary Удаляет одну запись по ключу
// @Description Удаляет одну запись по ключу
// @Tags api
// @ID delete-handle
// @Produce json
// @Param key path string true "ключ кэша"
// @Success 200 {object} handler.response
// @Failure 400,404 {object} handler.response
// @Failure 500 {object} handler.response
// @Router /delete/{key} [delete]
func (s *HTTPServer) DeleteHandle(c *gin.Context) {
	key := c.Param("key")
	if len(key) == 0 {
		c.JSON(http.StatusBadRequest, responseError(CacheKeyCannotBeEmptyErr))
		return
	}

	err := s.cacheService.Delete(c, key)
	if err != nil {
		c.JSON(http.StatusBadRequest, responseError(err))
		return
	}

	c.JSON(http.StatusOK, responseSuccess(nil))
}
// Copyright (c) 2018 Palantir Technologies. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package integration import ( "bytes" "context" "encoding/json" "fmt" "io" "net/http" "strings" "testing" "time" "github.com/palantir/pkg/httpserver" "github.com/palantir/pkg/metrics" "github.com/palantir/witchcraft-go-logging/conjure/witchcraft/api/logging" "github.com/palantir/witchcraft-go-server/config" "github.com/palantir/witchcraft-go-server/witchcraft" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestEmitMetrics verifies that metrics are printed periodically by a Witchcraft server. // We verify both custom metrics set in the InitFunc (with tags) and server.reponse metrics from the metrics middleware. 
func TestEmitMetrics(t *testing.T) {
	logOutputBuffer := &bytes.Buffer{}
	port, err := httpserver.AvailablePort()
	require.NoError(t, err)

	server, serverErr, cleanup := createAndRunCustomTestServer(t, port, port, func(ctx context.Context, info witchcraft.InitInfo) (deferFn func(), rErr error) {
		// Register a custom tagged counter during init; it should later show
		// up in the emitted metric logs with its tag intact.
		ctx = metrics.AddTags(ctx, metrics.MustNewTag("key", "val"))
		metrics.FromContext(ctx).Counter("my-counter").Inc(13)
		// Register an endpoint that always returns 500 so the error-rate
		// meter fires when it is hit below.
		return nil, info.Router.Post("/error", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
			rw.WriteHeader(500)
		}))
	}, logOutputBuffer, func(t *testing.T, initFn witchcraft.InitFunc, installCfg config.Install, logOutputBuffer io.Writer) *witchcraft.Server {
		// Emit very frequently so the test can observe at least one flush
		// within its short sleep window.
		installCfg.MetricsEmitFrequency = 100 * time.Millisecond
		return createTestServer(t, initFn, installCfg, logOutputBuffer)
	})
	defer func() {
		require.NoError(t, server.Close())
	}()
	defer cleanup()

	// Make POST that will 404 to trigger request size and error rate metrics
	_, err = testServerClient().Post(fmt.Sprintf("https://localhost:%d/%s/%s", port, basePath, "error"), "application/json", strings.NewReader("{}"))
	require.NoError(t, err)

	// Allow the metric emitter to do its thing.
	time.Sleep(150 * time.Millisecond)

	// Extract the metric.1 log entries from the captured log output; each
	// matching line is a JSON-encoded MetricLogV1.
	parts := strings.Split(logOutputBuffer.String(), "\n")
	var metricLogs []logging.MetricLogV1
	for _, curr := range parts {
		if strings.Contains(curr, `"metric.1"`) {
			var currLog logging.MetricLogV1
			require.NoError(t, json.Unmarshal([]byte(curr), &currLog))
			metricLogs = append(metricLogs, currLog)
		}
	}

	// Verify every expected metric appeared, with the expected type and
	// non-zero values, and that nothing unexpected was emitted.
	var seenMyCounter, seenResponseTimer, seenResponseSize, seenRequestSize, seenResponseError bool
	for _, metricLog := range metricLogs {
		switch metricLog.MetricName {
		case "my-counter":
			seenMyCounter = true
			assert.Equal(t, "counter", metricLog.MetricType, "my-counter metric had incorrect type")
			assert.Equal(t, map[string]interface{}{"count": json.Number("13")}, metricLog.Values)
			assert.Equal(t, map[string]string{"key": "val"}, metricLog.Tags)
		case "server.response":
			seenResponseTimer = true
			assert.Equal(t, "timer", metricLog.MetricType, "server.response metric had incorrect type")
			assert.NotZero(t, metricLog.Values["count"])
			assert.NotZero(t, metricLog.Values["mean"])
			assert.NotZero(t, metricLog.Values["max"])
			assert.NotZero(t, metricLog.Values["min"])
		case "server.request.size":
			seenRequestSize = true
			assert.Equal(t, "histogram", metricLog.MetricType, "server.response metric had incorrect type")
			assert.NotZero(t, metricLog.Values["count"])
		case "server.response.size":
			seenResponseSize = true
			assert.Equal(t, "histogram", metricLog.MetricType, "server.response metric had incorrect type")
			assert.NotZero(t, metricLog.Values["count"])
		case "server.response.error":
			seenResponseError = true
			assert.Equal(t, "meter", metricLog.MetricType, "server.response metric had incorrect type")
			assert.NotZero(t, metricLog.Values["count"])
		default:
			assert.Fail(t, "unexpected metric encountered: %s", metricLog.MetricName)
		}
	}
	assert.True(t, seenMyCounter, "my-counter metric was not emitted")
	assert.True(t, seenResponseTimer, "server.response metric was not emitted")
	assert.True(t, seenRequestSize, "server.request.size metric was not emitted")
	assert.True(t, seenResponseSize, "server.response.size metric was not emitted")
	assert.True(t, seenResponseError, "server.response.error metric was not emitted")

	// Drain any server error without blocking: the server should not have
	// failed during the test.
	select {
	case err := <-serverErr:
		require.NoError(t, err)
	default:
	}
}
package shp

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"strings"
)

// magic is the shapefile file code stored big-endian in the first four
// bytes of the main file header (decimal 9994).
const magic int32 = 0x0000270a

// Reader provides a interface for reading Shapefiles. Calls
// to the Next method will iterate through the objects in the
// Shapefile. After a call to Next the object will be available
// through the Shape method.
type Reader struct {
	GeometryType ShapeType // shape type declared in the SHP header
	bbox         Box       // bounding box from the SHP header
	err          error     // last error recorded by readHeaders/Next

	//shpFs io.ReadCloser
	shp   io.Reader // SHP geometry stream
	shape Shape     // most recently decoded shape
	num   int32     // record number of the most recent shape
	//filename   string
	filelength int64 // file length from the SHP header

	dbf io.Reader // optional DBF attribute stream (set via WithDBF)
	//readSeekCloser
	dbfSeek         io.ReadSeeker // optional seekable DBF stream (set via WithSeekableDBF)
	dbfFields       []Field       // field descriptors parsed from the DBF header
	dbfNumRecords   int32         // record count from the DBF header
	dbfHeaderLength int16         // DBF header length in bytes
	dbfRecordLength int16         // DBF record length in bytes
}

// readSeekCloser combines reading, seeking and closing. Currently unused;
// kept alongside the commented-out file-backed fields above.
type readSeekCloser interface {
	io.Reader
	io.Seeker
	io.Closer
}

// ProviderConfigurator is the Reader configurator
type ProviderConfigurator func(*Reader) error

// New Creates a Reader from streams
func New(shp io.Reader, conf ...ProviderConfigurator) (*Reader, error) {
	if shp == nil {
		return nil, fmt.Errorf("missing shp reader")
	}
	s := &Reader{shp: shp}
	// Apply optional configurators (e.g. WithDBF / WithSeekableDBF).
	for _, cnf := range conf {
		if err := cnf(s); err != nil {
			return nil, err
		}
	}
	// Parse the SHP header eagerly so GeometryType and BBox are usable
	// immediately after construction.
	return s, s.readHeaders()
}

// WithDBF appends a io.Reader as DBF source
func WithDBF(dbf io.Reader) ProviderConfigurator {
	return func(r *Reader) error {
		if dbf == nil {
			return fmt.Errorf("missing reader")
		}
		// Only one DBF source may be configured, in either form.
		if r.dbfSeek != nil {
			return fmt.Errorf("you can only provide one DBF source")
		}
		r.dbf = dbf
		return nil
	}
}

// WithSeekableDBF appends a io.ReadSeeker as DBF source
func WithSeekableDBF(dbf io.ReadSeeker) ProviderConfigurator {
	return func(r *Reader) error {
		if dbf == nil {
			return fmt.Errorf("missing readseeker")
		}
		// Only one DBF source may be configured, in either form.
		if r.dbf != nil {
			return fmt.Errorf("you can only provide one DBF source")
		}
		r.dbfSeek = dbf
		return nil
	}
}

// BBox returns the bounding box of the shapefile.
func (r *Reader) BBox() Box {
	return r.bbox
}

// Read and parse headers in the Shapefile. This will
// fill out GeometryType, filelength and bbox.
func (r *Reader) readHeaders() error { er := &errReader{Reader: r.shp} var ( m int32 filelength int32 ) // Read magic err := binary.Read(er, binary.BigEndian, &m) if err != nil { return err } if magic != m { return fmt.Errorf("wrong magic, expected %04x, got %04x", magic, m) } // skip next 5 uint32 _, _ = r.shp.Read(make([]byte, 20)) err = binary.Read(er, binary.BigEndian, &filelength) if err != nil { return err } r.filelength = int64(filelength) // skip version (int32) _, _ = r.shp.Read(make([]byte, 4)) // Read Type err = binary.Read(er, binary.LittleEndian, &r.GeometryType) if err != nil { return err } r.bbox.MinX = readFloat64(er) r.bbox.MinY = readFloat64(er) r.bbox.MaxX = readFloat64(er) r.bbox.MaxY = readFloat64(er) // skip next 32 bytes _, _ = r.shp.Read(make([]byte, 32)) return er.e } func readFloat64(r io.Reader) float64 { var bits uint64 binary.Read(r, binary.LittleEndian, &bits) return math.Float64frombits(bits) } // Close closes the Shapefile. func (r *Reader) Close() error { /*if r.err == nil && r.shpFs != nil { r.err = r.shpFs.Close() if r.dbf != nil { r.dbf.Close() } } return r.err*/ return nil } // Shape returns the most recent feature that was read by // a call to Next. It returns two values, the int is the // object index starting from zero in the shapefile which // can be used as row in ReadAttribute, and the Shape is the object. func (r *Reader) Shape() (int, Shape) { return int(r.num) - 1, r.shape } // Attribute returns value of the n-th attribute of the most recent feature // that was read by a call to Next. func (r *Reader) Attribute(n int) string { return r.ReadAttribute(int(r.num)-1, n) } // newShape creates a new shape with a given type. 
func newShape(shapetype ShapeType) (Shape, error) { switch shapetype { case NULL: return new(Null), nil case POINT: return new(Point), nil case POLYLINE: return new(PolyLine), nil case POLYGON: return new(Polygon), nil case MULTIPOINT: return new(MultiPoint), nil case POINTZ: return new(PointZ), nil case POLYLINEZ: return new(PolyLineZ), nil case POLYGONZ: return new(PolygonZ), nil case MULTIPOINTZ: return new(MultiPointZ), nil case POINTM: return new(PointM), nil case POLYLINEM: return new(PolyLineM), nil case POLYGONM: return new(PolygonM), nil case MULTIPOINTM: return new(MultiPointM), nil case MULTIPATCH: return new(MultiPatch), nil default: return nil, fmt.Errorf("Unsupported shape type: %v", shapetype) } } // Next reads in the next Shape in the Shapefile, which // will then be available through the Shape method. It // returns false when the reader has reached the end of the // file or encounters an error. func (r *Reader) Next() bool { var size int32 var shapetype ShapeType er := &errReader{Reader: r.shp} _ = binary.Read(er, binary.BigEndian, &r.num) _ = binary.Read(er, binary.BigEndian, &size) // size counts the 16-bit words _ = binary.Read(er, binary.LittleEndian, &shapetype) if er.e != nil { if er.e != io.EOF { r.err = fmt.Errorf("Error when reading metadata of next shape: %v", er.e) } else { r.err = io.EOF } return false } var err error r.shape, err = newShape(shapetype) if err != nil { r.err = fmt.Errorf("Error decoding shape type: %v", err) return false } count := r.shape.read(er) if er.e != nil { r.err = fmt.Errorf("Error while reading next shape: %v", er.e) return false } offset := 2*size - int32(count) - 4 if offset < 0 { r.err = fmt.Errorf("too many bytes were read") return false } // move to next object if offset > 0 { _, err = r.shp.Read(make([]byte, offset)) if err != nil && err != io.EOF { r.err = err } if err != nil { return false } } return true } // Opens DBF file using r.filename + "dbf". 
This method // will parse the header and fill out all dbf* values int // the f object. func (r *Reader) openDbf() (err error) { if r.dbf == nil && r.dbfSeek == nil { return fmt.Errorf("missing DBF") } dbf := r.dbf if dbf == nil { dbf = r.dbfSeek } // skip next 4 bytes _, _ = r.shp.Read(make([]byte, 4)) // read header binary.Read(dbf, binary.LittleEndian, &r.dbfNumRecords) binary.Read(dbf, binary.LittleEndian, &r.dbfHeaderLength) binary.Read(dbf, binary.LittleEndian, &r.dbfRecordLength) // skip padding _, _ = r.shp.Read(make([]byte, 20)) numFields := int(math.Floor(float64(r.dbfHeaderLength-33) / 32.0)) r.dbfFields = make([]Field, numFields) binary.Read(dbf, binary.LittleEndian, &r.dbfFields) return } // Fields returns a slice of Fields that are present in the // DBF table. func (r *Reader) Fields() []Field { r.openDbf() // make sure we have dbf file to read from return r.dbfFields } // Err returns the last non-EOF error encountered. func (r *Reader) Err() error { if r.err == io.EOF { return nil } return r.err } // AttributeCount returns number of records in the DBF table. func (r *Reader) AttributeCount() int { r.openDbf() // make sure we have a dbf file to read from return int(r.dbfNumRecords) } // ReadAttribute returns the attribute value at row for field in // the DBF table as a string. Both values starts at 0. func (r *Reader) ReadAttribute(row int, field int) string { if r.dbfSeek == nil { return "" } r.openDbf() // make sure we have a dbf file to read from seekTo := 1 + int64(r.dbfHeaderLength) + (int64(row) * int64(r.dbfRecordLength)) for n := 0; n < field; n++ { seekTo += int64(r.dbfFields[n].Size) } r.dbfSeek.Seek(seekTo, io.SeekStart) buf := make([]byte, r.dbfFields[field].Size) r.dbfSeek.Read(buf) return strings.Trim(string(buf[:]), " ") }
// Package goavanza provides a minimialist Avanza API wrapper. package goavanza import ( "encoding/json" "fmt" "io/ioutil" "net/http" "net/url" "strings" ) // Client holds session id and state for the wrapper type Client struct { Username string Password string httpClient *http.Client BaseURL *url.URL IsAuthenticated bool AuthenticationSession string PushSubscriptionID string CustomerID string SecurityToken string Subscriptions []string } // New creates an instance of the client func New() *Client { return &Client{ BaseURL: &url.URL{ Host: BaseURL, Scheme: "https", }, httpClient: &http.Client{}, } } // GetPositions fetches all positions held by the current user func (c *Client) GetPositions() (*Portfolio, error) { path := &url.URL{ Path: PositionsPath, RawQuery: "sort=changeAsc", } resp, err := c.Request("GET", path, nil) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } defer resp.Body.Close() data := &Portfolio{} err = json.Unmarshal(body, &data) if err != nil { return nil, err } return data, nil } // GetOverview fetches an overview of the accounts of the current user func (c *Client) GetOverview() (*Portfolio, error) { path := &url.URL{ Path: OverviewPath, } resp, err := c.Request("GET", path, nil) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } defer resp.Body.Close() data := &Portfolio{} err = json.Unmarshal(body, &data) if err != nil { return nil, err } return data, nil } // GetAccountOverview fetches an overview for a given accountId of the current user func (c *Client) GetAccountOverview(accountID string) (*Account, error) { path := &url.URL{ Path: fmt.Sprintf(AccountOverviewPath, accountID), } resp, err := c.Request("GET", path, nil) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } defer resp.Body.Close() data := &Account{} err = json.Unmarshal(body, &data) if err != nil { return nil, 
err } return data, nil } // GetDealsAndOrders fetches recent transactions and orders by the current user func (c *Client) GetDealsAndOrders() { } // GetTransactions fetches specific transaction details for a given account func (c *Client) GetTransactions(accountID string, options map[string]string) { } // GetWatchlists fetches the current user's watchlists func (c *Client) GetWatchlists() { } // AddToWatchlist adds an instrument to a watchlist func (c *Client) AddToWatchlist(instrumentID string, watchlistID string) { } // GetStock fetches information about a stock func (c *Client) GetStock(id string) (*Stock, error) { path := &url.URL{ Path: fmt.Sprintf(StockPath, id), } resp, err := c.Request("GET", path, nil) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } defer resp.Body.Close() data := &Stock{} err = json.Unmarshal(body, &data) if err != nil { return nil, err } return data, nil } // GetFund fetches information about a fund func (c *Client) GetFund(id string) (*Fund, error) { path := &url.URL{ Path: fmt.Sprintf(FundPath, id), } resp, err := c.Request("GET", path, nil) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } defer resp.Body.Close() data := &Fund{} err = json.Unmarshal(body, &data) if err != nil { return nil, err } return data, nil } // GetOrderbook fetches detailed orderbook information for a given instrument. Note that both id and type is required. 
func (c *Client) GetOrderbook(id string, instrumentType string) (interface{}, error) { path := &url.URL{ Path: fmt.Sprintf(OrderbookPath, strings.ToLower(instrumentType)), RawQuery: fmt.Sprintf("orderbookId=%s", id), } resp, err := c.Request("GET", path, nil) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } defer resp.Body.Close() if strings.ToLower(instrumentType) == "fund" { data := &OrderbookFund{} err = json.Unmarshal(body, &data) if err != nil { return nil, err } return *data, nil } data := &OrderbookStock{} err = json.Unmarshal(body, &data) if err != nil { return nil, err } return *data, nil } // GetOrderbooks fetches a list of orderbook information. func (c *Client) GetOrderbooks(ids map[string]string) { } // GetChartdata fetches data points for a given orderbook id. func (c *Client) GetChartdata(id string, period string) { // Default period to OneYear } // PlaceOrder places an order. // options An object containing the following properties: price, validUntil ("Y-m-d"), volume, orderbookId, // orderType (either "BUY" or "SELL") and accountId. func (c *Client) PlaceOrder(options map[string]string) { } // CheckOrder fetches a request status (a request is what precedes an order) func (c *Client) CheckOrder(accountID string, requestID string) { } // DeleteOrder deletes an order func (c *Client) DeleteOrder(accountID string, orderID string) { } // Search searches for the given query. If type is supplied, only search for results of specified type. 
func (c *Client) Search(query string, instrumentType string) {
}

// Authenticate logs in with the given credentials and stores the resulting
// session state (authentication session, customer id, push subscription id,
// security token) on the client, marking it authenticated on success.
func (c *Client) Authenticate(username string, password string) error {
	// BUG FIX: struct tag values must be quoted (`json:"..."`). The original
	// unquoted form (`json:authenticationSession`) is not a valid struct tag
	// (go vet flags it), so decoding only worked through encoding/json's
	// case-insensitive field-name fallback.
	type authResponse struct {
		AuthenticationSession string `json:"authenticationSession"`
		PushSubscriptionID    string `json:"pushSubscriptionId"`
		RegistrationComplete  bool   `json:"registrationComplete"`
		CustomerID            string `json:"customerId"`
	}

	authData := map[string]string{
		"maxInactiveMinutes": MaxInactiveMinutes,
		"password":           password,
		"username":           username,
	}
	// Marshal errors were silently discarded before; surface them.
	jsonData, err := json.Marshal(authData)
	if err != nil {
		return err
	}

	path := &url.URL{Path: AuthenticationPath}
	resp, err := c.Request("POST", path, jsonData)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	var data authResponse
	if err = json.Unmarshal(body, &data); err != nil {
		return err
	}

	c.AuthenticationSession = data.AuthenticationSession
	c.CustomerID = data.CustomerID
	c.PushSubscriptionID = data.PushSubscriptionID
	c.SecurityToken = resp.Header.Get("X-Securitytoken")
	c.IsAuthenticated = true

	return nil
}
package dynamodb

// machineTypeEntityKind is the datastore entity kind used for machine types.
var machineTypeEntityKind = "MachineType"

// machineTypeModel is the stored representation of a machine type, mapped
// via datastore struct tags. Fields marked noindex are not indexed.
type machineTypeModel struct {
	DisplayName string   `datastore:"name,noindex"`     // human-readable name
	Features    []string `datastore:"features,noindex"` // feature identifiers
	Login       string   `datastore:"login"`            // login name; the only indexed field
	Password    string   `datastore:"password,noindex"` // NOTE(review): appears to be stored as-is — confirm it is not plaintext
}
package p9p

import (
	"net"
	"testing"
)

// testConn builds a connected client/server pair of *Conn over a loopback
// TCP socket, failing the test on any setup error.
func testConn(t *testing.T) (client, server *Conn) {
	t.Helper()

	fd, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}

	// Accept one connection in the background; the channel carries either
	// the accepted *Conn or the Accept error.
	wait := make(chan interface{})
	go func() {
		bio, err := Accept(fd)
		if err != nil {
			wait <- err
			return
		}
		wait <- bio
		close(wait)
	}()

	bio0, err := Dial("tcp", fd.Addr().String())
	if err != nil {
		t.Fatal(err)
	}

	rt := <-wait
	if acceptErr, ok := rt.(error); ok {
		// BUG FIX: the original called t.Fatal(err) here, but err holds the
		// (nil) Dial result — the actual Accept error carried in rt was lost.
		t.Fatal(acceptErr)
		return nil, nil
	}
	return bio0, rt.(*Conn)
}

// TestConn exercises connection setup; currently skipped because it fails.
func TestConn(t *testing.T) {
	t.Skip("fails")
	testConn(t)
}

// TestVersion negotiates the protocol version on both ends and checks that
// client and server agree; currently skipped because it fails.
func TestVersion(t *testing.T) {
	t.Skip("fails")
	c, s := testConn(t)
	cv, err := c.Version()
	if err != nil {
		t.Fatalf("client version error: %s", err)
	}
	sv, err := s.Version()
	if err != nil {
		t.Fatalf("server version error: %s", err)
	}
	if cv != sv {
		t.Fatalf("client and server differ: %q vs %q\n", cv, sv)
	}
}
package handlers

import (
	"net/http"
	"net/url"

	"github.com/google/uuid"
	"github.com/ory/fosite"

	"github.com/authelia/authelia/v4/internal/middlewares"
	"github.com/authelia/authelia/v4/internal/model"
	"github.com/authelia/authelia/v4/internal/oidc"
	"github.com/authelia/authelia/v4/internal/session"
)

// handleOIDCAuthorizationConsentModeExplicit handles the explicit consent
// mode of an OIDC authorization request. When the request carries no consent
// ID query argument a new consent session is generated; otherwise the
// session identified by that argument is loaded and validated. A (nil, true)
// return means a response has already been written to rw.
func handleOIDCAuthorizationConsentModeExplicit(ctx *middlewares.AutheliaCtx, issuer *url.URL, client oidc.Client,
	userSession session.UserSession, subject uuid.UUID,
	rw http.ResponseWriter, r *http.Request, requester fosite.AuthorizeRequester) (consent *model.OAuth2ConsentSession, handled bool) {
	var (
		consentID uuid.UUID
		err       error
	)

	bytesConsentID := ctx.QueryArgs().PeekBytes(qryArgConsentID)

	switch len(bytesConsentID) {
	case 0:
		// No consent ID supplied: start a fresh consent session.
		return handleOIDCAuthorizationConsentGenerate(ctx, issuer, client, userSession, subject, rw, r, requester)
	default:
		// A consent ID was supplied; it must parse as a UUID.
		if consentID, err = uuid.ParseBytes(bytesConsentID); err != nil {
			ctx.Logger.Errorf(logFmtErrConsentParseChallengeID, requester.GetID(), client.GetID(), client.GetConsentPolicy(), bytesConsentID, err)

			ctx.Providers.OpenIDConnect.WriteAuthorizeError(ctx, rw, requester, oidc.ErrConsentMalformedChallengeID)

			return nil, true
		}

		return handleOIDCAuthorizationConsentModeExplicitWithID(ctx, issuer, client, userSession, subject, consentID, rw, r, requester)
	}
}

// handleOIDCAuthorizationConsentModeExplicitWithID loads the consent session
// identified by consentID and verifies it: the session must exist, belong to
// the authenticated subject, still be grantable, and have been authorized.
// A (consent, false) return means the caller may proceed with the session;
// (nil, true) means a response has already been written to rw.
func handleOIDCAuthorizationConsentModeExplicitWithID(ctx *middlewares.AutheliaCtx, issuer *url.URL, client oidc.Client,
	userSession session.UserSession, subject uuid.UUID, consentID uuid.UUID,
	rw http.ResponseWriter, r *http.Request, requester fosite.AuthorizeRequester) (consent *model.OAuth2ConsentSession, handled bool) {
	var (
		err error
	)

	// A zero UUID can never identify a stored consent session.
	if consentID.ID() == 0 {
		ctx.Logger.Errorf(logFmtErrConsentZeroID, requester.GetID(), client.GetID(), client.GetConsentPolicy())

		ctx.Providers.OpenIDConnect.WriteAuthorizeError(ctx, rw, requester, oidc.ErrConsentCouldNotLookup)

		return nil, true
	}

	if consent, err = ctx.Providers.StorageProvider.LoadOAuth2ConsentSessionByChallengeID(ctx, consentID); err != nil {
		ctx.Logger.Errorf(logFmtErrConsentLookupLoadingSession, requester.GetID(), client.GetID(), client.GetConsentPolicy(), consentID, err)

		ctx.Providers.OpenIDConnect.WriteAuthorizeError(ctx, rw, requester, oidc.ErrConsentCouldNotLookup)

		return nil, true
	}

	// The stored session must belong to the currently authenticated subject.
	if subject.ID() != consent.Subject.UUID.ID() {
		ctx.Logger.Errorf(logFmtErrConsentSessionSubjectNotAuthorized, requester.GetID(), client.GetID(), client.GetConsentPolicy(), consent.ChallengeID, userSession.Username, subject, consent.Subject.UUID)

		ctx.Providers.OpenIDConnect.WriteAuthorizeError(ctx, rw, requester, oidc.ErrConsentCouldNotLookup)

		return nil, true
	}

	if !consent.CanGrant() {
		ctx.Logger.Errorf(logFmtErrConsentCantGrant, requester.GetID(), client.GetID(), client.GetConsentPolicy(), consent.ChallengeID, "explicit")

		ctx.Providers.OpenIDConnect.WriteAuthorizeError(ctx, rw, requester, oidc.ErrConsentCouldNotPerform)

		return nil, true
	}

	if !consent.IsAuthorized() {
		if consent.Responded() {
			// The user responded to this session by rejecting it.
			ctx.Logger.Errorf(logFmtErrConsentCantGrantRejected, requester.GetID(), client.GetID(), client.GetConsentPolicy(), consent.ChallengeID)

			ctx.Providers.OpenIDConnect.WriteAuthorizeError(ctx, rw, requester, fosite.ErrAccessDenied)

			return nil, true
		}

		// Not yet responded: redirect the user to the consent UI.
		handleOIDCAuthorizationConsentRedirect(ctx, issuer, consent, client, userSession, rw, r, requester)

		return nil, true
	}

	return consent, false
}
package golang

import (
	"reflect"
	"testing"
)

// tests is the table of decode cases: an encoded sequence plus the first
// element of the expected decoded output.
var tests = []struct {
	encoded []int
	first   int
	output  []int
}{
	{encoded: []int{1, 2, 3}, first: 1, output: []int{1, 0, 2, 1}},
	{encoded: []int{6, 2, 7, 3}, first: 4, output: []int{4, 2, 0, 7, 4}},
}

// TestDecode runs decode over every table entry and compares the full
// decoded slice against the expected output.
func TestDecode(t *testing.T) {
	for i, tc := range tests {
		got := decode(tc.encoded, tc.first)
		if !reflect.DeepEqual(got, tc.output) {
			t.Errorf(
				"TestCase[%d]: encoded = %v first = %d, expected %v but get %v",
				i, tc.encoded, tc.first, tc.output, got,
			)
		}
	}
}
package metrics

import (
	"context"
	"testing"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// statsHandler is the shared server-side opencensus handler used by the
// server-metrics tests below.
var statsHandler = &ocgrpc.ServerHandler{}

// testInvoker fakes a gRPC unary invocation: it fills the reply and drives the
// configured stats.Handler through the tag/in-payload/out-payload/end lifecycle
// so the interceptor under test records metrics without a real server.
type testInvoker struct {
	invokeResult error
	statsHandler stats.Handler
}

// UnaryInvoke writes "hello" into the reply and emits the synthetic client-side
// stats events, returning the configured invokeResult as the RPC outcome.
func (t testInvoker) UnaryInvoke(ctx context.Context, method string, _, reply any, _ *grpc.ClientConn, _ ...grpc.CallOption) error {
	r := reply.(*wrapperspb.StringValue)
	r.Value = "hello"

	ctx = t.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
	t.statsHandler.HandleRPC(ctx, &stats.InPayload{Client: true, Length: len(r.Value)})
	t.statsHandler.HandleRPC(ctx, &stats.OutPayload{Client: true, Length: len(r.Value)})
	t.statsHandler.HandleRPC(ctx, &stats.End{Client: true, Error: t.invokeResult})
	return t.invokeResult
}

// newTestCC returns a client connection that is never actually used for I/O; it
// only supplies the target string ("dns:localhost:9999") recorded in metric tags.
// NOTE(review): grpc.WithInsecure is deprecated in favour of
// insecure.NewCredentials — fine for a test, but worth migrating.
func newTestCC(t *testing.T) *grpc.ClientConn {
	testCC, err := grpc.Dial("dns:localhost:9999", grpc.WithInsecure())
	if err != nil {
		t.Fatalf("Failed to create testCC: %s", err)
	}
	return testCC
}

// Test_GRPCClientInterceptor drives the client interceptor through the fake
// invoker and compares the recorded opencensus view rows against their
// expected string renderings.
func Test_GRPCClientInterceptor(t *testing.T) {
	interceptor := GRPCClientInterceptor("test_service")
	tests := []struct {
		name                          string
		method                        string
		errorCode                     error
		wantgrpcClientResponseSize    string
		wantgrpcClientRequestDuration string
		wantgrpcClientRequestCount    string
		wantgrpcClientRequestSize     string
	}{
		{
			name:                          "ok authorize",
			method:                        "/authorize.Authorizer/Authorize",
			errorCode:                     nil,
			wantgrpcClientResponseSize:    "{ { {grpc_client_status OK}{grpc_method Authorize}{grpc_service authorize.Authorizer}{host dns:localhost:9999}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
			wantgrpcClientRequestDuration: "{ { {grpc_client_status OK}{grpc_method Authorize}{grpc_service authorize.Authorizer}{host dns:localhost:9999}{service test_service} }",
			wantgrpcClientRequestCount:    "{ { {grpc_client_status OK}{grpc_method Authorize}{grpc_service authorize.Authorizer}{host dns:localhost:9999}{service test_service} }",
			wantgrpcClientRequestSize:     "{ { {grpc_client_status OK}{grpc_method Authorize}{grpc_service authorize.Authorizer}{host dns:localhost:9999}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
		},
		{
			// gRPC code 14 is UNAVAILABLE.
			name:                          "unknown validate",
			method:                        "/authenticate.Authenticator/Validate",
			errorCode:                     status.Error(14, ""),
			wantgrpcClientResponseSize:    "{ { {grpc_client_status UNAVAILABLE}{grpc_method Validate}{grpc_service authenticate.Authenticator}{host dns:localhost:9999}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
			wantgrpcClientRequestDuration: "{ { {grpc_client_status UNAVAILABLE}{grpc_method Validate}{grpc_service authenticate.Authenticator}{host dns:localhost:9999}{service test_service} }",
			wantgrpcClientRequestCount:    "{ { {grpc_client_status UNAVAILABLE}{grpc_method Validate}{grpc_service authenticate.Authenticator}{host dns:localhost:9999}{service test_service} }",
			wantgrpcClientRequestSize:     "{ { {grpc_client_status UNAVAILABLE}{grpc_method Validate}{grpc_service authenticate.Authenticator}{host dns:localhost:9999}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
		},
		{
			// An unparsable method name yields rows without method/service tags.
			name:                          "broken method parsing",
			method:                        "f",
			errorCode:                     status.Error(14, ""),
			wantgrpcClientResponseSize:    "{ { {grpc_client_status UNAVAILABLE}{host dns:localhost:9999}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
			wantgrpcClientRequestDuration: "{ { {grpc_client_status UNAVAILABLE}{host dns:localhost:9999}{service test_service} }",
			wantgrpcClientRequestCount:    "{ { {grpc_client_status UNAVAILABLE}{host dns:localhost:9999}{service test_service} }",
			wantgrpcClientRequestSize:     "{ { {grpc_client_status UNAVAILABLE}{host dns:localhost:9999}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Re-register the views so each case starts from empty aggregation data.
			view.Unregister(GRPCClientViews...)
			view.Register(GRPCClientViews...)
			invoker := testInvoker{
				invokeResult: tt.errorCode,
				statsHandler: &ocgrpc.ClientHandler{},
			}
			var reply wrapperspb.StringValue
			interceptor(context.Background(), tt.method, nil, &reply, newTestCC(t), invoker.UnaryInvoke)

			testDataRetrieval(GRPCClientResponseSizeView, t, tt.wantgrpcClientResponseSize)
			testDataRetrieval(GRPCClientRequestDurationView, t, tt.wantgrpcClientRequestDuration)
			testDataRetrieval(GRPCClientRequestCountView, t, tt.wantgrpcClientRequestCount)
			testDataRetrieval(GRPCClientRequestSizeView, t, tt.wantgrpcClientRequestSize)
		})
	}
}

// mockServerRPCHandle simulates one server-side RPC through both the package
// statsHandler and the handler under test, using a fixed 5-byte payload.
func mockServerRPCHandle(metricsHandler *GRPCServerMetricsHandler, method string, errorCode error) {
	message := "hello"
	ctx := statsHandler.TagRPC(context.Background(), &stats.RPCTagInfo{FullMethodName: method})
	ctx = metricsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
	statsHandler.HandleRPC(ctx, &stats.InPayload{Client: false, Length: len(message)})
	statsHandler.HandleRPC(ctx, &stats.OutPayload{Client: false, Length: len(message)})
	statsHandler.HandleRPC(ctx, &stats.End{Client: false, Error: errorCode})
}

// Test_GRPCServerMetricsHandler mirrors the client test for the server-side
// metrics handler: one simulated RPC per case, then row-by-row comparison.
func Test_GRPCServerMetricsHandler(t *testing.T) {
	tests := []struct {
		name                          string
		method                        string
		errorCode                     error
		wantgrpcServerResponseSize    string
		wantgrpcServerRequestDuration string
		wantgrpcServerRequestCount    string
		wantgrpcServerRequestSizeView string
	}{
		{
			name:                          "ok authorize",
			method:                        "/authorize.Authorizer/Authorize",
			errorCode:                     nil,
			wantgrpcServerResponseSize:    "{ { {grpc_method Authorize}{grpc_server_status OK}{grpc_service authorize.Authorizer}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
			wantgrpcServerRequestDuration: "{ { {grpc_method Authorize}{grpc_server_status OK}{grpc_service authorize.Authorizer}{service test_service} }",
			wantgrpcServerRequestCount:    "{ { {grpc_method Authorize}{grpc_server_status OK}{grpc_service authorize.Authorizer}{service test_service} }",
			wantgrpcServerRequestSizeView: "{ { {grpc_method Authorize}{grpc_server_status OK}{grpc_service authorize.Authorizer}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
		},
		{
			// gRPC code 14 is UNAVAILABLE.
			name:                          "unknown validate",
			method:                        "/authenticate.Authenticator/Validate",
			errorCode:                     status.Error(14, ""),
			wantgrpcServerResponseSize:    "{ { {grpc_method Validate}{grpc_server_status UNAVAILABLE}{grpc_service authenticate.Authenticator}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
			wantgrpcServerRequestDuration: "{ { {grpc_method Validate}{grpc_server_status UNAVAILABLE}{grpc_service authenticate.Authenticator}{service test_service} }",
			wantgrpcServerRequestCount:    "{ { {grpc_method Validate}{grpc_server_status UNAVAILABLE}{grpc_service authenticate.Authenticator}{service test_service} }",
			wantgrpcServerRequestSizeView: "{ { {grpc_method Validate}{grpc_server_status UNAVAILABLE}{grpc_service authenticate.Authenticator}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
		},
		{
			// An unparsable method name yields rows without method/service tags.
			name:                          "broken method parsing",
			method:                        "f",
			errorCode:                     status.Error(14, ""),
			wantgrpcServerResponseSize:    "{ { {grpc_server_status UNAVAILABLE}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
			wantgrpcServerRequestDuration: "{ { {grpc_server_status UNAVAILABLE}{service test_service} }",
			wantgrpcServerRequestCount:    "{ { {grpc_server_status UNAVAILABLE}{service test_service} }",
			wantgrpcServerRequestSizeView: "{ { {grpc_server_status UNAVAILABLE}{service test_service} }&{1 5 5 5 0 [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Re-register the views so each case starts from empty aggregation data.
			view.Unregister(GRPCServerViews...)
			view.Register(GRPCServerViews...)
			metricsHandler := NewGRPCServerMetricsHandler("test_service")
			mockServerRPCHandle(metricsHandler, tt.method, tt.errorCode)

			testDataRetrieval(GRPCServerResponseSizeView, t, tt.wantgrpcServerResponseSize)
			testDataRetrieval(GRPCServerRequestDurationView, t, tt.wantgrpcServerRequestDuration)
			testDataRetrieval(GRPCServerRequestCountView, t, tt.wantgrpcServerRequestCount)
			testDataRetrieval(GRPCServerRequestSizeView, t, tt.wantgrpcServerRequestSizeView)
		})
	}
}
package ssubnetting

import (
	"fmt"
	"os"
	"strconv"
)

// CaptureData reads and parses the subnetting configuration from the command line.
// @return (ip, mask, host requirements, sort, flo, subtr, add, fok)
func CaptureData() ([4]int, int, []int, string, bool, int, int, bool) {
	var (
		ip        [4]int
		hostsReq  []int
		fSubtr    int
		fsubtrerr error
		fAdd      int
		fadderr   error
	)

	fIp, _ := GetFlagValue("-ip")
	strmasks, _ := GetFlagValue("-mask")
	fMask, err := strconv.Atoi(strmasks)
	fReq, _ := GetFlagValue("-req")
	fSort, fse := GetFlagValue("-sort")      // fse: Flag Sort Exists.
	_, flo := GetFlagValue("-lo")            // flo: Flag leftover.
	strsubtr, fste := GetFlagValue("-subtr") // fste: Flag Subtract Exists.
	stradd, fadde := GetFlagValue("-add")    // fadde: Flag Add Exists.

	// -subtr: parse the value; on failure warn on stderr and drop the flag.
	if fste {
		fSubtr, fsubtrerr = strconv.Atoi(strsubtr)
		if fsubtrerr != nil {
			subtrErrMsg := "el valor '%s' del parámetro -subtr no es válido por lo cual se omitirá.\n"
			fmt.Fprintf(os.Stderr, subtrErrMsg, strsubtr)
			fste = false
		}
	}

	// -add: same treatment as -subtr.
	if fadde {
		fAdd, fadderr = strconv.Atoi(stradd)
		if fadderr != nil {
			subtrErrMsg := "el valor '%s' del parámetro -add no es válido por lo cual se omitirá.\n"
			fmt.Fprintf(os.Stderr, subtrErrMsg, stradd)
			fadde = false
		}
	}

	// -subtr and -add are mutually exclusive; when both appear, both are discarded.
	if fadde && fste {
		msg := "Los flags -subtr y -add no se pueden usar conjuntamente. Se ignoraran."
		fmt.Fprintln(os.Stderr, msg)
		fadde = false
		fste = false
		fSubtr = 0
		fAdd = 0
	}

	// -sort with no value defaults to descending order.
	if fse && fSort == ""{
		fSort = "desc"
	}

	// Parse the dotted IP into its four octets.
	_ip, fok := StrToSeqOfInt(fIp, ".")
	if(fok) {
		for i := 0; i < 4; i++ {
			ip[i] = _ip[i]
		}
	} else {
		fmt.Fprintln(os.Stderr, "Falló al parsear la IP.")
		return ip, fMask, hostsReq, fSort, flo, fSubtr, fAdd, false
	}

	// Host requirements are only mandatory when neither -subtr nor -add applies.
	hostsReq, fok = StrToSeqOfInt(fReq, " ")
	if !fok && !(fste || fadde) {
		fmt.Fprintln(os.Stderr, "Falló al parsear los requerimeintos.")
		return ip, fMask, hostsReq, fSort, flo, fSubtr, fAdd, false
	}

	// A bad mask is likewise only fatal when neither -subtr nor -add applies.
	if err != nil && !(fste || fadde) {
		fMask = 32
		fmt.Fprintln(os.Stderr, "Falló al convertir la máscara a entero.")
		return ip, fMask, hostsReq, fSort, flo, fSubtr, fAdd, false
	}

	return ip, fMask, hostsReq, fSort, flo, fSubtr, fAdd, true
}

// PrintDDN prints an address in Dot Decimal Notation.
func PrintDDN(a [4]int) {
	l := len(a)
	if(0 < l) {
		fmt.Print(a[0])
	}
	for i := 1; i < l; i++ {
		fmt.Printf(".%d", a[i])
	}
}

// DisplayNet prints a network with the format:
// [network name]: [network]
func DisplayNet(sn [4]int, message string) {
	fmt.Printf("%s: ", message)
	PrintDDN(sn)
	fmt.Println()
}

// PrintSubnetting prints the details of the subnetting, and optionally (flo)
// the leftover address block.
func PrintSubnetting(sn []Subnet, flo bool, leftoverAddr [4]int, leftoverHosts int) {
	fmt.Println("Subneteo:")
	fmt.Println("-----------------------------------------")
	l := len(sn)
	for i := 0; i < l; i++ {
		fmt.Printf("Subred [%d]:\n", i)
		DisplayNet(sn[i].Id, "ID de red")
		DisplayNet(sn[i].Broadcast, "Broadcast de red")
		DisplayNet(sn[i].FirstU, "Primera dirección usable")
		DisplayNet(sn[i].LastU, "Última dirección usable")
		fmt.Printf("Máscara de subred (Decimal): %d\n", sn[i].DecMask)
		DisplayNet(sn[i].DDNMask, "Máscara de subred (DDN)")
		fmt.Printf("Direcciones disponibles: %d\n", sn[i].HostsAvailable)
		fmt.Println("-----------------------------------------")
	}
	if flo {
		if leftoverHosts != 0 {
			DisplayNet(leftoverAddr, "Dirección de inicio del bloque sobrante")
		}
		fmt.Printf("Direcciones sobrantes: %d\n", leftoverHosts)
		fmt.Println("-----------------------------------------")
	}
}
// Package simple implements a Transformer that supports basic replacement based transformations
package simple

import (
	"image/color" //nolint:misspell // I dont control others' package names
	"strings"

	"awesome-dragon.science/go/goGoGameBot/pkg/format/transformer/intermediate"
	"awesome-dragon.science/go/goGoGameBot/pkg/format/transformer/tokeniser"
)

// Conf Holds a replacemap and a colourmap in a format that's simple to store in XML
type Conf struct {
	ReplaceMap struct {
		Bold          string
		Italic        string
		Underline     string
		Strikethrough string
		Reset         string
	} `xml:"replace_map" comment:"Replace the listed formatting codes with the given string"`
	ColourMap []struct {
		R      uint8 `toml:"red"`
		G      uint8 `toml:"green"`
		B      uint8 `toml:"blue"`
		Mapped string
	} `toml:"map_colour" comment:"maps the given RGB colour to a string"`
}

// MakeMaps creates a replace and colour map based on the given config.
// The replace map is keyed by the intermediate-format control runes; the colour
// map is keyed by fully opaque (alpha 0xFF) RGBA values built from the config.
func (s *Conf) MakeMaps() (replaceMap map[rune]string, colourMap map[color.Color]string) {
	replaceMap = map[rune]string{
		intermediate.Bold:          s.ReplaceMap.Bold,
		intermediate.Italic:        s.ReplaceMap.Italic,
		intermediate.Underline:     s.ReplaceMap.Underline,
		intermediate.Strikethrough: s.ReplaceMap.Strikethrough,
		intermediate.Reset:         s.ReplaceMap.Reset,
	}

	colourMap = make(map[color.Color]string)
	for _, cc := range s.ColourMap {
		c := color.RGBA{
			R: cc.R,
			G: cc.G,
			B: cc.B,
			A: 0xFF, // configs carry RGB only; alpha is forced opaque.
		}
		colourMap[c] = cc.Mapped
	}

	return replaceMap, colourMap
}

// Transformer is a Transformer implementation that does basic replacement based transformation.
// Colours are handled by way of a palette and a map to transform colours in that palette to the transformer specific
// format
type Transformer struct {
	rplMap   map[rune]string
	palette  color.Palette
	colMap   map[color.Color]string
	replacer *strings.Replacer
}

// New constructs a Transformer from the given args. A colour palette will be automatically
// created from the colour map passed.
func New(replaceMap map[rune]string, colourMap map[color.Color]string) *Transformer { var palette color.Palette for col := range colourMap { palette = append(palette, col) } var repl []string for k, v := range replaceMap { if v == "" { continue } repl = append(repl, v, intermediate.SentinelString+string(k)) } for col, v := range colourMap { repl = append(repl, v, tokeniser.EmitColour(col)) } repl = append(repl, intermediate.SentinelString, intermediate.SSentinelString) return &Transformer{ rplMap: replaceMap, palette: palette, colMap: colourMap, replacer: strings.NewReplacer(repl...), // the repl slice is reversed from the map* maps, this way it does an inverse } } // Transform implements the Transformer interface. Applies the simple conversions setup in the constructor func (s *Transformer) Transform(in string) string { return tokeniser.Map(in, s.rplMap, s.colourFn) } func (s *Transformer) colourFn(in color.Color) string { if s.palette == nil || len(s.palette) == 0 { return "" } return s.colMap[s.palette.Convert(in)] } // MakeIntermediate uses a simple replace operation to convert from a transformer specific implementation to the // intermediate format func (s *Transformer) MakeIntermediate(in string) string { return s.replacer.Replace(in) }
/*
	Go Language Raspberry Pi Interface
	(c) Copyright David Thorpe 2016-2018
	All Rights Reserved
	Documentation http://djthorpe.github.io/gopi/
	For Licensing and Usage information, please see LICENSE.md
*/

// Interacts with the BME680 sensor
package main

import (
	"os"

	// Frameworks
	"github.com/djthorpe/gopi"

	// Modules
	_ "github.com/djthorpe/gopi/sys/logger"
	_ "github.com/djthorpe/sensors/sys/bme680"
)

////////////////////////////////////////////////////////////////////////////////
// MAIN FUNCTION

// Main is the gopi tool entry point.
// NOTE(review): currently a no-op placeholder — the BME680 module is loaded via
// the blank import but nothing is read from it yet.
func Main(app *gopi.AppInstance, done chan<- struct{}) error {
	return nil
}

////////////////////////////////////////////////////////////////////////////////
// BOOTSTRAP

// main builds the app configuration (MODULE_NAME is defined elsewhere in this
// package) and hands control to the gopi command line tool runner, exiting with
// its return code.
func main() {
	// Create the configuration
	config := gopi.NewAppConfig(MODULE_NAME)

	// Run the command line tool
	os.Exit(gopi.CommandLineTool(config, Main))
}
package petstore

// Tag is a petstore tag as defined by the Swagger Petstore schema; both fields
// are omitted from the JSON encoding when zero-valued.
type Tag struct {
	Id   int64  `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
}
package sail_perf

import "testing"

// TestInit verifies that New allocates the samples slice with the requested
// length and capacity and leaves every sub-slice nil.
func TestInit(t *testing.T) {
	scores := New(10, 10)

	if n := len(scores.samples); n != 10 {
		t.Errorf("Length of samples is not 10.")
	}
	if c := cap(scores.samples); c != 10 {
		t.Errorf("Capacity of samples is not 10.")
	}
	for i := range scores.samples {
		if scores.samples[i] != nil {
			t.Errorf("Found an initialized sub-slice.")
		}
	}
}
// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package operator // WithSource is an interface that can be used to set the source of an operator. type WithSource[T any] interface { SetSource(channel DataChannel[T]) } // WithSink is an interface that can be used to set the sink of an operator. type WithSink[T any] interface { SetSink(channel DataChannel[T]) } // Compose sets the sink of op1 and the source of op2. func Compose[T any](op1 WithSink[T], op2 WithSource[T]) { ch := NewSimpleDataChannel(make(chan T)) op1.SetSink(ch) op2.SetSource(ch) } // DataChannel is a channel that can be used to transfer data between operators. type DataChannel[T any] interface { Channel() chan T Finish() } // SimpleDataChannel is a simple implementation of DataChannel. type SimpleDataChannel[T any] struct { channel chan T } // NewSimpleDataChannel creates a new SimpleDataChannel. func NewSimpleDataChannel[T any](ch chan T) *SimpleDataChannel[T] { return &SimpleDataChannel[T]{channel: ch} } // Channel returns the underlying channel of the SimpleDataChannel. func (s *SimpleDataChannel[T]) Channel() chan T { return s.channel } // Finish closes the underlying channel of the SimpleDataChannel. func (s *SimpleDataChannel[T]) Finish() { close(s.channel) }
package login

import (
	"encoding/json"
	"fmt"
	"github.com/xeha-gmbh/homelab/proxmox/common"
	"github.com/xeha-gmbh/homelab/shared"
	"net/http"
	"net/url"
)

// Arguments for the 'proxmox login' command
type ProxmoxLoginRequest struct {
	shared.ExtraArgs
	Username  string
	Password  string
	Realm     string
	ApiServer string
	Force     bool // when true, always bypass the ticket cache
}

// Performs a login using the parameters supplied. This method only performs a new login attempt
// when ProxmoxLoginRequest#Force is set to true, or a ticket cache cannot be found or used.
// The bool result reports whether a fresh login was performed (true) or a
// cached subject was reused (false).
func (pl *ProxmoxLoginRequest) Login() (*common.ProxmoxSubject, bool, error) {
	if cachedSubject, err := common.ReadSubjectFromCache(); err != nil || pl.Force {
		s, e := pl.doLogin()
		return s, true, e
	} else {
		output.Info("Ticket exists in cache.", map[string]interface{}{})
		return cachedSubject, false, nil
	}
}

// Performs a real login attempt. This method returns error when
// 1) HTTP request returns error
// 2) Proxmox returns status 401 (special case)
// 3) Proxmox returns other non-200 status
// 4) Response body cannot be decoded properly
// Otherwise, it returns a nil error and a ProxmoxSubject
// NOTE(review): cases 2) and 3) both return ErrAuth despite the comment calling
// 401 a "special case" — confirm whether non-401 failures were meant to surface
// a different error.
func (pl *ProxmoxLoginRequest) doLogin() (*common.ProxmoxSubject, error) {
	var (
		err    error
		resp   *http.Response
		client = common.HttpClient()
		form   = url.Values{}
	)

	form.Add("username", pl.Username)
	form.Add("password", pl.Password)
	form.Add("realm", pl.Realm)

	resp, err = client.PostForm(loginUrl(pl.ApiServer), form)
	if err != nil {
		return nil, common.ProxmoxError(err)
	}
	defer resp.Body.Close()

	output.Debug("Login request return status code {{index .code}}", map[string]interface{}{
		"event":  "login_response",
		"code":   resp.StatusCode,
		"status": resp.Status,
	})

	if resp.StatusCode == http.StatusUnauthorized {
		return nil, ErrAuth
	}

	if resp.StatusCode != http.StatusOK {
		return nil, ErrAuth
	}

	respData := make(map[string]interface{})
	err = json.NewDecoder(resp.Body).Decode(&respData)
	if err != nil {
		return nil, shared.ErrParse
	}

	// NOTE(review): these unchecked type assertions panic if the response body
	// lacks the expected data/username/CSRFPreventionToken/ticket shape —
	// consider guarding with the two-value assertion form.
	subject := &common.ProxmoxSubject{
		Username:  respData["data"].(map[string]interface{})["username"].(string),
		CSRFToken: respData["data"].(map[string]interface{})["CSRFPreventionToken"].(string),
		Ticket:    respData["data"].(map[string]interface{})["ticket"].(string),
		ApiServer: pl.ApiServer,
	}

	output.Info("User {{index .user}} is now logged in.", map[string]interface{}{
		"event": "login_success",
		"user":  subject.Username,
	})

	return subject, nil
}

// Returns the ticket API url for the given Proxmox host.
func loginUrl(base string) string {
	return fmt.Sprintf("%s/api2/json/access/ticket", base)
}
// Package mwgrs implements Image Region Metadata as defined by the
// Metadata Working Group (MWG). The ExifTool docs contain a good
// description of the schema:
//
// https://exiftool.org/TagNames/MWG.html#Regions
package mwgrs

import (
	"fmt"

	"trimmer.io/go-xmp/xmp"
)

var (
	// NsMwgRs is the XMP namespace for MWG region metadata.
	NsMwgRs = xmp.NewNamespace("mwg-rs", "http://www.metadataworkinggroup.com/schemas/regions/", NewModel)
)

func init() {
	xmp.Register(NsMwgRs, xmp.XmpMetadata)
}

// NewModel returns an empty Regions model; the name argument is unused.
func NewModel(name string) xmp.Model {
	return &Regions{}
}

// MakeModel creates (or fetches) the mwg-rs model on the given document.
func MakeModel(d *xmp.Document) (*Regions, error) {
	m, err := d.MakeModel(NsMwgRs)
	if err != nil {
		return nil, err
	}
	x, _ := m.(*Regions)
	return x, nil
}

// FindModel returns the document's mwg-rs model, or nil when absent.
func FindModel(d *xmp.Document) *Regions {
	if m := d.FindModel(NsMwgRs); m != nil {
		return m.(*Regions)
	}
	return nil
}

// Regions is the top-level mwg-rs model holding the region info struct.
type Regions struct {
	Regions RegionInfo `xmp:"mwg-rs:Regions"`
}

// Can reports whether this model handles the given namespace name.
func (x Regions) Can(nsName string) bool {
	return NsMwgRs.GetName() == nsName
}

// Namespaces lists the namespaces implemented by this model.
func (x Regions) Namespaces() xmp.NamespaceList {
	return xmp.NamespaceList{NsMwgRs}
}

// SyncModel is a no-op: this model has no derived state to synchronize.
func (x *Regions) SyncModel(d *xmp.Document) error {
	return nil
}

// SyncFromXMP is a no-op for this model.
func (x *Regions) SyncFromXMP(d *xmp.Document) error {
	return nil
}

// SyncToXMP is a no-op for this model.
func (x Regions) SyncToXMP(d *xmp.Document) error {
	return nil
}

// CanTag reports whether the given tag path resolves to a native field.
func (x *Regions) CanTag(tag string) bool {
	_, err := xmp.GetNativeField(x, tag)
	return err == nil
}

// GetTag returns the value of a native field, wrapping errors with the namespace name.
func (x *Regions) GetTag(tag string) (string, error) {
	if v, err := xmp.GetNativeField(x, tag); err != nil {
		return "", fmt.Errorf("%s: %v", NsMwgRs.GetName(), err)
	} else {
		return v, nil
	}
}

// SetTag sets a native field, wrapping errors with the namespace name.
func (x *Regions) SetTag(tag, value string) error {
	if err := xmp.SetNativeField(x, tag, value); err != nil {
		return fmt.Errorf("%s: %v", NsMwgRs.GetName(), err)
	}
	return nil
}
package main

import (
	"math/rand"
	"strings"
	"time"
)

// IndividualConfig carries the construction parameters for an Individual.
type IndividualConfig struct {
	GeneLength  int
	FitnessCalc FitnessCalcBase
}

// Individual is a candidate solution: a fixed-length boolean gene string plus
// a lazily computed, cached fitness value.
type Individual struct {
	genes       []bool
	fitness     int
	fitnessCalc FitnessCalcBase
}

// NewIndividual builds an Individual with all genes zeroed (false); call
// GenerateIndividual to randomise it.
func NewIndividual(c IndividualConfig) *Individual {
	individual := Individual{
		genes:       make([]bool, c.GeneLength),
		fitness:     0,
		fitnessCalc: c.FitnessCalc,
	}
	return &individual
}

// GetSize returns the number of genes.
func (i *Individual) GetSize() int {
	return len(i.genes)
}

// GenerateIndividual randomises every gene with a fair coin flip.
// NOTE(review): reseeding the global source on each call is deprecated since
// Go 1.20 and weakens randomness when called in quick succession; kept here
// for behavioural compatibility.
func (i *Individual) GenerateIndividual() {
	rand.Seed(time.Now().UTC().UnixNano())
	for index := range i.genes { // idiomatic form of `for index, _ := range`
		i.genes[index] = rand.Intn(2) == 0
	}
}

// GetGene returns the gene at index, or false when index is out of range.
// The lower bound is now checked too: a negative index previously panicked.
func (i *Individual) GetGene(index int) bool {
	if 0 <= index && index < len(i.genes) {
		return i.genes[index]
	}
	return false
}

// SetGene sets the gene at index; out-of-range indices (including negative
// ones, which previously panicked) are ignored.
func (i *Individual) SetGene(index int, value bool) {
	if 0 <= index && index < len(i.genes) {
		i.genes[index] = value
	}
}

// GetFitness lazily computes and caches the fitness via the configured
// calculator. Zero doubles as the "not yet computed" sentinel, so a genuine
// fitness of 0 is recomputed on every call.
func (i *Individual) GetFitness() int {
	if i.fitness == 0 {
		i.fitness = i.fitnessCalc.GetFitness(i)
	}
	return i.fitness
}

// ToString renders the genes as a string of '1'/'0' characters.
// Uses strings.Builder instead of the previous quadratic `+=` loop.
func (i *Individual) ToString() string {
	var b strings.Builder
	b.Grow(len(i.genes))
	for _, gene := range i.genes {
		if gene {
			b.WriteByte('1')
		} else {
			b.WriteByte('0')
		}
	}
	return b.String()
}
package vm

import (
	"os"

	"github.com/spf13/cobra"

	"github.com/xeha-gmbh/homelab/shared"
)

var (
	output shared.MessagePrinter
)

// NewProxmoxVMCommand returns the `vm` parent command with its subcommands attached.
func NewProxmoxVMCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "vm",
		Short: "manage proxmox virtual machine",
	}

	cmd.AddCommand(NewProxmoxVMCreateCommand())
	return cmd
}

// NewProxmoxVMCreateCommand returns the `vm create` command with one subcommand
// per registered archetype.
func NewProxmoxVMCreateCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "create",
		Short: "create proxmox virtual machine",
	}

	for _, arch := range ArchetypeRepository().AllArchetypes() {
		// BUG FIX: capture a per-iteration copy. Before Go 1.22 the closures
		// below all shared the single loop variable, so every subcommand's
		// RunE would invoke CreateVM on the LAST archetype in the list.
		arch := arch

		subCmd := &cobra.Command{
			Use:   arch.Use(),
			Short: arch.Short(),
			Long:  arch.Long(),
			PreRunE: func(cmd *cobra.Command, args []string) error {
				cmd.SetOutput(os.Stdout)
				return cmd.ParseFlags(args)
			},
			RunE: func(cmd *cobra.Command, args []string) error {
				return arch.CreateVM()
			},
		}

		arch.BindFlags(subCmd)
		for _, requiredFlag := range arch.RequiredFlags() {
			subCmd.MarkPersistentFlagRequired(requiredFlag)
			subCmd.MarkFlagRequired(requiredFlag)
		}

		cmd.AddCommand(subCmd)
	}

	return cmd
}
package main

import "os"

// main wires the application together: database credentials come from the
// DEV_DB_* environment variables and the HTTP server listens on :8080.
func main() {
	a := App{}
	a.Initialize(
		os.Getenv("DEV_DB_USERNAME"),
		os.Getenv("DEV_DB_PASSWORD"),
		os.Getenv("DEV_DB_NAME"))

	a.Run(":8080")
}
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"os"
	"strings"

	"cloud.google.com/go/firestore"
	firebase "firebase.google.com/go"
	"github.com/bwmarrin/discordgo"
	"google.golang.org/api/option"
)

const (
	collection = "stream"
	docID      = "stream-key"
)

// dochieURL stores the given stream URL in the Firestore document
// stream/stream-key under the "url" field.
func dochieURL(url string) error {
	opt := option.WithCredentialsFile("./dochie-firebase-sdk.json")
	ctx := context.Background()
	app, err := firebase.NewApp(ctx, nil, opt)
	if err != nil {
		return err
	}
	client, err := app.Firestore(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	update := []firestore.Update{
		firestore.Update{
			Path:  "url",
			Value: url,
		},
	}
	_, err = client.Collection(collection).Doc(docID).Update(ctx, update)
	if err != nil {
		return err
	}
	return nil
}

// dochieHandler reacts to Discord messages: a message that is an https URL
// responding with HTTP 200 is persisted via dochieURL; anything else is
// rejected or logged to stderr.
func dochieHandler(s *discordgo.Session, m *discordgo.MessageCreate) {
	// Ignore the bot's own messages.
	if m.Author.ID == s.State.User.ID {
		return
	}

	url := m.Content
	if !strings.HasPrefix(url, "https://") {
		// BUG FIX: the original reply text ("Please for stream URL") was garbled.
		s.ChannelMessageSend(m.ChannelID, "Please provide a stream URL")
		return
	}

	res, err := http.Get(url)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		return
	}
	// BUG FIX: the response body was never closed, leaking the connection on
	// every checked URL.
	defer res.Body.Close()

	if res.StatusCode != 200 {
		fmt.Fprint(os.Stderr, errors.New("bad url"))
		return
	}

	if err := dochieURL(url); err != nil {
		fmt.Fprint(os.Stderr, err)
	} else {
		s.ChannelMessageSend(m.ChannelID, "Done!")
	}
}

// main starts the Discord bot (token from DISCORD_BOT_TOKEN) and blocks forever.
func main() {
	dg, err := discordgo.New("Bot " + os.Getenv("DISCORD_BOT_TOKEN"))
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(1)
	}

	dg.AddHandler(dochieHandler)

	if err := dg.Open(); err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(1)
	}
	defer dg.Close()

	// Block indefinitely; the handler does all the work.
	<-make(chan struct{})
}
package db

import (
	"errors"
	"log"
	"os"
	"path/filepath"
	"runtime"

	"github.com/joho/godotenv"
	"github.com/vmlellis/imersao/codepix-go/domain/model"
	"gorm.io/driver/postgres"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

// init loads the .env file that sits two directories above this source file.
func init() {
	_, b, _, _ := runtime.Caller(0)
	basepath := filepath.Dir(b)

	err := godotenv.Load(basepath + "/../../.env")
	if err != nil {
		log.Fatalf("Error loading .env files")
	}
}

// ConnectDB opens a gorm connection using env-driven configuration.
// env == "test" selects the dsnTest DSN; anything else uses dsn. The driver is
// chosen by the dbType env var ("postgres" or "sqlite"). When AutoMigrateDb is
// "true" the schema is migrated with logging temporarily reduced to errors.
func ConnectDB(env string) *gorm.DB {
	var dsn, dbType string
	var db *gorm.DB
	var err error

	config := &gorm.Config{}
	if os.Getenv("debug") == "true" {
		config.Logger = logger.Default.LogMode(logger.Info)
	}

	if env != "test" {
		dsn = os.Getenv("dsn")
		dbType = os.Getenv("dbType")
	} else {
		dsn = os.Getenv("dsnTest")
		dbType = os.Getenv("dbType")
	}

	if dbType == "postgres" {
		db, err = gorm.Open(postgres.Open(dsn), config)
	} else if dbType == "sqlite" {
		// BUG FIX: this branch previously re-checked "postgres", making the
		// sqlite driver unreachable and breaking test connections.
		db, err = gorm.Open(sqlite.Open(dsn), config)
	} else {
		err = errors.New("db type not suported")
	}

	if err != nil {
		// log.Fatalf exits the process; the panic that followed it was dead code.
		log.Fatalf("Error connecting to database: %v", err)
	}

	if os.Getenv("AutoMigrateDb") == "true" {
		// Silence info-level logging during migration, then restore it.
		currentLogger := db.Config.Logger
		db.Config.Logger = logger.Default.LogMode(logger.Error)
		db.AutoMigrate(&model.Bank{}, &model.Account{}, &model.PixKey{}, &model.Transaction{})
		db.Config.Logger = currentLogger
	}

	return db
}
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2

package postdata

// Response holds the response body struct for the package postdata
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/ml/post_data/MlPostJobDataResponse.ts#L23-L41
type Response struct {
	BucketCount             int64  `json:"bucket_count"`
	EarliestRecordTimestamp int64  `json:"earliest_record_timestamp"`
	EmptyBucketCount        int64  `json:"empty_bucket_count"`
	InputBytes              int64  `json:"input_bytes"`
	InputFieldCount         int64  `json:"input_field_count"`
	InputRecordCount        int64  `json:"input_record_count"`
	InvalidDateCount        int64  `json:"invalid_date_count"`
	JobId                   string `json:"job_id"`
	// NOTE(review): LastDataTime is plain int while sibling timestamps are
	// int64 — presumably mirrors the upstream spec; confirm before changing
	// (file is generated, do not hand-edit the code itself).
	LastDataTime             int   `json:"last_data_time"`
	LatestRecordTimestamp    int64 `json:"latest_record_timestamp"`
	MissingFieldCount        int64 `json:"missing_field_count"`
	OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"`
	ProcessedFieldCount      int64 `json:"processed_field_count"`
	ProcessedRecordCount     int64 `json:"processed_record_count"`
	SparseBucketCount        int64 `json:"sparse_bucket_count"`
}

// NewResponse returns a Response
func NewResponse() *Response {
	r := &Response{}
	return r
}
// Package greeting provides welcome package greeting // HelloWorld welcomes you to the world func HelloWorld() string { return "Hello, World!" }
package trello

// Config holds the settings for talking to the Trello API: credentials
// (APIKey, Token), the target board and user, list/label lookups, and a
// debug toggle. Exact semantics of Lists/Labels depend on their definitions
// elsewhere in this package.
type Config struct {
	APIKey string
	Token  string
	Board  string
	UserID string
	Lists  *Lists
	Labels *Labels
	Debug  bool
}
package parser

import (
	"io"
	"testing"

	"github.com/lalamove/konfig"
	"github.com/stretchr/testify/require"
)

// TestParserFunc checks that the Func adapter invokes the wrapped function.
func TestParserFunc(t *testing.T) {
	called := false
	p := Func(func(r io.Reader, s konfig.Values) error {
		called = true
		return nil
	})

	p.Parse(nil, nil)
	require.True(t, called)
}

// TestNopParser checks that the no-op parser always returns nil.
func TestNopParser(t *testing.T) {
	var p NopParser
	require.Nil(t, p.Parse(nil, nil))
}
package design_pattern_in_go

import "testing"

// TestNewUser exercises the functional-options constructor and logs the result.
func TestNewUser(t *testing.T) {
	u, e := NewUser("1", "da", WithAge(20), WithEmail("100231"))
	if e != nil {
		t.Log(e)
	}
	t.Log(u)
}
package models

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

// Datastore is the data-access interface consumed by handlers.
type Datastore interface {
	AllPlayers() ([]*Player, error)
}

// DB wraps *sql.DB so Datastore methods can hang off it.
type DB struct {
	*sql.DB
}

// QueryD runs a raw SQL query string.
func (db *DB) QueryD(qs string) (*sql.Rows, error) {
	return db.Query(qs)
}

// NewDB opens and pings a postgres connection, limiting the pool to 10
// idle/open connections. On any failure it logs fatally (process exit).
func NewDB(dataSourceName string) (*DB, error) {
	db, err := sql.Open("postgres", dataSourceName)
	// BUG FIX: the error must be checked BEFORE configuring the pool — on a
	// failed Open, db may be nil and SetMaxIdleConns would panic.
	if err != nil {
		log.Fatal(err)
		return nil, err
	}
	db.SetMaxIdleConns(10)
	db.SetMaxOpenConns(10)

	err = db.Ping()
	if err != nil {
		log.Fatal(err)
		return nil, err
	}
	fmt.Println("Db connection opened.")
	return &DB{db}, nil
}

// JsonNullInt64 is a sql.NullInt64 that marshals to JSON null when invalid.
type JsonNullInt64 struct {
	sql.NullInt64
}

// MarshalJSON emits the int64 value, or null when the column was NULL.
func (v JsonNullInt64) MarshalJSON() ([]byte, error) {
	if v.Valid {
		return json.Marshal(v.Int64)
	} else {
		return json.Marshal(nil)
	}
}

// UnmarshalJSON accepts either a number or null.
func (v *JsonNullInt64) UnmarshalJSON(data []byte) error {
	// Unmarshalling into a pointer will let us detect null
	var x *int64
	if err := json.Unmarshal(data, &x); err != nil {
		return err
	}
	if x != nil {
		v.Valid = true
		v.Int64 = *x
	} else {
		v.Valid = false
	}
	return nil
}

// JsonNullString is a sql.NullString that marshals to JSON null when invalid.
type JsonNullString struct {
	sql.NullString
}

// MarshalJSON emits the string value, or null when the column was NULL.
func (v JsonNullString) MarshalJSON() ([]byte, error) {
	if v.Valid {
		return json.Marshal(v.String)
	} else {
		return json.Marshal(nil)
	}
}

// UnmarshalJSON accepts either a string or null.
func (v *JsonNullString) UnmarshalJSON(data []byte) error {
	// Unmarshalling into a pointer will let us detect null
	var x *string
	if err := json.Unmarshal(data, &x); err != nil {
		return err
	}
	if x != nil {
		v.Valid = true
		v.String = *x
	} else {
		v.Valid = false
	}
	return nil
}
package main

import (
	"fmt"
	"github.com/achakravarty/30daysofgo/day15"
)

// main reads a count followed by that many integers from stdin, seeds a day15
// linked list with the first value, inserts the rest, and prints the list.
func main() {
	var count int
	fmt.Scanf("%d", &count)

	var num int
	fmt.Scanf("%d\n", &num)

	// The first value creates the root node.
	node := &day15.Node{}
	node = node.NewNode(num)

	// The remaining count-1 values are inserted one by one.
	for i := 1; i < count; i++ {
		fmt.Scanf("%d\n", &num)
		node.Insert(num)
	}

	fmt.Println(node.Display())
}
package models import ( "github.com/astaxie/beego/orm" "time" ) // TableName 设置OctConf表名 func (a *OtcConf) TableName() string { return OtcConfTBName() } // OtcConfQueryParam 用于查询的类 type OtcConfQueryParam struct { BaseQueryParam Phone string `json:"phone"` //手机号 模糊查询 StartTime string `json:"startTime"` //开始时间 EndTime string `json:"endTime"` //截止时间 } // OtcConf 实体类 type OtcConf struct { Id int `orm:"pk;column(id)"json:"id"form:"id"` //委托单最大交易额度 OrdersMaxQuota float64 `orm:"column(orders_max_quota)"json:"ordersMaxQuota"form:"ordersMaxQuota"` //委托单最小交易额度 OrdersMinQuota float64 `orm:"column(orders_min_quota)"json:"ordersMinQuota"form:"ordersMinQuota"` //买方手续费 BuyerCost float64 `orm:"column(buyer_cost)"json:"buyerCost"form:"buyerCost"` //卖方手续费 SellerCost float64 `orm:"column(seller_cost)"json:"sellerCost"form:"sellerCost"` //取消次数 CancelNumber int `orm:"column(cancel_number)"json:"cancelNumber"form:"cancelNumber"` //买方超时时间 BuyerOvertime int `orm:"column(buyer_overtime)"json:"buyerOvertime"form:"buyerOvertime"` //卖方超时时间 SellerOvertime int `orm:"column(seller_overtime)"json:"sellerOvertime"form:"sellerOvertime"` //卖家申诉超时时间[卖家发起申诉] AppealOvertime int `orm:"column(appeal_overtime)"json:"appealOvertime"form:"appealOvertime"` //买家申诉超时时间 VendeeAppealOvertime int `orm:"column(vendee_appeal_overtime)"json:"vendeeAppealOvertime"form:"vendeeAppealOvertime"` //委托挂单时间 OrderEntryTime int `orm:"column(order_entry_time)"json:"orderEntryTime"form:"orderEntryTime"` //描述说明 Msg string `orm:"column(msg)"json:"msg"form:"msg"` //交易规则 Content string `orm:"column(content)"json:"content"form:"content"` //修改人uid UserId int `orm:"column(user_id)"json:"userId"form:"userId"` //创建时间 CreateTime time.Time `orm:"auto_now_add;type(datetime);column(create_time)"json:"createTime"form:"createTime"` } func (a *OtcConfBak) TableName() string { return OtcConfBakTBName() } //副表 只做记录 type OtcConfBak struct { Id int `orm:"pk;column(id)"json:"id"form:"id"` //委托单最大交易额度 OrdersMaxQuota float64 
`orm:"column(orders_max_quota)"json:"ordersMaxQuota"form:"ordersMaxQuota"` //委托单最小交易额度 OrdersMinQuota float64 `orm:"column(orders_min_quota)"json:"ordersMinQuota"form:"ordersMinQuota"` //买方手续费 BuyerCost float32 `orm:"column(buyer_cost)"json:"buyerCost"form:"buyerCost"` //卖方手续费 SellerCost float32 `orm:"column(seller_cost)"json:"sellerCost"form:"sellerCost"` //取消次数 CancelNumber int `orm:"column(cancel_number)"json:"cancelNumber"form:"cancelNumber"` //买方超时时间 BuyerOvertime int `orm:"column(buyer_overtime)"json:"buyerOvertime"form:"buyerOvertime"` //卖方超时时间 SellerOvertime int `orm:"column(seller_overtime)"json:"sellerOvertime"form:"sellerOvertime"` //卖家申诉超时时间 AppealOvertime int `orm:"column(appeal_overtime)"json:"appealOvertime"form:"appealOvertime"` //买家申诉超时时间 VendeeAppealOvertime int `orm:"column(vendee_appeal_overtime)"json:"vendeeAppealOvertime"form:"vendeeAppealOvertime"` //委托挂单时间 OrderEntryTime int `orm:"column(order_entry_time)"json:"orderEntryTime"form:"orderEntryTime"` //描述说明 Msg string `orm:"column(msg)"json:"msg"form:"msg"` //交易规则 Content string `orm:"column(content)"json:"content"form:"content"` //修改人uid UserId int `orm:"column(user_id)"json:"userId"form:"userId"` //创建时间 CreateTime time.Time `orm:"auto_now_add;type(datetime);column(create_time)"json:"createTime"form:"createTime"` } // OtcConfPageList 获取分页数据 func OtcConfPageList(params *OtcConfQueryParam) ([]*OtcConf, int64) { o := orm.NewOrm() query := o.QueryTable(OtcConfBakTBName()) data := make([]*OtcConf, 0) //默认排序 sortorder := "id" switch params.Sort { case "id": sortorder = "id" } if params.Order == "desc" { sortorder = "-" + sortorder } total, _ := query.Count() if total > 0 { query.OrderBy(sortorder).Limit(params.Limit, (params.Offset-1)*params.Limit).All(&data) } return data, total } //获取最后一条数据 func OtcConfGetLastOne() OtcConf { o := orm.NewOrm() query := o.QueryTable(OtcConfTBName()) var obj OtcConf query.OrderBy("-id").One(&obj) return obj }
package config type ServerConfig struct{ RpcListenEndPoint map[string]string RethinkDbEndPoint map[string]string RethinkDbName map[string]string AddressTrxDbPath map[string]string SupportCoinType map[string]string SourceDataHost map[string]string SourceDataPort map[string]string PosgresqlConfig map[string]interface{} SafeBlock map[string]int } var RpcServerConfig = ServerConfig{RpcListenEndPoint:map[string]string{"ETH":"0.0.0.0:5444","ETH_TEST":"0.0.0.0:5445"}, SourceDataHost:map[string]string{"ETH":"http://192.168.1.122","ETH_TEST":"http://192.168.1.164"}, SourceDataPort:map[string]string{"ETH":"8588","ETH_TEST":"28000"}, PosgresqlConfig: map[string]interface{}{"ETH": map[string]string{"host":"localhost", "port":"5432","user": "postgres","password":"12345678","dbname":"eth_db"},"ETH_TEST": map[string]string{"host":"localhost", "port":"5432","user": "postgres","password":"12345678","dbname":"eth_test_db"}}, SupportCoinType:map[string]string{"ETH":"","ETH_TEST":""}, SafeBlock:map[string]int{"ETH":6,"ETH_TEST":1}}
// Package conf loads the ingress-controller configuration file and wires up
// the Kubernetes clients and shared informers used by the rest of the program.
package conf

import (
	coreinformers "k8s.io/client-go/informers/core/v1"
	restclient "k8s.io/client-go/rest"

	clientSet "github.com/gxthrj/apisix-ingress-types/pkg/client/clientset/versioned"
	seven "github.com/gxthrj/seven/conf"

	"k8s.io/client-go/tools/clientcmd"

	"k8s.io/client-go/kubernetes"

	"k8s.io/client-go/informers"

	"os"
	"path/filepath"

	"io/ioutil"

	"fmt"

	"github.com/tidwall/gjson"

	"runtime"
)

var (
	// ENV is the deployment environment, from $ENV (defaults to "local").
	ENV      string
	basePath string
	// ADMIN_URL is the APISIX admin endpoint, from $APISIX_ADMIN_INTERNAL.
	ADMIN_URL = os.Getenv("APISIX_ADMIN_INTERNAL")
	HOSTNAME  = os.Getenv("HOSTNAME")
	LOCAL_ADMIN_URL = ""
	podInformer coreinformers.PodInformer
	svcInformer coreinformers.ServiceInformer
	nsInformer  coreinformers.NamespaceInformer
	// EndpointsInformer is exported but never assigned in this file —
	// presumably populated elsewhere or dead; verify before relying on it.
	EndpointsInformer coreinformers.EndpointsInformer
	IsLeader          = false
	//etcdClient client.Client
	kubeClient kubernetes.Interface
	// CoreSharedInformerFactory is the factory backing the informers above;
	// set by InitInformer.
	CoreSharedInformerFactory informers.SharedInformerFactory
)

// Known environment names.
const PROD = "prod"
const HBPROD = "hb-prod"
const BETA = "beta"
const DEV = "dev"
const TEST = "test"
const LOCAL = "local"

// confPath is the configuration file location used in non-local environments.
const confPath = "/root/ingress-controller/conf.json"
const AispeechUpstreamKey = "/apisix/customer/upstream/map"

// setEnvironment reads $ENV (defaulting to LOCAL) and records this source
// file's directory so ConfPath can locate a sibling conf.json when local.
func setEnvironment() {
	if env := os.Getenv("ENV"); env == "" {
		ENV = LOCAL
	} else {
		ENV = env
	}
	_, basePath, _, _ = runtime.Caller(1)
}

// ConfPath returns the configuration file path: next to this source file when
// running locally, the fixed container path otherwise.
func ConfPath() string {
	if ENV == LOCAL {
		return filepath.Join(filepath.Dir(basePath), "conf.json")
	} else {
		return confPath
	}
}

type etcdConfig struct {
	Addresses []string
}

var EtcdConfig etcdConfig
var K8sAuth k8sAuth
var Syslog syslog

// config is the rest client config shared by InitKubeClient/InitApisixClient.
// NOTE(review): it is only assigned inside InitInformer, so InitInformer must
// run before either of those functions — an implicit ordering dependency.
var config *restclient.Config

// init loads conf.json, pushes the APISIX base URL into the seven library,
// captures the k8s auth file and syslog host, then builds the informers.
// Panics if the configuration file cannot be read.
func init() {
	// determine the current environment
	setEnvironment()
	// resolve the configuration file path
	filePath := ConfPath()
	// read the configuration file contents
	if configurationContent, err := ioutil.ReadFile(filePath); err != nil {
		panic(fmt.Sprintf("failed to read configuration file: %s", filePath))
	} else {
		configuration := gjson.ParseBytes(configurationContent)
		// apisix baseUrl
		apisixConf := configuration.Get("conf.apisix")
		apisixBaseUrl := apisixConf.Get("base_url").String()
		seven.SetBaseUrl(apisixBaseUrl)
		// k8sAuth conf
		k8sAuthConf := configuration.Get("conf.k8sAuth")
		K8sAuth.file = k8sAuthConf.Get("file").String()
		// syslog conf
		syslogConf := configuration.Get("conf.syslog")
		Syslog.Host = syslogConf.Get("host").String()
	}
	// init etcd client
	//etcdClient = NewEtcdClient()
	// init informer
	InitInformer()
}

// k8sAuth holds the kubeconfig file path used in local mode.
type k8sAuth struct {
	file string
}

// syslog holds the syslog endpoint from the configuration file.
type syslog struct {
	Host string
}

//func GetEtcdAPI() client.KeysAPI{
//	return client.NewKeysAPI(etcdClient)
//}

// GetURL returns the APISIX admin URL, falling back to a hard-coded default
// when $APISIX_ADMIN_INTERNAL is unset.
func GetURL() string {
	if ADMIN_URL != "" {
		return ADMIN_URL
	} else {
		return "http://172.16.20.90:30116/apisix/admin"
	}
}

// GetPodInformer returns the shared pod informer (valid after InitInformer).
func GetPodInformer() coreinformers.PodInformer {
	return podInformer
}

// GetSvcInformer returns the shared service informer.
func GetSvcInformer() coreinformers.ServiceInformer {
	return svcInformer
}

// GetNsInformer returns the shared namespace informer.
func GetNsInformer() coreinformers.NamespaceInformer {
	return nsInformer
}

// GetKubeClient returns the kubernetes client built by InitInformer.
func GetKubeClient() kubernetes.Interface {
	return kubeClient
}

// InitKubeClient builds a kubernetes clientset from the package-level config.
// NOTE(review): panics (via ExceptNilErr) if config is still nil — it relies
// on InitInformer having populated config first.
func InitKubeClient() kubernetes.Interface {
	//var err error
	//if ENV == LOCAL {
	//	clientConfig, err := clientcmd.LoadFromFile(K8sAuth.file)
	//	ExceptNilErr(err)
	//
	//	config, err = clientcmd.NewDefaultClientConfig(*clientConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
	//	ExceptNilErr(err)
	//} else {
	//	config, err = restclient.InClusterConfig()
	//	ExceptNilErr(err)
	//}
	k8sClient, err := kubernetes.NewForConfig(config)
	ExceptNilErr(err)
	return k8sClient
}

// InitApisixClient builds the APISIX CRD clientset from the shared config.
func InitApisixClient() clientSet.Interface {
	apisixRouteClientset, err := clientSet.NewForConfig(config)
	ExceptNilErr(err)
	return apisixRouteClientset
}

// InitInformer resolves the rest config (kubeconfig file locally, in-cluster
// otherwise), builds the kube client, and creates the shared informers.
func InitInformer() {
	// build a k8s client
	//var config *restclient.Config
	var err error
	if ENV == LOCAL {
		// NOTE(review): `:=` shadows the outer err here, so the
		// ExceptNilErr(err) after InitKubeClient below always sees the
		// zero value — effectively a dead check.
		clientConfig, err := clientcmd.LoadFromFile(K8sAuth.file)
		ExceptNilErr(err)

		config, err = clientcmd.NewDefaultClientConfig(*clientConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
		ExceptNilErr(err)
	} else {
		config, err = restclient.InClusterConfig()
		ExceptNilErr(err)
	}

	//k8sClient, err := kubernetes.NewForConfig(config)
	kubeClient = InitKubeClient()
	ExceptNilErr(err)

	// create an informer factory
	//sharedInformerFactory := informers.NewSharedInformerFactory(k8sClient, 0)
	// create an informer factory
	CoreSharedInformerFactory = informers.NewSharedInformerFactory(kubeClient, 0)

	// create the informers
	podInformer = CoreSharedInformerFactory.Core().V1().Pods()
	svcInformer = CoreSharedInformerFactory.Core().V1().Services()
	nsInformer = CoreSharedInformerFactory.Core().V1().Namespaces()

	//return podInformer, svcInformer, nsInformer
}

// ExceptNilErr panics on any non-nil error; used as a hard assert during init.
func ExceptNilErr(err error) {
	if err != nil {
		panic(err)
	}
}

//func NewEtcdClient() client.Client {
//	cfg := client.Config{
//		Endpoints: EtcdConfig.Addresses,
//		Transport: client.DefaultTransport,
//	}
//	if c, err := client.New(cfg); err != nil {
//		panic(fmt.Sprintf("failed to initialize etcd watcher. %s", err.Error()))
//	} else {
//		return c
//	}
//}

// EtcdWatcher
//type EtcdWatcher struct {
//	client client.Client
//	etcdKey string
//	ctx context.Context
//	cancels []context.CancelFunc
//}
//
//
//type BalancerRules struct {
//	RuleSpec *RuleSpec `json:"spec"`
//}
//
//type RuleSpec struct {
//	Ewma []string `json:"ewma"`
//	Sllb []Sllb `json:"sllb"`
//}
//
//type Sllb struct {
//	Name string `json:"name"`
//	Threshold int64 `json:"threshold"`
//	Open string `json:"open"`
//	MakeZero string `json:"makeZero"`
//}
//
//type BalancerLevel struct {
//	LevelSpec *LevelSpec `json:"spec"`
//}
//
//type LevelSpec struct {
//	Pod []string `json:"pod"`
//}
/* In many table-top games it is common to use different dice to simulate random events. A “d” or “D” is used to indicate a die with a specific number of faces, d4 indicating a four-sided die, for example. If several dice of the same type are to be rolled, this is indicated by a leading number specifying the number of dice. Hence, 2d6 means the player should roll two six-sided dice and sum the result face values. Task Write a program to compute the most likely outcomes for the sum of two dice rolls. Assume each die has numbered faces starting at 1 and that each face has equal roll probability. Input The input consists of a single line with two integer numbers, N,M, specifying the number of faces of the two dice. Constraints 4≤N,M≤20 Number of faces. Output A line with the most likely outcome for the sum; in case of several outcomes with the same probability, they must be listed from lowest to highest value in separate lines. */ package main import ( "fmt" "reflect" ) func main() { test(6, 6, []int{7}) test(6, 4, []int{5, 6, 7}) test(12, 20, []int{13, 14, 15, 16, 17, 18, 19, 20, 21}) } func assert(x bool) { if !x { panic("assertion failed") } } func test(n, m int, r []int) { p := likelist(n, m) fmt.Println(p) assert(reflect.DeepEqual(p, r)) } func likelist(n, m int) []int { r := []int{} p := make([]int, (n+1)*(m+1)) c := 0 for i := 1; i <= n; i++ { for j := 1; j <= m; j++ { k := i + j if p[k]++; p[k] > c { c = k r = r[:0] } if p[k] == c { r = append(r, k) } } } return r }
package main

import "fmt"

// Demonstrates Go array comparison: arrays support only == and !=, and two
// arrays are equal when every element matches. Both operands must have the
// same array type (element type AND length).
func main() {
	// Array comparison: only == or != are supported; equality means every
	// element is equal. The two arrays being compared must share a type.
	a := [5]int{1, 2, 3, 4, 5}
	b := [5]int{1, 2, 3, 4, 5}
	c := [5]int{1, 2, 3} // missing elements are zero-valued: [1 2 3 0 0]
	fmt.Println("a == b ? ", a == b) // true
	fmt.Println("a == c ? ", a == c) // false — the trailing zeros differ
	// Assignment copies the entire array (value semantics).
	var d [5]int
	d = a
	fmt.Println("d = ", d) // d = [1 2 3 4 5]
}
package service import ( "fmt" ovirtsdk "github.com/ovirt/go-ovirt" ) func diskAttachmentByVmAndDisk(connection *ovirtsdk.Connection, vmId string, diskId string) (*ovirtsdk.DiskAttachment, error) { vmService := connection.SystemService().VmsService().VmService(vmId) attachments, err := vmService.DiskAttachmentsService().List().Send() if err != nil { return nil, err } for _, attachment := range attachments.MustAttachments().Slice() { if diskId == attachment.MustDisk().MustId() { return attachment, nil } } return nil, fmt.Errorf("failed to find attachment by disk %s for VM %s", diskId, vmId) }
package usecases type ResultMap map[string]error // SweepAcceptedStories returns a map of the branches it attempted to delete and an error if that branch was unable to be deleted func SweepAcceptedStories(repo Repository, tracker Tracker) ResultMap { branchErrors := make(map[string]error) branchNames := repo.GetAllBranchNames() for _, branchName := range branchNames { story, _ := GetStoryByBranchName(branchName, tracker) if story != nil && story.State == "accepted" { branchErrors[branchName] = repo.DeleteBranch(branchName) } } return branchErrors }
package log_parser import ( "bytes" ) type FullErrText struct { Text *bytes.Buffer complete bool } func (p *FullErrText) String() string { return p.Text.String() } func NewFullError() *FullErrText { return &FullErrText{bytes.NewBuffer([]byte{}), false} } func (fe *FullErrText) addNewLine() { fe.Text.WriteString("\n") } func (fe *FullErrText) addText(str []byte) { fe.Text.Write(str) } func (fe *FullErrText) addLine(str []byte) { fe.addText(str) fe.addNewLine() }
package model import ( "github.com/corbym/gogiven/generator" ) type testResults struct { ID string `json:"id"` Failed bool `json:"failed"` Skipped bool `json:"skipped"` TestOutput string `json:"test_output"` } //newTestResults is internal and creates a new json data object for marshalling test data func newTestResults(data generator.TestResult) testResults { return testResults{Failed: data.Failed, TestOutput: data.TestOutput, ID: data.TestID, Skipped: data.Skipped, } }
package models import ( "context" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" ) func CheckExist(ctx context.Context, collection *mongo.Collection, key string, value interface{}) (bool, error) { findRes := collection.FindOne(ctx, bson.D{ { Key: key, Value: value, }, }) err := findRes.Err() if err == nil { var test interface{} err := findRes.Decode(&test) if err == nil { return true, nil } else if err == mongo.ErrNoDocuments { return false, nil } else { return false, err } } else if err == mongo.ErrNoDocuments { return false, nil } else { return false, err } }
/* # -*- coding: utf-8 -*- # @Author : joker # @Time : 2021/6/18 9:51 上午 # @File : lt_数字范围按位与.go # @Description : # @Attention : */ package v2 func rangeBitwiseAnd(left int, right int) int { // 按位与的关键: 全为1 的时候,才会为1 // 并且需要查看规律: 根据规律得出 ,最终会得到一个公共前缀 return 0 }
package santa import ( "github.com/maprost/application/example/max/profile" "github.com/maprost/application/generator/genmodel" ) func New() genmodel.JobPosition { return genmodel.JobPosition{ Title: "Santa Clause", ProfessionalSkills: []genmodel.SkillID{profile.TechSkillWrapping, profile.TechSkillSneaking, profile.TechSkillClimbing}, } }
package model import ( "Blog/util" "Blog/util/errmsg" "github.com/jinzhu/gorm" ) type Category struct { ID uint `json:"id,omitempty"` Name string `gorm:"type:varchar(20);not null" json:"name,omitempty"` } // 查询类别是否存在 func ExistsCategory(c *Category) errmsg.Code { var cate Category db.Select("id").Where("name = ?", c.Name).First(&cate) if cate.ID > 0 { return errmsg.ERROR_USERNAME_USED } return errmsg.SUCCESS } // 新增类别 func InsertCategory(c *Category) errmsg.Code { if err := db.Create(&c).Error; err != nil { return errmsg.ERROR } return errmsg.SUCCESS } // 查询类别表 func GetCategorysList(pageSize int, pageNum int) ([]Category, errmsg.Code) { var categorys []Category // limit: 指定需要多少条; offset:指定从哪一条开始 err := db.Limit(pageSize).Offset((pageNum - 1) * pageSize).Find(&categorys).Error if err != nil && err != gorm.ErrRecordNotFound { return nil, errmsg.ERROR } return categorys, errmsg.SUCCESS } // 编辑类别信息 func EditCategory(c *Category) errmsg.Code { dataMap := util.Struct2Map(*c) logger.Debug("Edit category:", dataMap) err := db.Model(c).Updates(dataMap).Error if err != nil { return errmsg.ERROR } return errmsg.SUCCESS } // 删除类别 func DeleteCategory(id int) errmsg.Code { err = db.Delete(&Category{}, id).Error if err != nil { return errmsg.ERROR } return errmsg.SUCCESS }
package glog import ( "fmt" "math/rand" "testing" "github.com/onsi/gomega" ) func TestSession(t *testing.T) { g := gomega.NewGomegaWithT(t) ClearBackends() backendName := "session" backend := NewListBackend("", Debug) SetBackend(backendName, backend) // Verify that the list is initially empty g.Expect(len(backend.Get(Debug))).To(gomega.Equal(0)) levels := make([]LogLevel, 0) counts := make(map[LogLevel]int) for k := range logLevelMap { levels = append(levels, k) counts[k] = 0 } N := 32 for index := 0; index < N; index++ { // Generate a message for a random level r := rand.Intn(len(levels)) level := levels[r] message := fmt.Sprintf("Message at %s level.", level) Logf(level, message) sessionContent := backend.Get(Debug) // Ensure the session contains the expected number of records g.Expect(len(sessionContent)).To(gomega.Equal(index + 1)) // Verify content of most-recent record last := sessionContent[len(sessionContent)-1] g.Expect(last.Level).To(gomega.Equal(level)) g.Expect(last.Message).To(gomega.Equal(message)) } backend.Clear() // Verify that the list is empty after clearing. g.Expect(len(backend.Get(Debug))).To(gomega.Equal(0)) }