text
stringlengths
11
4.05M
package main

import "testing"

// tuple pairs the two operands fed to sumint in the table test below.
type tuple struct{ n, s int }

// TestSumint drives sumint through a table of operand pairs mapped to
// their expected sums.
func TestSumint(t *testing.T) {
	// Map-literal keys use the simplified composite-literal form
	// (gofmt -s): the tuple type is implied by the map's key type.
	for k, v := range map[tuple]int{
		{3, 0}:    3,
		{23, 3}:   26,
		{321, 26}: 347,
		{21, 0}:   21,
		{14, 21}:  35,
		{35, 7}:   42,
	} {
		if r := sumint(k.n, k.s); r != v {
			t.Errorf("failed: sumint %d %d is %d, got %d", k.n, k.s, v, r)
		}
	}
}

// sumint returns the sum of its two arguments.
func sumint(n, s int) int { return n + s }
package service import ( //"github.com/astaxie/beego" "webserver/common" //"webserver/controllers/hservice" "sort" "time" "tripod/timekit" "webserver/models" "webserver/models/maccount" "webserver/models/mservice" ) type CheckOfflineController struct { User *maccount.User CheckType int CheckValue int serviceType int toSave bool sortKeys []string sortMap map[string]interface{} } func (c *CheckOfflineController) CheckOffline() { c.initParam() if c.CheckType == common.Community { c.GetOfferDinnerInfo() c.GetHaveDinnerInfo() c.GetMarketGOInfo() c.GetMarketBuyInfo() c.GetHomeCourseInfo() c.GetWantCourseInfo() c.GetHouseKeepInfo() c.GetHouseRequestInfo() } c.GetPetCareInfo() c.GetPetTransferInfo() c.GetSkillInfo() c.GetLendOutInfo() c.GetBorrowInfo() c.GetGoodSaleInfo() c.GetWantBuyInfo() c.GetDoAgencyInfo() c.GetOtherInfo() c.saveRecords() } func (c *CheckOfflineController) initParam() { c.sortKeys = make([]string, 0, 10) c.sortMap = make(map[string]interface{}) } func (c *CheckOfflineController) setRecord(key time.Time, value interface{}) { timeKey := timekit.GetLocalTime(key).Format(timekit.FormatTime) c.sortKeys = append(c.sortKeys, timeKey) c.sortMap[timeKey] = value } func (c *CheckOfflineController) saveRecords() { sort.Sort(sort.StringSlice(c.sortKeys)) for _, key := range c.sortKeys { //beego.BeeLogger.Debug("saveRecords %v", c.sortMap[key]) models.SaveRecord(c.sortMap[key]) } } func (c *CheckOfflineController) doOffline(record *mservice.ServiceRecord) error { if record.Status == models.STATUS_OFFLINE || record.Status == models.STATUS_DELETED { return nil } //beego.BeeLogger.Debug("doOffline before %v", record) if c.CheckType == common.Community && record.CommunityId > 0 { record.CommunityId = c.CheckValue c.toSave = true } else if c.CheckType == common.Officebuilding && record.OfficebuildingId > 0 { record.OfficebuildingId = c.CheckValue c.toSave = true } else if c.CheckType == common.School && record.SchoolId > 0 { record.SchoolId = c.CheckValue c.toSave = 
true } else if c.CheckType == common.Hometown && record.HometownId > 0 { record.HometownId = c.CheckValue c.toSave = true } //beego.BeeLogger.Debug("doOffline after %v", record) return nil } /* func (c *CheckOfflineController) handleService(serviceType, serviceId int) error { handler := &hservice.HandleService{} handler.ServiceId = serviceId handler.ServiceType = serviceType handler.Handle = hservice.SERVICE_TO_OFFLINE handler.ServiceHandle() if handler.Err != nil { return handler.Err } if err := maccount.UnfreezeUserAccount(c.User.Id, handler.OfflineFreeze); err != nil { beego.BeeLogger.Error("UnfreezeUserAccount %v, Error:%v", c.User.Id, err) return err } return nil } */ func (c *CheckOfflineController) GetOfferDinnerInfo() { var records []mservice.ServiceOfferDinner models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetHaveDinnerInfo() { var records []mservice.ServiceHaveDinner models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetMarketGOInfo() { var records []mservice.ServiceMarket models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetMarketBuyInfo() { var records []mservice.ServiceMarketbuy models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetPetCareInfo() { var records []mservice.ServicePetcare models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { 
c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetPetTransferInfo() { var records []mservice.ServicePettransfer models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetSkillInfo() { var records []mservice.ServiceSkill models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetHomeCourseInfo() { var records []mservice.ServiceCourse models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetWantCourseInfo() { var records []mservice.ServiceWantcourse models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetLendOutInfo() { var records []mservice.ServiceLendout models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetBorrowInfo() { var records []mservice.ServiceBorrow models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetGoodSaleInfo() { var records []mservice.ServiceGoodsale models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetWantBuyInfo() { var records 
[]mservice.ServiceWantbuy models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetDoAgencyInfo() { var records []mservice.ServiceAgency models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetHouseKeepInfo() { var records []mservice.ServiceHousekeep models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetHouseRequestInfo() { var records []mservice.ServiceHouserequest models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } } func (c *CheckOfflineController) GetOtherInfo() { var records []mservice.ServiceOther models.FindRecordsByUserId(&records, c.User.Id) for _, record := range records { c.doOffline(&record.ServiceRecord) if c.toSave { c.setRecord(record.UpdatedAt, &record) } } }
// Copyright (c) 2020-2021 KHS Films // // This file is a part of mtproto package. // See https://github.com/xelaj/mtproto/blob/master/LICENSE for details package ige import ( "testing" "github.com/stretchr/testify/assert" ) func TestCipher_isCorrectData(t *testing.T) { tests := []struct { name string data []byte wantErr assert.ErrorAssertionFunc }{ { name: "good_one", data: Hexed("0000000000000000000000000000000000000000000000000000000000000000"), wantErr: assert.NoError, }, { name: "smaller_than_want", data: Hexed("0000"), wantErr: assert.Error, }, { name: "not_divisible_by_blocks", data: Hexed("0000000000000000000000000000000000000000000000000000000000"), wantErr: assert.Error, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { wantErr := tt.wantErr if wantErr == nil { wantErr = assert.NoError } err := isCorrectData(tt.data) wantErr(t, err) }) } }
// Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package importccl

import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/security"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catformat"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/row"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/kr/pretty"
	mysql "vitess.io/vitess/go/vt/sqlparser"
)

// TestMysqldumpDataReader feeds the "simple" mysqldump testdata file
// through newMysqldumpReader/readFile and checks the decoded rows
// against simpleTestRows column by column (int, string, bytes), with
// injectNull / nil mapping to SQL NULL.
func TestMysqldumpDataReader(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	files := getMysqldumpTestdata(t)

	ctx := context.Background()
	table := descForTable(ctx, t, `CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea)`, 100, 200, NoFKs)
	tables := map[string]*execinfrapb.ReadImportDataSpec_ImportTable{"simple": {Desc: table.TableDesc()}}

	opts := roachpb.MysqldumpOptions{}

	kvCh := make(chan row.KVBatch, 50)
	// When creating a new dump reader, we need to pass in the walltime that will be used as
	// a parameter used for generating unique rowid, random, and gen_random_uuid as default
	// expressions. Here, the parameter doesn't matter so we pass in 0.
	converter, err := newMysqldumpReader(ctx, kvCh, 0 /*walltime*/, tables, testEvalCtx, opts)
	if err != nil {
		t.Fatal(err)
	}

	// Capture each decoded row (copied, since the reader reuses its
	// scratch Datums) instead of emitting KVs.
	var res []tree.Datums
	converter.debugRow = func(row tree.Datums) {
		res = append(res, append(tree.Datums{}, row...))
	}

	in, err := os.Open(files.simple)
	if err != nil {
		t.Fatal(err)
	}
	defer in.Close()
	wrapped := &fileReader{Reader: in, counter: byteCounter{r: in}}

	if err := converter.readFile(ctx, wrapped, 1, 0, nil); err != nil {
		t.Fatal(err)
	}
	close(kvCh)

	if expected, actual := len(simpleTestRows), len(res); expected != actual {
		t.Fatalf("expected %d rows, got %d: %v", expected, actual, res)
	}
	for i, expected := range simpleTestRows {
		row := res[i]
		if actual := *row[0].(*tree.DInt); expected.i != int(actual) {
			t.Fatalf("row %d: expected i = %d, got %d", i, expected.i, actual)
		}
		if expected.s != injectNull {
			// NOTE(review): this Fatalf prints expected.i where expected.s
			// seems intended, and the NULL-branch message below says "b"
			// for column s — confirm and fix messages separately.
			if actual := *row[1].(*tree.DString); expected.s != string(actual) {
				t.Fatalf("row %d: expected s = %q, got %q", i, expected.i, actual)
			}
		} else if row[1] != tree.DNull {
			t.Fatalf("row %d: expected b = NULL, got %T: %v", i, row[1], row[1])
		}
		if expected.b != nil {
			if actual := []byte(*row[2].(*tree.DBytes)); !bytes.Equal(expected.b, actual) {
				t.Fatalf("row %d: expected b = %v, got %v", i, hex.EncodeToString(expected.b), hex.EncodeToString(actual))
			}
		} else if row[2] != tree.DNull {
			t.Fatalf("row %d: expected b = NULL, got %T: %v", i, row[2], row[2])
		}
	}
}

// expectedParent is the parent (database) descriptor id the schema
// tests create tables under.
const expectedParent = 52

// readFile returns the contents of a file in testdata/mysqldump as a
// string, failing the test on any I/O error.
func readFile(t *testing.T, name string) string {
	body, err := ioutil.ReadFile(filepath.Join("testdata", "mysqldump", name))
	if err != nil {
		t.Fatal(err)
	}
	return string(body)
}

// readMysqlCreateFrom parses the mysqldump file at path with
// readMysqlCreateTable (restricted to `name` when non-empty) and returns
// the descriptor of the last table produced.
func readMysqlCreateFrom(
	t *testing.T, path, name string, id descpb.ID, fks fkHandler,
) *descpb.TableDescriptor {
	t.Helper()
	f, err := os.Open(path)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	walltime := testEvalCtx.StmtTimestamp.UnixNano()
	tbl, err := readMysqlCreateTable(context.Background(), f, testEvalCtx, nil, id, expectedParent, name, fks, map[descpb.ID]int64{}, security.RootUserName(), walltime)
	if err != nil {
		t.Fatal(err)
	}
	return tbl[len(tbl)-1].TableDesc()
}

// TestMysqldumpSchemaReader checks that table descriptors parsed from
// mysqldump schema files match the descriptors created from the
// equivalent cockroach-schema.sql files, for single-table dumps, dumps
// with FK references, and multi-table dumps (with and without FK skip).
func TestMysqldumpSchemaReader(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	ctx := context.Background()
	files := getMysqldumpTestdata(t)

	simpleTable := descForTable(ctx, t, readFile(t, `simple.cockroach-schema.sql`), expectedParent, 52, NoFKs)
	referencedSimple := descForTable(ctx, t, readFile(t, `simple.cockroach-schema.sql`), expectedParent, 52, NoFKs)
	fks := fkHandler{
		allowed: true,
		resolver: fkResolver{
			tableNameToDesc: map[string]*tabledesc.Mutable{referencedSimple.Name: referencedSimple},
			format:          mysqlDumpFormat()},
	}

	t.Run("simple", func(t *testing.T) {
		expected := simpleTable
		got := readMysqlCreateFrom(t, files.simple, "", 51, NoFKs)
		compareTables(t, expected.TableDesc(), got)
	})

	t.Run("second", func(t *testing.T) {
		secondTable := descForTable(ctx, t, readFile(t, `second.cockroach-schema.sql`), expectedParent, 53, fks)
		expected := secondTable
		got := readMysqlCreateFrom(t, files.second, "", 53, fks)
		compareTables(t, expected.TableDesc(), got)
	})

	t.Run("everything", func(t *testing.T) {
		expected := descForTable(ctx, t, readFile(t, `everything.cockroach-schema.sql`), expectedParent, 53, NoFKs)
		got := readMysqlCreateFrom(t, files.everything, "", 53, NoFKs)
		compareTables(t, expected.TableDesc(), got)
	})

	t.Run("simple-in-multi", func(t *testing.T) {
		expected := simpleTable
		got := readMysqlCreateFrom(t, files.wholeDB, "simple", 51, NoFKs)
		compareTables(t, expected.TableDesc(), got)
	})

	t.Run("third-in-multi", func(t *testing.T) {
		skip := fkHandler{allowed: true, skip: true, resolver: fkResolver{
			tableNameToDesc: make(map[string]*tabledesc.Mutable),
			format:          mysqlDumpFormat(),
		}}
		expected := descForTable(ctx, t, readFile(t, `third.cockroach-schema.sql`), expectedParent, 52, skip)
		got := readMysqlCreateFrom(t, files.wholeDB, "third", 51, skip)
		compareTables(t, expected.TableDesc(), got)
	})
}

// compareTables asserts that two table descriptors are equivalent:
// columns and indexes are compared individually first (for readable
// failure messages), then the full descriptors are compared via their
// marshaled proto bytes.
func compareTables(t *testing.T, expected, got *descpb.TableDescriptor) {
	colNames := func(cols []descpb.ColumnDescriptor) string {
		names := make([]string, len(cols))
		for i := range cols {
			names[i] = cols[i].Name
		}
		return strings.Join(names, ", ")
	}
	idxNames := func(indexes []descpb.IndexDescriptor) string {
		names := make([]string, len(indexes))
		for i := range indexes {
			names[i] = indexes[i].Name
		}
		return strings.Join(names, ", ")
	}

	// Attempt to verify the pieces individually, and return more helpful errors
	// if an individual column or index does not match. If the pieces look right
	// when compared individually, move on to compare the whole table desc as
	// rendered to a string via `%+v`, as a more comprehensive check.
	if expectedCols, gotCols := expected.Columns, got.Columns; len(gotCols) != len(expectedCols) {
		t.Fatalf("expected columns (%d):\n%v\ngot columns (%d):\n%v\n",
			len(expectedCols), colNames(expectedCols), len(gotCols), colNames(gotCols),
		)
	}
	for i := range expected.Columns {
		e, g := expected.Columns[i].SQLStringNotHumanReadable(), got.Columns[i].SQLStringNotHumanReadable()
		if e != g {
			t.Fatalf("column %d (%q): expected\n%s\ngot\n%s\n", i, expected.Columns[i].Name, e, g)
		}
	}

	if expectedIdx, gotIdx := expected.Indexes, got.Indexes; len(expectedIdx) != len(gotIdx) {
		t.Fatalf("expected indexes (%d):\n%v\ngot indexes (%d):\n%v\n",
			len(expectedIdx), idxNames(expectedIdx), len(gotIdx), idxNames(gotIdx),
		)
	}
	for i := range expected.Indexes {
		ctx := context.Background()
		semaCtx := tree.MakeSemaContext()
		tableName := &descpb.AnonymousTable
		expectedDesc := tabledesc.NewBuilder(expected).BuildImmutableTable()
		gotDesc := tabledesc.NewBuilder(got).BuildImmutableTable()
		e, err := catformat.IndexForDisplay(
			ctx, expectedDesc, tableName, &expected.Indexes[i], "" /* partition */, "" /* interleave */, &semaCtx,
		)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		g, err := catformat.IndexForDisplay(
			ctx, gotDesc, tableName, &got.Indexes[i], "" /* partition */, "" /* interleave */, &semaCtx,
		)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if e != g {
			t.Fatalf("index %d: expected\n%s\ngot\n%s\n", i, e, g)
		}
	}

	// Our attempts to check parts individually (and return readable errors if
	// they didn't match) found nothing.
	expectedBytes, err := protoutil.Marshal(expected)
	if err != nil {
		t.Fatal(err)
	}
	gotBytes, err := protoutil.Marshal(got)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(expectedBytes, gotBytes) {
		t.Fatalf("expected\n%+v\n, got\n%+v\ndiff: %v", expected, got, pretty.Diff(expected, got))
	}
}

// TestMysqlValueToDatum checks conversion of mysql literal expressions
// to datums, including MySQL's zero date/timestamp sentinels mapping to
// NULL.
func TestMysqlValueToDatum(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	date := func(s string) tree.Datum {
		d, _, err := tree.ParseDDate(nil, s)
		if err != nil {
			t.Fatal(err)
		}
		return d
	}
	ts := func(s string) tree.Datum {
		d, _, err := tree.ParseDTimestamp(nil, s, time.Microsecond)
		if err != nil {
			t.Fatal(err)
		}
		return d
	}
	tests := []struct {
		raw  mysql.Expr
		typ  *types.T
		want tree.Datum
	}{
		// "Zero" dates/timestamps are MySQL sentinels for NULL.
		{raw: mysql.NewStrLiteral([]byte("0000-00-00")), typ: types.Date, want: tree.DNull},
		{raw: mysql.NewStrLiteral([]byte("2010-01-01")), typ: types.Date, want: date("2010-01-01")},
		{raw: mysql.NewStrLiteral([]byte("0000-00-00 00:00:00")), typ: types.Timestamp, want: tree.DNull},
		{raw: mysql.NewStrLiteral([]byte("2010-01-01 00:00:00")), typ: types.Timestamp, want: ts("2010-01-01 00:00:00")},
	}
	evalContext := tree.NewTestingEvalContext(nil)
	for _, tc := range tests {
		t.Run(fmt.Sprintf("%v", tc.raw), func(t *testing.T) {
			got, err := mysqlValueToDatum(tc.raw, tc.typ, evalContext)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("got %v, want %v", got, tc.want)
			}
		})
	}
}
package datadogagent

import (
	"context"
	"testing"

	"github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1"
	"github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1/test"
	"github.com/DataDog/datadog-operator/pkg/kubernetes"
	assert "github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
)

// Test_cleanupClusterRole verifies that Reconciler.cleanupClusterRole
// deletes a ClusterRole only when its labels mark it as managed by this
// operator AND as part of the given DatadogAgent; otherwise the object
// is left in place.
func Test_cleanupClusterRole(t *testing.T) {
	tests := []struct {
		name              string
		dda               *v1alpha1.DatadogAgent
		clusterRoleLabels map[string]string
		expectToBeDeleted bool
	}{
		{
			name: "ClusterRole belongs to DatadogAgent",
			dda:  test.NewDefaultedDatadogAgent("some_namespace", "some_name", nil),
			clusterRoleLabels: map[string]string{
				kubernetes.AppKubernetesManageByLabelKey: "datadog-operator",
				kubernetes.AppKubernetesPartOfLabelKey:   "some_namespace-some_name",
			},
			expectToBeDeleted: true,
		},
		{
			name: "ClusterRole does not belong to DatadogAgent (not managed by operator)",
			dda:  test.NewDefaultedDatadogAgent("some_namespace", "some_name", nil),
			clusterRoleLabels: map[string]string{
				kubernetes.AppKubernetesManageByLabelKey: "not-the-datadog-operator",
				kubernetes.AppKubernetesPartOfLabelKey:   "some_namespace-some_name",
			},
			expectToBeDeleted: false,
		},
		{
			name: "ClusterRole does not belong to DatadogAgent (belongs to other)",
			dda:  test.NewDefaultedDatadogAgent("some_namespace", "some_name", nil),
			clusterRoleLabels: map[string]string{
				kubernetes.AppKubernetesManageByLabelKey: "datadog-operator",
				kubernetes.AppKubernetesPartOfLabelKey:   "other-datadog-agent",
			},
			expectToBeDeleted: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Pre-seed the fake client with a ClusterRole carrying the
			// labels under test, then run the cleanup and check whether
			// the object survived.
			clusterRole := rbacv1.ClusterRole{
				ObjectMeta: metav1.ObjectMeta{
					Name:   getAgentRbacResourcesName(tt.dda),
					Labels: tt.clusterRoleLabels,
				},
			}
			fakeClient := fake.NewClientBuilder().WithObjects(&clusterRole).Build()
			r := newReconcilerForRbacTests(fakeClient)
			_, err := r.cleanupClusterRole(
				logf.Log.WithName(tt.name), tt.dda, getAgentRbacResourcesName(tt.dda),
			)
			assert.NoError(t, err)
			err = fakeClient.Get(
				context.TODO(), types.NamespacedName{Name: getAgentRbacResourcesName(tt.dda)}, &rbacv1.ClusterRole{},
			)
			if tt.expectToBeDeleted {
				// Deleted objects surface as NotFound from the fake client.
				assert.True(t, err != nil && apierrors.IsNotFound(err), "ClusterRole not deleted")
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

// Test_cleanupClusterRoleBinding mirrors Test_cleanupClusterRole for
// Reconciler.cleanupClusterRoleBinding: the binding is deleted only when
// labeled as managed by this operator and owned by the DatadogAgent.
func Test_cleanupClusterRoleBinding(t *testing.T) {
	tests := []struct {
		name                     string
		dda                      *v1alpha1.DatadogAgent
		clusterRoleBindingLabels map[string]string
		expectToBeDeleted        bool
	}{
		{
			name: "ClusterRoleBinding belongs to DatadogAgent",
			dda:  test.NewDefaultedDatadogAgent("some_namespace", "some_name", nil),
			clusterRoleBindingLabels: map[string]string{
				kubernetes.AppKubernetesManageByLabelKey: "datadog-operator",
				kubernetes.AppKubernetesPartOfLabelKey:   "some_namespace-some_name",
			},
			expectToBeDeleted: true,
		},
		{
			name: "ClusterRoleBinding does not belong to DatadogAgent (not managed by operator)",
			dda:  test.NewDefaultedDatadogAgent("some_namespace", "some_name", nil),
			clusterRoleBindingLabels: map[string]string{
				kubernetes.AppKubernetesManageByLabelKey: "not-the-datadog-operator",
				kubernetes.AppKubernetesPartOfLabelKey:   "some_namespace-some_name",
			},
			expectToBeDeleted: false,
		},
		{
			name: "ClusterRoleBinding does not belong to DatadogAgent (belongs to other)",
			dda:  test.NewDefaultedDatadogAgent("some_namespace", "some_name", nil),
			clusterRoleBindingLabels: map[string]string{
				kubernetes.AppKubernetesManageByLabelKey: "datadog-operator",
				kubernetes.AppKubernetesPartOfLabelKey:   "other-datadog-agent",
			},
			expectToBeDeleted: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			clusterRoleBinding := rbacv1.ClusterRoleBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name:   getAgentRbacResourcesName(tt.dda),
					Labels: tt.clusterRoleBindingLabels,
				},
			}
			fakeClient := fake.NewClientBuilder().WithObjects(&clusterRoleBinding).Build()
			reconciler := newReconcilerForRbacTests(fakeClient)
			_, err := reconciler.cleanupClusterRoleBinding(
				logf.Log.WithName(tt.name), tt.dda, getAgentRbacResourcesName(tt.dda),
			)
			assert.NoError(t, err)
			err = fakeClient.Get(
				context.TODO(), types.NamespacedName{Name: getAgentRbacResourcesName(tt.dda)}, &rbacv1.ClusterRoleBinding{},
			)
			if tt.expectToBeDeleted {
				assert.True(t, err != nil && apierrors.IsNotFound(err), "ClusterRoleBinding not deleted")
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

// newReconcilerForRbacTests builds a Reconciler wired to the given
// (fake) client, with the rbac/v1 types registered on the scheme and a
// throwaway event recorder.
func newReconcilerForRbacTests(client client.Client) *Reconciler {
	reconcilerScheme := scheme.Scheme
	reconcilerScheme.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.ClusterRoleBinding{}, &rbacv1.ClusterRole{})
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{})
	return &Reconciler{
		client:   client,
		scheme:   reconcilerScheme,
		recorder: recorder,
	}
}

// Test_checkSecretBackendMultipleProvidersUsed checks that the helper
// reports true only when DD_SECRET_BACKEND_COMMAND is present and set to
// exactly /readsecret_multiple_providers.sh.
func Test_checkSecretBackendMultipleProvidersUsed(t *testing.T) {
	tests := []struct {
		name       string
		envVarList []corev1.EnvVar
		want       bool
	}{
		{
			name: "Secret backend multiple providers script is found",
			envVarList: []corev1.EnvVar{
				{
					Name:  "EnvVar1",
					Value: "Value1",
				},
				{
					Name:  "EnvVar2",
					Value: "Value2",
				},
				{
					Name:  "DD_SECRET_BACKEND_COMMAND",
					Value: "/readsecret_multiple_providers.sh",
				},
			},
			want: true,
		},
		{
			name: "Script does not match",
			envVarList: []corev1.EnvVar{
				{
					Name:  "EnvVar1",
					Value: "Value1",
				},
				{
					Name:  "EnvVar2",
					Value: "Value2",
				},
				{
					Name:  "DD_SECRET_BACKEND_COMMAND",
					Value: "/readsecret.sh",
				},
			},
			want: false,
		},
		{
			name: "EnvVar name does not match",
			envVarList: []corev1.EnvVar{
				{
					Name:  "EnvVar1",
					Value: "Value1",
				},
				{
					Name:  "EnvVar2",
					Value: "Value2",
				},
				{
					Name:  "DD_SECRET_BACKEND_COMMAND_D",
					Value: "/readsecret_multiple_providers.sh",
				},
			},
			want: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := checkSecretBackendMultipleProvidersUsed(tt.envVarList)
			if result != tt.want {
				t.Errorf("checkEnvVarMatchesValue() result is %v but want %v", result, tt.want)
			}
		})
	}
}
package aws

import (
	"encoding/base64"
	"errors"
	"log"
	"reflect"
	"testing"
	"time"

	"github.com/NYTimes/gizmo/pubsub"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/sqs"
	"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
	"github.com/golang/protobuf/proto"
)

// TestSubscriberNoBase64 runs the subscriber against a fake SQS that
// serves two batches of raw (non-base64) string messages and verifies
// each message is delivered and deleted in order.
func TestSubscriberNoBase64(t *testing.T) {
	test1 := "hey hey hey!"
	test2 := "ho ho ho!"
	test3 := "yessir!"
	test4 := "nope!"
	sqstest := &TestSQSAPI{
		Messages: [][]*sqs.Message{
			{
				{
					Body:          &test1,
					ReceiptHandle: &test1,
				},
				{
					Body:          &test2,
					ReceiptHandle: &test2,
				},
			},
			{
				{
					Body:          &test3,
					ReceiptHandle: &test3,
				},
				{
					Body:          &test4,
					ReceiptHandle: &test4,
				},
			},
		},
	}

	fals := false
	cfg := SQSConfig{ConsumeBase64: &fals}
	defaultSQSConfig(&cfg)
	sub := &subscriber{
		sqs:      sqstest,
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}
	queue := sub.Start()

	verifySQSSub(t, queue, sqstest, test1, 0)
	verifySQSSub(t, queue, sqstest, test2, 1)
	verifySQSSub(t, queue, sqstest, test3, 2)
	verifySQSSub(t, queue, sqstest, test4, 3)
	sub.Stop()
}

// TestSQSReceiveError verifies that a ReceiveMessage error closes the
// subscriber's channel and is surfaced via sub.Err().
func TestSQSReceiveError(t *testing.T) {
	wantErr := errors.New("my sqs error")
	sqstest := &TestSQSAPI{
		Err: wantErr,
	}

	fals := false
	cfg := SQSConfig{ConsumeBase64: &fals}
	defaultSQSConfig(&cfg)
	sub := &subscriber{
		sqs:      sqstest,
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}
	queue := sub.Start()

	_, ok := <-queue
	if ok {
		t.Error("no message should've gotten to us, the channel should be closed")
		return
	}

	sub.Stop()

	if sub.Err() != wantErr {
		t.Errorf("expected subscriber to return error '%s'; got '%s'", wantErr, sub.Err())
	}
}

// TestSQSDoneAfterStop verifies a message can still be acknowledged
// (Done -> delete) after the subscriber has been stopped.
func TestSQSDoneAfterStop(t *testing.T) {
	test := "it stopped??"
	sqstest := &TestSQSAPI{
		Messages: [][]*sqs.Message{
			{
				{
					Body:          &test,
					ReceiptHandle: &test,
				},
			},
		},
	}

	fals := false
	cfg := SQSConfig{ConsumeBase64: &fals}
	defaultSQSConfig(&cfg)
	sub := &subscriber{
		sqs:      sqstest,
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}
	queue := sub.Start()
	// verify we can receive a message, stop and still mark the message as 'done'
	gotRaw := <-queue
	sub.Stop()
	gotRaw.Done()
	// do all the other normal verifications
	if len(sqstest.Deleted) != 1 {
		t.Errorf("subscriber expected %d deleted message, got: %d", 1, len(sqstest.Deleted))
	}

	if *sqstest.Deleted[0].ReceiptHandle != test {
		t.Errorf("subscriber expected receipt handle of \"%s\" , got:+ \"%s\"", test, *sqstest.Deleted[0].ReceiptHandle)
	}
}

// TestExtendDoneTimeout verifies ExtendDoneDeadline forwards a
// visibility-change request for the message's receipt handle.
func TestExtendDoneTimeout(t *testing.T) {
	test := "some test"
	sqstest := &TestSQSAPI{
		Messages: [][]*sqs.Message{
			{
				{
					Body:          &test,
					ReceiptHandle: &test,
				},
			},
		},
	}

	fals := false
	cfg := SQSConfig{ConsumeBase64: &fals}
	defaultSQSConfig(&cfg)
	sub := &subscriber{
		sqs:      sqstest,
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}
	queue := sub.Start()
	defer sub.Stop()
	gotRaw := <-queue
	gotRaw.ExtendDoneDeadline(time.Hour)
	if len(sqstest.Extended) != 1 {
		t.Errorf("subscriber expected %d extended message, got %d", 1, len(sqstest.Extended))
	}

	if *sqstest.Extended[0].ReceiptHandle != test {
		t.Errorf("subscriber expected receipt handle of %q , got:+ %q", test, *sqstest.Extended[0].ReceiptHandle)
	}
}

// verifySQSSub receives one message from queue, checks its raw body
// equals want, acks it, and checks that index+1 deletions have been
// recorded with the expected receipt handle.
func verifySQSSub(t *testing.T, queue <-chan pubsub.SubscriberMessage, testsqs *TestSQSAPI, want string, index int) {
	gotRaw := <-queue
	got := string(gotRaw.Message())
	if got != want {
		t.Errorf("subscriber expected:\n%#v\ngot:\n%#v", want, got)
	}
	gotRaw.Done()

	if len(testsqs.Deleted) != (index + 1) {
		t.Errorf("subscriber expected %d deleted message, got: %d", index+1, len(testsqs.Deleted))
	}

	if *testsqs.Deleted[index].ReceiptHandle != want {
		t.Errorf("subscriber expected receipt handle of \"%s\" , got: \"%s\"", want, *testsqs.Deleted[index].ReceiptHandle)
	}
}

// TestSubscriber runs the subscriber in its default (base64 protobuf)
// mode and verifies each decoded message and its deletion.
func TestSubscriber(t *testing.T) {
	test1 := &TestProto{"hey hey hey!"}
	test2 := &TestProto{"ho ho ho!"}
	test3 := &TestProto{"yessir!"}
	test4 := &TestProto{"nope!"}
	sqstest := &TestSQSAPI{
		Messages: [][]*sqs.Message{
			{
				{
					Body:          makeB64String(test1),
					ReceiptHandle: &test1.Value,
				},
				{
					Body:          makeB64String(test2),
					ReceiptHandle: &test2.Value,
				},
			},
			{
				{
					Body:          makeB64String(test3),
					ReceiptHandle: &test3.Value,
				},
				{
					Body:          makeB64String(test4),
					ReceiptHandle: &test4.Value,
				},
			},
		},
	}

	cfg := SQSConfig{}
	defaultSQSConfig(&cfg)
	sub := &subscriber{
		sqs:      sqstest,
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}
	queue := sub.Start()

	verifySQSSubProto(t, queue, sqstest, test1, 0)
	verifySQSSubProto(t, queue, sqstest, test2, 1)
	verifySQSSubProto(t, queue, sqstest, test3, 2)
	verifySQSSubProto(t, queue, sqstest, test4, 3)
	sub.Stop()
}

// verifySQSSubProto is the protobuf counterpart of verifySQSSub: it
// decodes the received message and compares it to want.
func verifySQSSubProto(t *testing.T, queue <-chan pubsub.SubscriberMessage, testsqs *TestSQSAPI, want *TestProto, index int) {
	gotRaw := <-queue
	got := makeProto(gotRaw.Message())
	if !reflect.DeepEqual(got, want) {
		t.Errorf("subscriber expected:\n%#v\ngot:\n%#v", want, got)
	}
	gotRaw.Done()

	if len(testsqs.Deleted) != (index + 1) {
		t.Errorf("subscriber expected %d deleted message, got: %d", index+1, len(testsqs.Deleted))
	}

	if *testsqs.Deleted[index].ReceiptHandle != want.Value {
		t.Errorf("subscriber expected receipt handle of \"%s\" , got: \"%s\"", want.Value, *testsqs.Deleted[index].ReceiptHandle)
	}
}

// makeB64String marshals p and returns its base64 encoding as a
// *string, matching the sqs.Message Body field type.
func makeB64String(p proto.Message) *string {
	b, _ := proto.Marshal(p)
	s := base64.StdEncoding.EncodeToString(b)
	return &s
}

// makeProto unmarshals b into a TestProto, aborting the process on
// failure (test helper only).
func makeProto(b []byte) *TestProto {
	t := &TestProto{}
	err := proto.Unmarshal(b, t)
	if err != nil {
		log.Fatalf("unable to unmarshal protobuf: %s", err)
	}
	return t
}

/*
   500000	     13969 ns/op	    1494 B/op	      31 allocs/op
  1000000	     14248 ns/op	    1491 B/op	      31 allocs/op
  2000000	     14138 ns/op	    1489 B/op	      31 allocs/op
*/
// BenchmarkSubscriber_Proto measures receive+decode+ack throughput of
// the subscriber over pre-staged protobuf messages.
func BenchmarkSubscriber_Proto(b *testing.B) {
	test1 := &TestProto{"hey hey hey!"}
	sqstest := &TestSQSAPI{
		Messages: [][]*sqs.Message{
			{
				{
					Body:          makeB64String(test1),
					ReceiptHandle: &test1.Value,
				},
			},
		},
	}
	for i := 0; i < b.N/2; i++ {
		sqstest.Messages = append(sqstest.Messages, []*sqs.Message{
			{
				Body:          makeB64String(test1),
				ReceiptHandle: &test1.Value,
			},
			{
				Body:          makeB64String(test1),
				ReceiptHandle: &test1.Value,
			},
		})
	}

	cfg := SQSConfig{}
	defaultSQSConfig(&cfg)
	sub := &subscriber{
		sqs:      sqstest,
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}
	queue := sub.Start()

	for i := 0; i < b.N; i++ {
		gotRaw := <-queue
		// get message, forcing base64 decode
		gotRaw.Message()
		// send delete message
		gotRaw.Done()
	}

	go sub.Stop()
}

// TestSQSAPI is an in-memory fake of the SQS API: ReceiveMessage serves
// the Messages batches in order, and delete/visibility calls are
// recorded for assertions.
type TestSQSAPI struct {
	Offset   int // index of the next batch ReceiveMessage will serve
	Messages [][]*sqs.Message
	Deleted  []*sqs.DeleteMessageBatchRequestEntry
	Extended []*sqs.ChangeMessageVisibilityInput
	Err      error
}

var _ sqsiface.SQSAPI = &TestSQSAPI{}

// ReceiveMessage serves the next staged batch, or an empty result (plus
// the configured Err) once the batches are exhausted.
func (s *TestSQSAPI) ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) {
	if s.Offset >= len(s.Messages) {
		return &sqs.ReceiveMessageOutput{}, s.Err
	}
	out := s.Messages[s.Offset]
	s.Offset++
	return &sqs.ReceiveMessageOutput{Messages: out}, s.Err
}

// DeleteMessageBatch records the deleted entries.
// NOTE(review): it still returns errNotImpl even though it does record
// the entries — confirm callers ignore this error.
func (s *TestSQSAPI) DeleteMessageBatch(i *sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) {
	s.Deleted = append(s.Deleted, i.Entries...)
	return nil, errNotImpl
}

// ChangeMessageVisibility records visibility-extension requests.
func (s *TestSQSAPI) ChangeMessageVisibility(i *sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error) {
	s.Extended = append(s.Extended, i)
	return nil, nil
}

///////////
// ALL METHODS BELOW HERE ARE EMPTY AND JUST SATISFYING THE SQSAPI interface
///////////

func (s *TestSQSAPI) DeleteMessage(d *sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) DeleteMessageWithContext(aws.Context, *sqs.DeleteMessageInput, ...request.Option) (*sqs.DeleteMessageOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) DeleteMessageBatchRequest(i *sqs.DeleteMessageBatchInput) (*request.Request, *sqs.DeleteMessageBatchOutput) {
	return nil, nil
}

func (s *TestSQSAPI) DeleteMessageBatchWithContext(aws.Context, *sqs.DeleteMessageBatchInput, ...request.Option) (*sqs.DeleteMessageBatchOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) AddPermissionRequest(*sqs.AddPermissionInput) (*request.Request, *sqs.AddPermissionOutput) {
	return nil, nil
}

func (s *TestSQSAPI) AddPermission(*sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) AddPermissionWithContext(aws.Context, *sqs.AddPermissionInput, ...request.Option) (*sqs.AddPermissionOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ChangeMessageVisibilityRequest(*sqs.ChangeMessageVisibilityInput) (*request.Request, *sqs.ChangeMessageVisibilityOutput) {
	return nil, nil
}

func (s *TestSQSAPI) ChangeMessageVisibilityWithContext(aws.Context, *sqs.ChangeMessageVisibilityInput, ...request.Option) (*sqs.ChangeMessageVisibilityOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ChangeMessageVisibilityBatchRequest(*sqs.ChangeMessageVisibilityBatchInput) (*request.Request, *sqs.ChangeMessageVisibilityBatchOutput) {
	return nil, nil
}

func (s *TestSQSAPI) ChangeMessageVisibilityBatch(*sqs.ChangeMessageVisibilityBatchInput) (*sqs.ChangeMessageVisibilityBatchOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ChangeMessageVisibilityBatchWithContext(aws.Context, *sqs.ChangeMessageVisibilityBatchInput, ...request.Option) (*sqs.ChangeMessageVisibilityBatchOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) CreateQueueRequest(*sqs.CreateQueueInput) (*request.Request, *sqs.CreateQueueOutput) {
	return nil, nil
}

func (s *TestSQSAPI) CreateQueue(*sqs.CreateQueueInput) (*sqs.CreateQueueOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) CreateQueueWithContext(aws.Context, *sqs.CreateQueueInput, ...request.Option) (*sqs.CreateQueueOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) DeleteMessageRequest(*sqs.DeleteMessageInput) (*request.Request, *sqs.DeleteMessageOutput) {
	return nil, nil
}

func (s *TestSQSAPI) DeleteQueueRequest(*sqs.DeleteQueueInput) (*request.Request, *sqs.DeleteQueueOutput) {
	return nil, nil
}

func (s *TestSQSAPI) DeleteQueue(*sqs.DeleteQueueInput) (*sqs.DeleteQueueOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) DeleteQueueWithContext(aws.Context, *sqs.DeleteQueueInput, ...request.Option) (*sqs.DeleteQueueOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) GetQueueAttributesRequest(*sqs.GetQueueAttributesInput) (*request.Request, *sqs.GetQueueAttributesOutput) {
	return nil, nil
}

func (s *TestSQSAPI) GetQueueAttributes(*sqs.GetQueueAttributesInput) (*sqs.GetQueueAttributesOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) GetQueueAttributesWithContext(aws.Context, *sqs.GetQueueAttributesInput, ...request.Option) (*sqs.GetQueueAttributesOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) GetQueueUrlRequest(*sqs.GetQueueUrlInput) (*request.Request, *sqs.GetQueueUrlOutput) {
	return nil, nil
}

func (s *TestSQSAPI) GetQueueUrl(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) GetQueueUrlWithContext(aws.Context, *sqs.GetQueueUrlInput, ...request.Option) (*sqs.GetQueueUrlOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput) {
	return nil, nil
}

func (s *TestSQSAPI) ListDeadLetterSourceQueues(*sqs.ListDeadLetterSourceQueuesInput) (*sqs.ListDeadLetterSourceQueuesOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ListDeadLetterSourceQueuesWithContext(aws.Context, *sqs.ListDeadLetterSourceQueuesInput, ...request.Option) (*sqs.ListDeadLetterSourceQueuesOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput) {
	return nil, nil
}

func (s *TestSQSAPI) ListQueues(*sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ListQueuesWithContext(aws.Context, *sqs.ListQueuesInput, ...request.Option) (*sqs.ListQueuesOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput) {
	return nil, nil
}

func (s *TestSQSAPI) PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) PurgeQueueWithContext(aws.Context, *sqs.PurgeQueueInput, ...request.Option) (*sqs.PurgeQueueOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) ReceiveMessageRequest(*sqs.ReceiveMessageInput) (*request.Request, *sqs.ReceiveMessageOutput) {
	return nil, nil
}

func (s *TestSQSAPI) ReceiveMessageWithContext(aws.Context, *sqs.ReceiveMessageInput, ...request.Option) (*sqs.ReceiveMessageOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) RemovePermissionRequest(*sqs.RemovePermissionInput) (*request.Request, *sqs.RemovePermissionOutput) {
	return nil, nil
}

func (s *TestSQSAPI) RemovePermission(*sqs.RemovePermissionInput) (*sqs.RemovePermissionOutput, error) {
	return nil, errNotImpl
}

func (s *TestSQSAPI) 
RemovePermissionWithContext(aws.Context, *sqs.RemovePermissionInput, ...request.Option) (*sqs.RemovePermissionOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) SendMessageRequest(*sqs.SendMessageInput) (*request.Request, *sqs.SendMessageOutput) { return nil, nil } func (s *TestSQSAPI) SendMessage(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) SendMessageWithContext(aws.Context, *sqs.SendMessageInput, ...request.Option) (*sqs.SendMessageOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) SendMessageBatchRequest(*sqs.SendMessageBatchInput) (*request.Request, *sqs.SendMessageBatchOutput) { return nil, nil } func (s *TestSQSAPI) SendMessageBatch(*sqs.SendMessageBatchInput) (*sqs.SendMessageBatchOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) SendMessageBatchWithContext(aws.Context, *sqs.SendMessageBatchInput, ...request.Option) (*sqs.SendMessageBatchOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) SetQueueAttributesRequest(*sqs.SetQueueAttributesInput) (*request.Request, *sqs.SetQueueAttributesOutput) { return nil, nil } func (s *TestSQSAPI) SetQueueAttributes(*sqs.SetQueueAttributesInput) (*sqs.SetQueueAttributesOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) SetQueueAttributesWithContext(aws.Context, *sqs.SetQueueAttributesInput, ...request.Option) (*sqs.SetQueueAttributesOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) ListQueueTags(input *sqs.ListQueueTagsInput) (*sqs.ListQueueTagsOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) ListQueueTagsRequest(input *sqs.ListQueueTagsInput) (req *request.Request, output *sqs.ListQueueTagsOutput) { return nil, nil } func (s *TestSQSAPI) ListQueueTagsWithContext(ctx aws.Context, input *sqs.ListQueueTagsInput, opts ...request.Option) (*sqs.ListQueueTagsOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) TagQueue(input *sqs.TagQueueInput) (*sqs.TagQueueOutput, error) { 
return nil, errNotImpl } func (s *TestSQSAPI) TagQueueRequest(input *sqs.TagQueueInput) (req *request.Request, output *sqs.TagQueueOutput) { return nil, nil } func (s *TestSQSAPI) TagQueueWithContext(ctx aws.Context, input *sqs.TagQueueInput, opts ...request.Option) (*sqs.TagQueueOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) UntagQueue(input *sqs.UntagQueueInput) (*sqs.UntagQueueOutput, error) { return nil, errNotImpl } func (s *TestSQSAPI) UntagQueueRequest(input *sqs.UntagQueueInput) (req *request.Request, output *sqs.UntagQueueOutput) { return nil, nil } func (s *TestSQSAPI) UntagQueueWithContext(ctx aws.Context, input *sqs.UntagQueueInput, opts ...request.Option) (*sqs.UntagQueueOutput, error) { return nil, errNotImpl }
package model

// User represents one row of the "user_info" table.
//
// NOTE(review): Password carries a `json:"password"` tag, so it is included
// whenever this struct is marshalled — confirm callers never serialize User
// directly in API responses.
type User struct {
	// Auto-incrementing primary key.
	Id       uint   `gorm:"primary_key;auto_increment" json:"id"`
	Name     string `json:"name"`
	Password string `json:"password"`
	Email    string `json:"email"`
	// Status semantics (active/disabled/…) are defined by callers — not
	// visible from this file.
	Status int `json:"status"`
}

// TableName overrides GORM's default pluralized table name so this model
// maps onto the existing "user_info" table.
func (u *User) TableName() string {
	return "user_info"
}
package main import ( "flag" "log" "net" "github.com/cloudnoize/dig/dnsmsg" ) func main() { doamin := flag.String("d", "google.com", "domain") flag.Parse() udpaddr, err := net.ResolveUDPAddr("udp", "8.8.8.8:53") if err != nil { log.Fatal(err) } /* socket(AF_INET, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, IPPROTO_IP) = 3 - create sokcet datagram return file desc setsockopt(3, SOL_SOCKET, SO_BROADCAST, [1], 4) = 0 - set options on socket connect(3, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("8.8.8.8")}, 16) = 0 - only binds the addr, does not make any conneciton. epoll_create1(EPOLL_CLOEXEC) = 4 epoll_ctl(4, EPOLL_CTL_ADD, 3, {EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET, {u32=4208557832, u64=140423869333256}}) = 0 getsockname(3, {sa_family=AF_INET, sin_port=htons(40335), sin_addr=inet_addr("172.17.0.3")}, [112->16]) = 0 getpeername(3, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("8.8.8.8")}, [112->16]) = 0 */ c, err := net.DialUDP("udp", nil, udpaddr) if err != nil { log.Fatal(err) } defer c.Close() dq := dnsmsg.NewDnsQuery(*doamin) buf := dq.Serialize() log.Printf("% x\n", buf) n, err := c.Write(buf) if err != nil { log.Fatalf(err.Error()) } log.Printf("Wrote %d bytes\n", n) var res [2056]byte n, err = c.Read(res[:]) if err != nil { log.Fatalf(err.Error()) } log.Printf("% x\n", res[:n]) r := dnsmsg.NewDnsRes(res[:n]) //https://osqa-ask.wireshark.org/questions/50806/help-understanding-dns-packet-data log.Println(r) }
package stor

import (
	"github.com/oceanho/gw"
	"github.com/oceanho/gw/contrib/apps/stor/api"
)

func init() {
}

// App plugs the stor (object storage) feature into the gw server as a
// registrable application. The zero value is usable; see New.
type App struct {
}

// New returns a ready-to-register App instance.
func New() App {
	return App{}
}

// Name returns the unique application name used by the gw framework.
func (a App) Name() string {
	return "gw.stor"
}

// Router returns the URL prefix under which this app's routes are mounted.
func (a App) Router() string {
	return "stor"
}

// Register binds this app's HTTP handlers onto the given router group.
//
// NOTE(review): "object/create" is registered as GET; creation is a mutating
// operation and would conventionally be POST — confirm this is intentional.
func (a App) Register(router *gw.RouterGroup) {
	router.GET("object/create", api.CreateObject)
	router.POST("object/modify", api.ModifyObject)
}

// Migrate runs schema migrations at startup. Currently a no-op.
func (a App) Migrate(state *gw.ServerState) {
	// db := store.GetDbStore()
}

// Use lets the app adjust server options before start. Currently a no-op.
func (a App) Use(opt *gw.ServerOption) {
}

// OnStart is invoked once when the server starts. Currently a no-op.
func (a App) OnStart(state *gw.ServerState) {
}

// OnShutDown is invoked once when the server shuts down. Currently a no-op.
func (a App) OnShutDown(state *gw.ServerState) {
}
package podlogstream

import (
	"context"
	"fmt"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"github.com/jonboulle/clockwork"

	"github.com/tilt-dev/tilt/internal/controllers/indexer"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)

var podGVK = schema.GroupVersionKind{Version: "v1", Kind: "Pod"}
var nsGVK = schema.GroupVersionKind{Version: "v1", Kind: "Namespace"}

// Helper struct that captures Pod changes and queues up a Reconcile()
// call for any PodLogStream watching that pod.
//
// All mutable fields (q, handler, watchesByNamespace, and the indexer) are
// guarded by mu.
type PodSource struct {
	ctx     context.Context
	indexer *indexer.Indexer
	kClient k8s.Client
	handler handler.EventHandler
	q       workqueue.RateLimitingInterface
	clock   clockwork.Clock

	watchesByNamespace map[string]*podWatch

	mu sync.Mutex
}

// podWatch tracks one per-namespace pod watch goroutine (see doWatch).
type podWatch struct {
	ctx       context.Context
	cancel    func()
	namespace string

	// Only populated if ctx.Err() != nil (the context has been cancelled)
	finishedAt time.Time
	error      error
}

var _ source.Source = &PodSource{}

// NewPodSource builds a PodSource whose indexer maps PodLogStream objects to
// the Pod/Namespace keys they watch (see indexPodLogStreamForKubernetes).
func NewPodSource(ctx context.Context, kClient k8s.Client, scheme *runtime.Scheme, clock clockwork.Clock) *PodSource {
	return &PodSource{
		ctx:                ctx,
		indexer:            indexer.NewIndexer(scheme, indexPodLogStreamForKubernetes),
		kClient:            kClient,
		watchesByNamespace: make(map[string]*podWatch),
		clock:              clock,
	}
}

// Start implements source.Source: it records the work queue and event handler
// used to trigger reconciles. The predicates argument is accepted for
// interface compatibility but unused.
func (s *PodSource) Start(ctx context.Context, handler handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.q = q
	s.handler = handler
	return nil
}

// TearDown cancels and forgets every per-namespace watch.
func (s *PodSource) TearDown() {
	s.mu.Lock()
	defer s.mu.Unlock()

	for k, pw := range s.watchesByNamespace {
		pw.cancel()
		delete(s.watchesByNamespace, k)
	}
}

// Register the pods for this stream.
//
// Set up any watches we need. Returns the watch's error (or context error) if
// the namespace watch has already terminated, so the caller can surface it.
func (s *PodSource) handleReconcileRequest(ctx context.Context, name types.NamespacedName, pls *PodLogStream) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.indexer.OnReconcile(name, pls)

	var err error
	ns := pls.Spec.Namespace
	if ns != "" {
		pw, ok := s.watchesByNamespace[ns]
		if !ok {
			// First stream for this namespace: start a watch goroutine.
			ctx, cancel := context.WithCancel(ctx)
			pw = &podWatch{ctx: ctx, cancel: cancel, namespace: ns}
			s.watchesByNamespace[ns] = pw
			go s.doWatch(pw)
		}

		if pw.ctx.Err() != nil {
			err = pw.ctx.Err()
			if pw.error != nil {
				err = pw.error
			}
		}
	}
	return err
}

// Process pod events and make sure they trigger a reconcile.
//
// Runs as a goroutine per namespace; exits when pw.ctx is cancelled, the pod
// channel closes, or WatchPods fails.
//
// NOTE(review): WatchPods is started with s.ctx (the source's lifetime), not
// pw.ctx — cancelling pw stops this consuming loop but the underlying watch
// stream lives until s.ctx ends. Confirm that is intentional.
func (s *PodSource) doWatch(pw *podWatch) {
	defer func() {
		// If the watch wasn't cancelled and there's no other error,
		// record a generic error.
		if pw.error == nil && pw.ctx.Err() == nil {
			pw.error = fmt.Errorf("watch disconnected")
		}
		pw.finishedAt = s.clock.Now()
		pw.cancel()
		// Requeue every stream on this namespace so it observes the failure.
		s.requeueIndexerKey(indexer.Key{Name: types.NamespacedName{Name: pw.namespace}, GVK: nsGVK})
	}()

	pw.finishedAt = time.Time{}
	pw.error = nil

	podCh, err := s.kClient.WatchPods(s.ctx, k8s.Namespace(pw.namespace))
	if err != nil {
		pw.error = fmt.Errorf("watching pods: %v", err)
		return
	}

	for {
		select {
		case <-pw.ctx.Done():
			return

		case pod, ok := <-podCh:
			if !ok {
				return
			}
			s.handlePod(pod)
			continue
		}
	}
}

// Turn all pod events into Reconcile() calls.
func (s *PodSource) handlePod(obj k8s.ObjectUpdate) {
	podNN, ok := obj.AsNamespacedName()
	if !ok {
		return
	}
	s.requeueIndexerKey(indexer.Key{Name: podNN, GVK: podGVK})
}

// requeueIndexerKey enqueues a reconcile request for every PodLogStream the
// indexer maps to the given key. Safe to call before Start (q may be nil).
func (s *PodSource) requeueIndexerKey(key indexer.Key) {
	s.mu.Lock()
	requests := s.indexer.EnqueueKey(key)
	q := s.q
	s.mu.Unlock()

	if q == nil {
		return
	}

	for _, req := range requests {
		q.Add(req)
	}
}

// requeueStream enqueues a reconcile request for a single named stream.
// Safe to call before Start (q may be nil).
func (s *PodSource) requeueStream(name types.NamespacedName) {
	s.mu.Lock()
	q := s.q
	s.mu.Unlock()

	if q == nil {
		return
	}
	q.Add(reconcile.Request{NamespacedName: name})
}

// indexPodLogStreamForKubernetes indexes a PodLogStream object and returns keys
// for Pods from the K8s cluster that it watches.
//
// See also: indexPodLogStreamForTiltAPI which indexes a PodLogStream object
// and returns keys for objects from the Tilt apiserver that it watches.
func indexPodLogStreamForKubernetes(obj client.Object) []indexer.Key {
	pls := obj.(*v1alpha1.PodLogStream)
	if pls.Spec.Pod == "" {
		return nil
	}
	return []indexer.Key{
		// Watch events broadcast on the whole namespace.
		indexer.Key{
			Name: types.NamespacedName{Name: pls.Spec.Namespace},
			GVK:  nsGVK,
		},

		// Watch events on this specific Pod.
		indexer.Key{
			Name: types.NamespacedName{Name: pls.Spec.Pod, Namespace: pls.Spec.Namespace},
			GVK:  podGVK,
		},
	}
}
package kuma import "github.com/layer5io/meshery-adapter-library/status" func (kuma *Kuma) applyCustomOperation(namespace string, manifest string, isDel bool) (string, error) { st := status.Starting err := kuma.applyManifest(isDel, namespace, []byte(manifest)) if err != nil { return st, ErrCustomOperation(err) } return status.Completed, nil }
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gofer

import (
	"golang.org/x/sys/unix"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
)

// dentryTimestamp converts a statx timestamp to nanoseconds since the epoch,
// the representation used for dentry atime/mtime/ctime fields.
func dentryTimestamp(t linux.StatxTimestamp) int64 {
	return t.ToNsec()
}

// dentryTimestampFromUnix converts a unix.Timespec to nanoseconds since the
// epoch.
func dentryTimestampFromUnix(t unix.Timespec) int64 {
	return dentryTimestamp(linux.StatxTimestamp{Sec: t.Sec, Nsec: uint32(t.Nsec)})
}

// touchAtime updates the access time if the mount permits it, taking
// d.metadataMu itself and registering a pending write on mnt for the
// duration of the update.
//
// Preconditions: d.cachedMetadataAuthoritative() == true.
func (d *dentry) touchAtime(mnt *vfs.Mount) {
	if mnt.Flags.NoATime || mnt.ReadOnly() {
		return
	}
	if err := mnt.CheckBeginWrite(); err != nil {
		return
	}
	now := d.fs.clock.Now().Nanoseconds()
	d.metadataMu.Lock()
	d.atime.Store(now)
	// Mark atime dirty so it is written back to the remote filesystem.
	d.atimeDirty.Store(1)
	d.metadataMu.Unlock()
	mnt.EndWrite()
}

// touchAtimeLocked is touchAtime for callers that already hold d.metadataMu.
//
// Preconditions: d.metadataMu is locked. d.cachedMetadataAuthoritative() == true.
func (d *dentry) touchAtimeLocked(mnt *vfs.Mount) {
	if mnt.Flags.NoATime || mnt.ReadOnly() {
		return
	}
	if err := mnt.CheckBeginWrite(); err != nil {
		return
	}
	now := d.fs.clock.Now().Nanoseconds()
	d.atime.Store(now)
	d.atimeDirty.Store(1)
	mnt.EndWrite()
}

// touchCtime updates the change time, taking d.metadataMu itself. Unlike
// atime/mtime there is no ctimeDirty flag set here.
//
// Preconditions:
//   - d.cachedMetadataAuthoritative() == true.
//   - The caller has successfully called vfs.Mount.CheckBeginWrite().
func (d *dentry) touchCtime() {
	now := d.fs.clock.Now().Nanoseconds()
	d.metadataMu.Lock()
	d.ctime.Store(now)
	d.metadataMu.Unlock()
}

// touchCMtime updates both the change time and the modification time (they
// move together on data writes), taking d.metadataMu itself.
//
// Preconditions:
//   - d.cachedMetadataAuthoritative() == true.
//   - The caller has successfully called vfs.Mount.CheckBeginWrite().
func (d *dentry) touchCMtime() {
	now := d.fs.clock.Now().Nanoseconds()
	d.metadataMu.Lock()
	d.mtime.Store(now)
	d.ctime.Store(now)
	d.mtimeDirty.Store(1)
	d.metadataMu.Unlock()
}

// touchCMtimeLocked is touchCMtime for callers that already hold
// d.metadataMu.
//
// Preconditions:
//   - d.cachedMetadataAuthoritative() == true.
//   - The caller has locked d.metadataMu.
func (d *dentry) touchCMtimeLocked() {
	now := d.fs.clock.Now().Nanoseconds()
	d.mtime.Store(now)
	d.ctime.Store(now)
	d.mtimeDirty.Store(1)
}
package rty import ( "fmt" "github.com/gdamore/tcell" ) // Canvases hold content. type Canvas interface { Size() (int, int) SetContent(x int, y int, mainc rune, combc []rune, style tcell.Style) Close() (int, int) GetContent(x, y int) (mainc rune, combc []rune, style tcell.Style, width int) } func totalHeight(canvases []Canvas) int { total := 0 for _, c := range canvases { _, h := c.Size() total += h } return total } // Implementations below type cell struct { ch rune style tcell.Style } type TempCanvas struct { width int height int cells [][]cell style tcell.Style handler ErrorHandler } var _ Canvas = &TempCanvas{} func newTempCanvas(width, height int, style tcell.Style, handler ErrorHandler) *TempCanvas { c := &TempCanvas{width: width, height: height, handler: handler} if height != GROW { c.cells = make([][]cell, height) for i := 0; i < height; i++ { c.cells[i] = c.makeRow() } } return c } func (c *TempCanvas) Size() (int, int) { return c.width, c.height } func (c *TempCanvas) Close() (int, int) { if c.height == GROW { c.height = len(c.cells) } return c.width, c.height } func (c *TempCanvas) makeRow() []cell { row := make([]cell, c.width) for i := 0; i < c.width; i++ { row[i].style = c.style } return row } func (c *TempCanvas) SetContent(x int, y int, mainc rune, combc []rune, style tcell.Style) { if mainc == 0 { mainc = ' ' } if x < 0 || x >= c.width || y < 0 || y >= c.height { c.handler.Errorf("cell %v,%v outside canvas %v,%v", x, y, c.width, c.height) return } for y >= len(c.cells) { c.cells = append(c.cells, c.makeRow()) } c.cells[y][x] = cell{ch: mainc, style: style} } func (c *TempCanvas) GetContent(x, y int) (mainc rune, combc []rune, style tcell.Style, width int) { if x < 0 || x >= c.width || y < 0 || y >= c.height { c.handler.Errorf("cell %d, %d outside bounds %d, %d", x, y, c.width, c.height) return 0, nil, tcell.StyleDefault, 1 } if y >= len(c.cells) { return 0, nil, tcell.StyleDefault, 1 } cell := c.cells[y][x] return cell.ch, nil, cell.style, 1 } type 
SubCanvas struct { del Canvas startX int startY int width int height int highWater int style tcell.Style needsFill bool handler ErrorHandler } func newSubCanvas(del Canvas, startX int, startY int, width int, height int, style tcell.Style, handler ErrorHandler) (*SubCanvas, error) { _, delHeight := del.Size() if height == GROW && delHeight != GROW { return nil, fmt.Errorf("can't create a growing subcanvas from a non-growing subcanvas") } needsFill := true delSubCanvas, ok := del.(*SubCanvas) if ok { // If this is a subcanvas of a subcanvas with the exact same style (or with // only the foreground different), we already reset the canvas to the // current style. No need to re-fill. needsFill = style.Foreground(tcell.ColorDefault) != delSubCanvas.style.Foreground(tcell.ColorDefault) } r := &SubCanvas{ del: del, startX: startX, startY: startY, width: width, height: height, highWater: -1, style: style, needsFill: needsFill, handler: handler, } if needsFill { r.fill(-1) } return r, nil } func (c *SubCanvas) Size() (int, int) { return c.width, c.height } func (c *SubCanvas) Close() (int, int) { if c.height == GROW { c.height = c.highWater + 1 } return c.width, c.height } func (c *SubCanvas) SetContent(x int, y int, mainc rune, combc []rune, style tcell.Style) { if mainc == 0 { mainc = ' ' } if x < 0 || x >= c.width || y < 0 || y >= c.height { c.handler.Errorf("coord %d,%d is outside bounds %d,%d", x, y, c.width, c.height) return } if c.height == GROW && y > c.highWater { oldHighWater := c.highWater c.highWater = y if c.needsFill { c.fill(oldHighWater) } } c.del.SetContent(c.startX+x, c.startY+y, mainc, combc, style) } func (c *SubCanvas) fill(lastFilled int) { startY := lastFilled + 1 maxY := c.height if maxY == GROW { maxY = c.highWater + 1 } for y := startY; y < maxY; y++ { for x := 0; x < c.width; x++ { c.del.SetContent(c.startX+x, c.startY+y, ' ', nil, c.style) } } } func (c *SubCanvas) GetContent(x int, y int) (rune, []rune, tcell.Style, int) { return 
c.del.GetContent(x, y) } type ScreenCanvas struct { del tcell.Screen handler ErrorHandler } var _ Canvas = &ScreenCanvas{} func newScreenCanvas(del tcell.Screen, handler ErrorHandler) *ScreenCanvas { return &ScreenCanvas{del: del, handler: handler} } func (c *ScreenCanvas) Size() (int, int) { return c.del.Size() } func (c *ScreenCanvas) SetContent(x int, y int, mainc rune, combc []rune, style tcell.Style) { if mainc == 0 { mainc = ' ' } c.del.SetContent(x, y, mainc, combc, style) } func (c *ScreenCanvas) Close() (int, int) { return c.del.Size() } func (c *ScreenCanvas) GetContent(x, y int) (mainc rune, combc []rune, style tcell.Style, width int) { return c.del.GetContent(x, y) }
package core import ( "github.com/cadmium-im/zirconium-go/core/models" "github.com/gorilla/websocket" ) type Session struct { wsConn *websocket.Conn connID string Claims *JWTCustomClaims } func (s *Session) Send(message models.BaseMessage) error { return s.wsConn.WriteJSON(message) } func (s *Session) Receive() (models.BaseMessage, error) { var msg models.BaseMessage err := s.wsConn.ReadJSON(&msg) return msg, err } func (s *Session) Close() error { return s.wsConn.Close() }
package binary_search

import "testing"

// TestSearch exercises search against sorted input, covering a hit in the
// middle, a miss, both boundary positions, single-element slices, and the
// empty slice — the classic off-by-one traps for a binary search.
func TestSearch(t *testing.T) {
	subTests := []struct {
		name   string
		input  []int
		target int
		result int
	}{
		{name: "hit middle", input: []int{-1, 0, 3, 5, 9, 12}, target: 9, result: 4},
		{name: "miss", input: []int{-1, 0, 3, 5, 9, 12}, target: 2, result: -1},
		{name: "hit first", input: []int{-1, 0, 3, 5, 9, 12}, target: -1, result: 0},
		{name: "hit last", input: []int{-1, 0, 3, 5, 9, 12}, target: 12, result: 5},
		{name: "single hit", input: []int{5}, target: 5, result: 0},
		{name: "single miss", input: []int{5}, target: 4, result: -1},
		{name: "empty", input: []int{}, target: 1, result: -1},
	}
	for _, test := range subTests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			if s := search(test.input, test.target); s != test.result {
				t.Errorf("search(%v, %d): wanted %v, got %v", test.input, test.target, test.result, s)
			}
		})
	}
}
// Copyright 2020 The Moov Authors // Use of this source code is governed by an Apache License // license that can be found in the LICENSE file. package camt_v08 import ( "reflect" "regexp" "github.com/moov-io/iso20022/pkg/utils" ) // Must be at least 1 items long type ExternalAccountIdentification1Code string func (r ExternalAccountIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalAccountIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCashAccountType1Code string func (r ExternalCashAccountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCashAccountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalClearingSystemIdentification1Code string func (r ExternalClearingSystemIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 5 { return utils.NewErrTextLengthInvalid("ExternalClearingSystemIdentification1Code", 1, 5) } return nil } // Must be at least 1 items long type ExternalEnquiryRequestType1Code string func (r ExternalEnquiryRequestType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalEnquiryRequestType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalFinancialInstitutionIdentification1Code string func (r ExternalFinancialInstitutionIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalFinancialInstitutionIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalOrganisationIdentification1Code string func (r ExternalOrganisationIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalOrganisationIdentification1Code", 1, 4) } return nil } // Must 
be at least 1 items long type ExternalPaymentControlRequestType1Code string func (r ExternalPaymentControlRequestType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalPaymentControlRequestType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalPersonIdentification1Code string func (r ExternalPersonIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalPersonIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalProxyAccountType1Code string func (r ExternalProxyAccountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalProxyAccountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalSystemBalanceType1Code string func (r ExternalSystemBalanceType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalSystemBalanceType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalSystemErrorHandling1Code string func (r ExternalSystemErrorHandling1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalSystemErrorHandling1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalSystemEventType1Code string func (r ExternalSystemEventType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalSystemEventType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCashClearingSystem1Code string func (r ExternalCashClearingSystem1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 3 { return utils.NewErrTextLengthInvalid("ExternalCashClearingSystem1Code", 1, 3) } return nil } // Must be at least 1 items long type 
ExternalMarketInfrastructure1Code string func (r ExternalMarketInfrastructure1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 3 { return utils.NewErrTextLengthInvalid("ExternalMarketInfrastructure1Code", 1, 3) } return nil } // Must be at least 1 items long type ExternalCancellationReason1Code string func (r ExternalCancellationReason1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCancellationReason1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCategoryPurpose1Code string func (r ExternalCategoryPurpose1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCategoryPurpose1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalDiscountAmountType1Code string func (r ExternalDiscountAmountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalDiscountAmountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalDocumentLineType1Code string func (r ExternalDocumentLineType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalDocumentLineType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalGarnishmentType1Code string func (r ExternalGarnishmentType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalGarnishmentType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalLocalInstrument1Code string func (r ExternalLocalInstrument1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 35 { return utils.NewErrTextLengthInvalid("ExternalLocalInstrument1Code", 1, 35) } return nil } // Must be at least 1 items long type ExternalMandateSetupReason1Code string func (r ExternalMandateSetupReason1Code) Validate() 
error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalMandateSetupReason1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalPurpose1Code string func (r ExternalPurpose1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalPurpose1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalServiceLevel1Code string func (r ExternalServiceLevel1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalServiceLevel1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalTaxAmountType1Code string func (r ExternalTaxAmountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalTaxAmountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalAgentInstruction1Code string func (r ExternalAgentInstruction1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalAgentInstruction1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalBalanceSubType1Code string func (r ExternalBalanceSubType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalBalanceSubType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalBalanceType1Code string func (r ExternalBalanceType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalBalanceType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalBankTransactionDomain1Code string func (r ExternalBankTransactionDomain1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalBankTransactionDomain1Code", 1, 4) } return nil } // Must be at 
least 1 items long type ExternalBankTransactionFamily1Code string func (r ExternalBankTransactionFamily1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalBankTransactionFamily1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalBankTransactionSubFamily1Code string func (r ExternalBankTransactionSubFamily1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalBankTransactionSubFamily1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCardTransactionCategory1Code string func (r ExternalCardTransactionCategory1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCardTransactionCategory1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalChargeType1Code string func (r ExternalChargeType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalChargeType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCreditLineType1Code string func (r ExternalCreditLineType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCreditLineType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalEntryStatus1Code string func (r ExternalEntryStatus1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalEntryStatus1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalFinancialInstrumentIdentificationType1Code string func (r ExternalFinancialInstrumentIdentificationType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalFinancialInstrumentIdentificationType1Code", 1, 4) } return nil } // Must be at least 1 items long type 
ExternalRePresentmentReason1Code string func (r ExternalRePresentmentReason1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalRePresentmentReason1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalReportingSource1Code string func (r ExternalReportingSource1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalReportingSource1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalReturnReason1Code string func (r ExternalReturnReason1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalReturnReason1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalTechnicalInputChannel1Code string func (r ExternalTechnicalInputChannel1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalTechnicalInputChannel1Code", 1, 4) } return nil } // May be one of PDNG, STLD type BalanceStatus1Code string func (r BalanceStatus1Code) Validate() error { for _, vv := range []string{ "PDNG", "STLD", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("BalanceStatus1Code") } // May be one of YEAR, MNTH, QURT, MIAN, WEEK, DAIL, ADHO, INDA, OVNG type Frequency2Code string func (r Frequency2Code) Validate() error { for _, vv := range []string{ "YEAR", "MNTH", "QURT", "MIAN", "WEEK", "DAIL", "ADHO", "INDA", "OVNG", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Frequency2Code") } // May be one of LETT, MAIL, PHON, FAXX, CELL type PreferredContactMethod1Code string func (r PreferredContactMethod1Code) Validate() error { for _, vv := range []string{ "LETT", "MAIL", "PHON", "FAXX", "CELL", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PreferredContactMethod1Code") } 
// May be one of RJCT, CVHD, RSVT, BLCK, EARM, EFAC, DLVR, COLD, CSDB type ProcessingType1Code string func (r ProcessingType1Code) Validate() error { for _, vv := range []string{ "RJCT", "CVHD", "RSVT", "BLCK", "EARM", "EFAC", "DLVR", "COLD", "CSDB", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("ProcessingType1Code") } // May be one of USTO, PSTO type StandingOrderType1Code string func (r StandingOrderType1Code) Validate() error { for _, vv := range []string{ "USTO", "PSTO", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("StandingOrderType1Code") } // May be one of OPNG, INTM, CLSG, BOOK, CRRT, PDNG, LRLD, AVLB, LTSF, CRDT, EAST, PYMT, BLCK, XPCD, DLOD, XCRD, XDBT, ADJT, PRAV, DBIT, THRE, NOTE, FSET, BLOC, OTHB, CUST, FORC, COLC, FUND, PIPO, XCHG, CCPS, TOHB, COHB, DOHB, TPBL, CPBL, DPBL, FUTB, REJB, FCOL, FCOU, SCOL, SCOU, CUSA, XCHC, XCHN, DSET, LACK, NSET, OTCC, OTCG, OTCN, SAPD, SAPC, REPD, REPC, BSCD, BSCC, SAPP, IRLT, IRDR, DWRD, ADWR, AIDR type SystemBalanceType2Code string func (r SystemBalanceType2Code) Validate() error { for _, vv := range []string{ "OPNG", "INTM", "CLSG", "BOOK", "CRRT", "PDNG", "LRLD", "AVLB", "LTSF", "CRDT", "EAST", "PYMT", "BLCK", "XPCD", "DLOD", "XCRD", "XDBT", "ADJT", "PRAV", "DBIT", "THRE", "NOTE", "FSET", "BLOC", "OTHB", "CUST", "FORC", "COLC", "FUND", "PIPO", "XCHG", "CCPS", "TOHB", "COHB", "DOHB", "TPBL", "CPBL", "DPBL", "FUTB", "REJB", "FCOL", "FCOU", "SCOL", "SCOU", "CUSA", "XCHC", "XCHN", "DSET", "LACK", "NSET", "OTCC", "OTCG", "OTCN", "SAPD", "SAPC", "REPD", "REPC", "BSCD", "BSCC", "SAPP", "IRLT", "IRDR", "DWRD", "ADWR", "AIDR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("SystemBalanceType2Code") } // May be one of PDNG, FINL type CashPaymentStatus2Code string func (r CashPaymentStatus2Code) Validate() error { for _, vv := range []string{ "PDNG", "FINL", } { if reflect.DeepEqual(string(r), vv) { 
return nil } } return utils.NewErrValueInvalid("CashPaymentStatus2Code") } // May be one of BOOK, PDNG, FUTR type EntryStatus1Code string func (r EntryStatus1Code) Validate() error { for _, vv := range []string{ "BOOK", "PDNG", "FUTR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("EntryStatus1Code") } // May be one of STLD, RJTD, CAND, FNLD type FinalStatusCode string func (r FinalStatusCode) Validate() error { for _, vv := range []string{ "STLD", "RJTD", "CAND", "FNLD", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("FinalStatusCode") } // May be one of PBEN, TTIL, TFRO type Instruction1Code string func (r Instruction1Code) Validate() error { for _, vv := range []string{ "PBEN", "TTIL", "TFRO", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Instruction1Code") } // May be one of BDT, BCT, CDT, CCT, CHK, BKT, DCP, CCP, RTI, CAN type PaymentInstrument1Code string func (r PaymentInstrument1Code) Validate() error { for _, vv := range []string{ "BDT", "BCT", "CDT", "CCT", "CHK", "BKT", "DCP", "CCP", "RTI", "CAN", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PaymentInstrument1Code") } // May be one of CBS, BCK, BAL, CLS, CTR, CBH, CBP, DPG, DPN, EXP, TCH, LMT, LIQ, DPP, DPH, DPS, STF, TRP, TCS, LOA, LOR, TCP, OND, MGL type PaymentType3Code string func (r PaymentType3Code) Validate() error { for _, vv := range []string{ "CBS", "BCK", "BAL", "CLS", "CTR", "CBH", "CBP", "DPG", "DPN", "EXP", "TCH", "LMT", "LIQ", "DPP", "DPH", "DPS", "STF", "TRP", "TCS", "LOA", "LOR", "TCP", "OND", "MGL", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PaymentType3Code") } // May be one of ACPD, VALD, MATD, AUTD, INVD, UMAC, STLE, STLM, SSPD, PCAN, PSTL, PFST, SMLR, RMLR, SRBL, AVLB, SRML type PendingStatus4Code string func (r PendingStatus4Code) Validate() error { for _, vv := 
range []string{ "ACPD", "VALD", "MATD", "AUTD", "INVD", "UMAC", "STLE", "STLM", "SSPD", "PCAN", "PSTL", "PFST", "SMLR", "RMLR", "SRBL", "AVLB", "SRML", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PendingStatus4Code") } // May be one of HIGH, LOWW, NORM, URGT type Priority5Code string func (r Priority5Code) Validate() error { for _, vv := range []string{ "HIGH", "LOWW", "NORM", "URGT", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Priority5Code") } // May be one of ALLL, CHNG, MODF, DELD type QueryType2Code string func (r QueryType2Code) Validate() error { for _, vv := range []string{ "ALLL", "CHNG", "MODF", "DELD", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("QueryType2Code") } // May be one of STND, PRPR type ReportIndicator1Code string func (r ReportIndicator1Code) Validate() error { for _, vv := range []string{ "STND", "PRPR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("ReportIndicator1Code") } // May be one of CANI, CANS, CSUB type CancelledStatusReason1Code string func (r CancelledStatusReason1Code) Validate() error { for _, vv := range []string{ "CANI", "CANS", "CSUB", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("CancelledStatusReason1Code") } // May be one of STLD, RJTD, CAND, FNLD type FinalStatus1Code string func (r FinalStatus1Code) Validate() error { for _, vv := range []string{ "STLD", "RJTD", "CAND", "FNLD", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("FinalStatus1Code") } // May be one of AWMO, AWSH, LAAW, DOCY, CLAT, CERT, MINO, PHSE, SBLO, DKNY, STCD, BENO, LACK, LATE, CANR, MLAT, OBJT, DOCC, BLOC, CHAS, NEWI, CLAC, PART, CMON, COLL, DEPO, FLIM, NOFX, INCA, LINK, BYIY, CAIS, LALO, MONY, NCON, YCOL, REFS, SDUT, CYCL, BATC, GUAD, PREA, GLOB, CPEC, MUNO type 
PendingFailingSettlement1Code string func (r PendingFailingSettlement1Code) Validate() error { for _, vv := range []string{ "AWMO", "AWSH", "LAAW", "DOCY", "CLAT", "CERT", "MINO", "PHSE", "SBLO", "DKNY", "STCD", "BENO", "LACK", "LATE", "CANR", "MLAT", "OBJT", "DOCC", "BLOC", "CHAS", "NEWI", "CLAC", "PART", "CMON", "COLL", "DEPO", "FLIM", "NOFX", "INCA", "LINK", "BYIY", "CAIS", "LALO", "MONY", "NCON", "YCOL", "REFS", "SDUT", "CYCL", "BATC", "GUAD", "PREA", "GLOB", "CPEC", "MUNO", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PendingFailingSettlement1Code") } // May be one of AWMO, CAIS, REFU, AWSH, PHSE, TAMM, DOCY, DOCC, BLOC, CHAS, NEWI, CLAC, MUNO, GLOB, PREA, GUAD, PART, NMAS, CMON, YCOL, COLL, DEPO, FLIM, NOFX, INCA, LINK, FUTU, LACK, LALO, MONY, NCON, REFS, SDUT, BATC, CYCL, SBLO, CPEC, MINO, PCAP type PendingSettlement2Code string func (r PendingSettlement2Code) Validate() error { for _, vv := range []string{ "AWMO", "CAIS", "REFU", "AWSH", "PHSE", "TAMM", "DOCY", "DOCC", "BLOC", "CHAS", "NEWI", "CLAC", "MUNO", "GLOB", "PREA", "GUAD", "PART", "NMAS", "CMON", "YCOL", "COLL", "DEPO", "FLIM", "NOFX", "INCA", "LINK", "FUTU", "LACK", "LALO", "MONY", "NCON", "REFS", "SDUT", "BATC", "CYCL", "SBLO", "CPEC", "MINO", "PCAP", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PendingSettlement2Code") } // May be one of SUBY, SUBS type SuspendedStatusReason1Code string func (r SuspendedStatusReason1Code) Validate() error { for _, vv := range []string{ "SUBY", "SUBS", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("SuspendedStatusReason1Code") } // May be one of CMIS, DDAT, DELN, DEPT, DMON, DDEA, DQUA, CADE, SETR, DSEC, VASU, DTRA, RSPR, REPO, CLAT, RERT, REPA, REPP, PHYS, IIND, FRAP, PLCE, PODU, FORF, REGD, RTGS, ICAG, CPCA, CHAR, IEXE, NCRR, NMAS, SAFE, DTRD, LATE, TERM, ICUS type UnmatchedStatusReason1Code string func (r 
UnmatchedStatusReason1Code) Validate() error { for _, vv := range []string{ "CMIS", "DDAT", "DELN", "DEPT", "DMON", "DDEA", "DQUA", "CADE", "SETR", "DSEC", "VASU", "DTRA", "RSPR", "REPO", "CLAT", "RERT", "REPA", "REPP", "PHYS", "IIND", "FRAP", "PLCE", "PODU", "FORF", "REGD", "RTGS", "ICAG", "CPCA", "CHAR", "IEXE", "NCRR", "NMAS", "SAFE", "DTRD", "LATE", "TERM", "ICUS", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("UnmatchedStatusReason1Code") } // May be one of ENAB, DISA, DELD, REQD type LimitStatus1Code string func (r LimitStatus1Code) Validate() error { for _, vv := range []string{ "ENAB", "DISA", "DELD", "REQD", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("LimitStatus1Code") } // May be one of MULT, BILI, MAND, DISC, NELI, INBI, GLBL, DIDB, SPLC, SPLF, TDLC, TDLF, UCDT, ACOL, EXGT type LimitType3Code string func (r LimitType3Code) Validate() error { for _, vv := range []string{ "MULT", "BILI", "MAND", "DISC", "NELI", "INBI", "GLBL", "DIDB", "SPLC", "SPLF", "TDLC", "TDLF", "UCDT", "ACOL", "EXGT", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("LimitType3Code") } // May be one of RTGS, RTNS, MPNS, BOOK type ClearingChannel2Code string func (r ClearingChannel2Code) Validate() error { for _, vv := range []string{ "RTGS", "RTNS", "MPNS", "BOOK", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("ClearingChannel2Code") } // May be one of RADM, RPIN, FXDR, DISP, PUOR, SCOR type DocumentType3Code string func (r DocumentType3Code) Validate() error { for _, vv := range []string{ "RADM", "RPIN", "FXDR", "DISP", "PUOR", "SCOR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("DocumentType3Code") } // May be one of MSIN, CNFA, DNFA, CINV, CREN, DEBN, HIRI, SBIN, CMCN, SOAC, DISP, BOLD, VCHR, AROI, TSUT, PUOR type DocumentType6Code string func (r DocumentType6Code) 
Validate() error { for _, vv := range []string{ "MSIN", "CNFA", "DNFA", "CINV", "CREN", "DEBN", "HIRI", "SBIN", "CMCN", "SOAC", "DISP", "BOLD", "VCHR", "AROI", "TSUT", "PUOR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("DocumentType6Code") } // May be one of YEAR, MNTH, QURT, MIAN, WEEK, DAIL, ADHO, INDA, FRTN type Frequency6Code string func (r Frequency6Code) Validate() error { for _, vv := range []string{ "YEAR", "MNTH", "QURT", "MIAN", "WEEK", "DAIL", "ADHO", "INDA", "FRTN", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Frequency6Code") } // May be one of CHK, TRF, DD, TRA type PaymentMethod4Code string func (r PaymentMethod4Code) Validate() error { for _, vv := range []string{ "CHK", "TRF", "DD", "TRA", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PaymentMethod4Code") } // May be one of HIGH, NORM type Priority2Code string func (r Priority2Code) Validate() error { for _, vv := range []string{ "HIGH", "NORM", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Priority2Code") } // May be one of FRST, RCUR, FNAL, OOFF, RPRE type SequenceType3Code string func (r SequenceType3Code) Validate() error { for _, vv := range []string{ "FRST", "RCUR", "FNAL", "OOFF", "RPRE", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("SequenceType3Code") } // May be one of INDA, INGA, COVE, CLRG type SettlementMethod1Code string func (r SettlementMethod1Code) Validate() error { for _, vv := range []string{ "INDA", "INGA", "COVE", "CLRG", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("SettlementMethod1Code") } // May be one of MM01, MM02, MM03, MM04, MM05, MM06, MM07, MM08, MM09, MM10, MM11, MM12, QTR1, QTR2, QTR3, QTR4, HLF1, HLF2 type TaxRecordPeriod1Code string func (r TaxRecordPeriod1Code) Validate() error { for _, vv := range 
[]string{ "MM01", "MM02", "MM03", "MM04", "MM05", "MM06", "MM07", "MM08", "MM09", "MM10", "MM11", "MM12", "QTR1", "QTR2", "QTR3", "QTR4", "HLF1", "HLF2", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("TaxRecordPeriod1Code") } // May be one of IN01, IN02, IN03, IN04, IN05, IN06, IN07, IN08, IN09, IN10, IN11, IN12, IN13, IN14, IN15, IN16, IN17, IN18, IN19, MM20, MM21, MM22, MM25, MM26, MM27, MM28, MM29, MM30, MM31, MM32, IN33, MM34, MM35, IN36, IN37, IN38, IN39, NARR type UnableToApplyIncorrectInformation4Code string func (r UnableToApplyIncorrectInformation4Code) Validate() error { for _, vv := range []string{ "IN01", "IN02", "IN03", "IN04", "IN05", "IN06", "IN07", "IN08", "IN09", "IN10", "IN11", "IN12", "IN13", "IN14", "IN15", "IN16", "IN17", "IN18", "IN19", "MM20", "MM21", "MM22", "MM25", "MM26", "MM27", "MM28", "MM29", "MM30", "MM31", "MM32", "IN33", "MM34", "MM35", "IN36", "IN37", "IN38", "IN39", "NARR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("UnableToApplyIncorrectInformation4Code") } // May be one of MS01, MS02, MS03, MS04, MS05, MS06, MS07, MS08, MS09, MS10, MS11, MS12, MS13, MS14, MS15, MS16, MS17, NARR type UnableToApplyMissingInformation3Code string func (r UnableToApplyMissingInformation3Code) Validate() error { for _, vv := range []string{ "MS01", "MS02", "MS03", "MS04", "MS05", "MS06", "MS07", "MS08", "MS09", "MS10", "MS11", "MS12", "MS13", "MS14", "MS15", "MS16", "MS17", "NARR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("UnableToApplyMissingInformation3Code") } // May be one of ATTD, SATT, UATT type AttendanceContext1Code string func (r AttendanceContext1Code) Validate() error { for _, vv := range []string{ "ATTD", "SATT", "UATT", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("AttendanceContext1Code") } // May be one of ICCD, AGNT, MERC type AuthenticationEntity1Code 
string func (r AuthenticationEntity1Code) Validate() error { for _, vv := range []string{ "ICCD", "AGNT", "MERC", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("AuthenticationEntity1Code") } // May be one of UKNW, BYPS, NPIN, FPIN, CPSG, PPSG, MANU, MERC, SCRT, SNCT, SCNL type AuthenticationMethod1Code string func (r AuthenticationMethod1Code) Validate() error { for _, vv := range []string{ "UKNW", "BYPS", "NPIN", "FPIN", "CPSG", "PPSG", "MANU", "MERC", "SCRT", "SNCT", "SCNL", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("AuthenticationMethod1Code") } // May be one of PRST, BYPS, UNRD, NCSC type CSCManagement1Code string func (r CSCManagement1Code) Validate() error { for _, vv := range []string{ "PRST", "BYPS", "UNRD", "NCSC", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("CSCManagement1Code") } // May be one of TAGC, PHYS, BRCD, MGST, CICC, DFLE, CTLS, ECTL type CardDataReading1Code string func (r CardDataReading1Code) Validate() error { for _, vv := range []string{ "TAGC", "PHYS", "BRCD", "MGST", "CICC", "DFLE", "CTLS", "ECTL", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("CardDataReading1Code") } // May be one of AGGR, DCCV, GRTT, INSP, LOYT, NRES, PUCO, RECP, SOAF, UNAF, VCAU type CardPaymentServiceType2Code string func (r CardPaymentServiceType2Code) Validate() error { for _, vv := range []string{ "AGGR", "DCCV", "GRTT", "INSP", "LOYT", "NRES", "PUCO", "RECP", "SOAF", "UNAF", "VCAU", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("CardPaymentServiceType2Code") } // May be one of MNSG, NPIN, FCPN, FEPN, FDSG, FBIO, MNVR, FBIG, APKI, PKIS, CHDT, SCEC type CardholderVerificationCapability1Code string func (r CardholderVerificationCapability1Code) Validate() error { for _, vv := range []string{ "MNSG", "NPIN", "FCPN", "FEPN", "FDSG", "FBIO", "MNVR", 
"FBIG", "APKI", "PKIS", "CHDT", "SCEC", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("CardholderVerificationCapability1Code") } // May be one of DEBT, CRED, SHAR, SLEV type ChargeBearerType1Code string func (r ChargeBearerType1Code) Validate() error { for _, vv := range []string{ "DEBT", "CRED", "SHAR", "SLEV", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("ChargeBearerType1Code") } // May be one of OFLN, ONLN, SMON type OnLineCapability1Code string func (r OnLineCapability1Code) Validate() error { for _, vv := range []string{ "OFLN", "ONLN", "SMON", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("OnLineCapability1Code") } // May be one of SOFT, EMVK, EMVO, MRIT, CHIT, SECM, PEDV type POIComponentType1Code string func (r POIComponentType1Code) Validate() error { for _, vv := range []string{ "SOFT", "EMVK", "EMVO", "MRIT", "CHIT", "SECM", "PEDV", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("POIComponentType1Code") } // May be one of OPOI, MERC, ACCP, ITAG, ACQR, CISS, DLIS type PartyType3Code string func (r PartyType3Code) Validate() error { for _, vv := range []string{ "OPOI", "MERC", "ACCP", "ITAG", "ACQR", "CISS", "DLIS", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PartyType3Code") } // May be one of MERC, ACCP, ITAG, ACQR, CISS, TAXH type PartyType4Code string func (r PartyType4Code) Validate() error { for _, vv := range []string{ "MERC", "ACCP", "ITAG", "ACQR", "CISS", "TAXH", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PartyType4Code") } // May be one of DISC, PREM, PARV type PriceValueType1Code string func (r PriceValueType1Code) Validate() error { for _, vv := range []string{ "DISC", "PREM", "PARV", } { if reflect.DeepEqual(string(r), vv) { return nil } } return 
utils.NewErrValueInvalid("PriceValueType1Code") } // May be one of FAXI, EDIC, URID, EMAL, POST, SMSM type RemittanceLocationMethod2Code string func (r RemittanceLocationMethod2Code) Validate() error { for _, vv := range []string{ "FAXI", "EDIC", "URID", "EMAL", "POST", "SMSM", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("RemittanceLocationMethod2Code") } // May be one of MAIL, TLPH, ECOM, TVPY type TransactionChannel1Code string func (r TransactionChannel1Code) Validate() error { for _, vv := range []string{ "MAIL", "TLPH", "ECOM", "TVPY", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("TransactionChannel1Code") } // May be one of MERC, PRIV, PUBL type TransactionEnvironment1Code string func (r TransactionEnvironment1Code) Validate() error { for _, vv := range []string{ "MERC", "PRIV", "PUBL", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("TransactionEnvironment1Code") } // May be one of PIEC, TONS, FOOT, GBGA, USGA, GRAM, INCH, KILO, PUND, METR, CMET, MMET, LITR, CELI, MILI, GBOU, USOU, GBQA, USQA, GBPI, USPI, MILE, KMET, YARD, SQKI, HECT, ARES, SMET, SCMT, SMIL, SQMI, SQYA, SQFO, SQIN, ACRE type UnitOfMeasure1Code string func (r UnitOfMeasure1Code) Validate() error { for _, vv := range []string{ "PIEC", "TONS", "FOOT", "GBGA", "USGA", "GRAM", "INCH", "KILO", "PUND", "METR", "CMET", "MMET", "LITR", "CELI", "MILI", "GBOU", "USOU", "GBQA", "USQA", "GBPI", "USPI", "MILE", "KMET", "YARD", "SQKI", "HECT", "ARES", "SMET", "SCMT", "SMIL", "SQMI", "SQYA", "SQFO", "SQIN", "ACRE", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("UnitOfMeasure1Code") } // May be one of MDSP, CDSP type UserInterface2Code string func (r UserInterface2Code) Validate() error { for _, vv := range []string{ "MDSP", "CDSP", } { if reflect.DeepEqual(string(r), vv) { return nil } } return 
utils.NewErrValueInvalid("UserInterface2Code") } // Must match the pattern [0-9] type Exact1NumericText string func (r Exact1NumericText) Validate() error { reg := regexp.MustCompile(`[0-9]`) if !reg.MatchString(string(r)) { return utils.NewErrValueInvalid("Exact1NumericText") } return nil } // Must match the pattern [0-9]{3} type Exact3NumericText string func (r Exact3NumericText) Validate() error { reg := regexp.MustCompile(`[0-9]{3}`) if !reg.MatchString(string(r)) { return utils.NewErrValueInvalid("Exact3NumericText") } return nil } // Must match the pattern [A-Z]{2,2}[A-Z0-9]{9,9}[0-9]{1,1} type ISINOct2015Identifier string func (r ISINOct2015Identifier) Validate() error { reg := regexp.MustCompile(`[A-Z]{2,2}[A-Z0-9]{9,9}[0-9]{1,1}`) if !reg.MatchString(string(r)) { return utils.NewErrValueInvalid("ISINOct2015Identifier") } return nil } // Must match the pattern [a-z]{2,2} type ISO2ALanguageCode string func (r ISO2ALanguageCode) Validate() error { reg := regexp.MustCompile(`[a-z]{2,2}`) if !reg.MatchString(string(r)) { return utils.NewErrValueInvalid("ISO2ALanguageCode") } return nil } // Must match the pattern [BEOVW]{1,1}[0-9]{2,2}|DUM type EntryTypeIdentifier string func (r EntryTypeIdentifier) Validate() error { reg := regexp.MustCompile(`[BEOVW]{1,1}[0-9]{2,2}|DUM`) if !reg.MatchString(string(r)) { return utils.NewErrValueInvalid("EntryTypeIdentifier") } return nil }
package cmd

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// fetchMessagesCmd implements the "fetch-messages" command (alias "fetch"),
// which retrieves messages from the configured account(s).
var fetchMessagesCmd = &cobra.Command{
	Use:     "fetch-messages",
	Aliases: []string{"fetch"},
	Short:   "Retrieves messages from your account(s)",
	Long:    `Connects to the BitMaelum servers and fetches new emails that are not available on your local system.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Fixed the user-facing message: "witch" -> "with", and %s -> %t for
		// the bool (the %s verb printed "%!s(bool=false)").
		fmt.Printf("fetchMessages called with %t and %s\n", *checkOnly, strings.Join(*addresses, " "))
	},
}

// Flag storage; populated in init when the flags are registered.
var checkOnly *bool
var addresses *[]string

func init() {
	rootCmd.AddCommand(fetchMessagesCmd)
	addresses = fetchMessagesCmd.PersistentFlags().StringArrayP("address", "a", []string{}, "Address(es) to fetch")
	checkOnly = fetchMessagesCmd.Flags().Bool("check-only", false, "Check only, don't download")
}
package main

import (
	"bytes"
	"fmt"
	"regexp"
	"strings"

	"github.com/irfansharif/log"
)

// Validation patterns for the flag parsers below, compiled once at package
// level instead of on every Set call.
var (
	// fileNameRegexp matches a bare Go file name such as "main.go".
	// BUG FIX: the original `^[\w]+.go$` left the '.' unescaped (any
	// character), so names like "mainXgo" were accepted.
	fileNameRegexp = regexp.MustCompile(`^\w+\.go$`)
	// modeRegexp matches a '|'-separated list of log mode names.
	// BUG FIX: the original used bracket character classes where alternation
	// was intended ("[info|debug|...]"), which matched almost any string made
	// of those letters. "disabled" is included because modeFromString
	// accepts it (and the old pattern happened to let it through).
	modeRegexp = regexp.MustCompile(`^(info|debug|warn|error|disabled)(\|(info|debug|warn|error|disabled))*$`)
	// lineNumberRegexp matches a decimal line number.
	lineNumberRegexp = regexp.MustCompile(`^\d+$`)
)

// logMode is a flag.Value holding a global log mode (e.g. "info|debug").
type logMode struct {
	m   log.Mode
	set bool // true once Set has been called at least once
}

// String renders the mode using the same syntax Set accepts.
func (l logMode) String() string {
	return modeToString(l.m)
}

// Set parses a '|'-separated mode string and records that the flag was set.
func (l *logMode) Set(value string) error {
	l.set = true
	m, err := modeFromString(value)
	if err != nil {
		return err
	}
	l.m = m
	return nil
}

// fileLogMode pairs a file name with the log mode configured for it.
type fileLogMode struct {
	fname string
	fmode log.Mode
}

// logFilter is a flag.Value holding per-file log modes, e.g.
// "store.go:info|debug,main.go:error".
type logFilter []fileLogMode

// String renders the filter as "[file:mode file:mode ...]".
func (l logFilter) String() string {
	var buf bytes.Buffer
	buf.WriteString("[")
	for i, f := range l {
		if i > 0 {
			buf.WriteString(" ")
		}
		buf.WriteString(fmt.Sprintf("%s:%s", f.fname, modeToString(f.fmode)))
	}
	buf.WriteString("]")
	return buf.String()
}

// Set parses a comma-separated list of "fname.go:mode" entries and appends
// them to the filter.
func (l *logFilter) Set(value string) error {
	for _, entry := range strings.Split(value, ",") {
		parts := strings.Split(entry, ":")
		if len(parts) != 2 {
			return fmt.Errorf("Improperly formatted filter: %s, expected fname.go:mode", parts)
		}
		fname, mode := parts[0], parts[1]
		if !fileNameRegexp.MatchString(fname) {
			// TODO(irfansharif): Better error here.
			return fmt.Errorf("Expected filename '%s' to match the regex '%s'", fname, fileNameRegexp)
		}
		if !modeRegexp.MatchString(mode) {
			return fmt.Errorf("Expected mode '%s' to match the regex '%s'", mode, modeRegexp)
		}
		fmode, err := modeFromString(mode)
		if err != nil {
			return err
		}
		*l = append(*l, fileLogMode{fname: fname, fmode: fmode})
	}
	return nil
}

// backtracePoints is a flag.Value collecting "fname.go:line" backtrace points.
type backtracePoints []string

// String renders the collected points.
func (l *backtracePoints) String() string {
	return fmt.Sprint(*l)
}

// Set parses a comma-separated list of "fname.go:line" entries.
func (l *backtracePoints) Set(value string) error {
	for _, entry := range strings.Split(value, ",") {
		parts := strings.Split(entry, ":")
		if len(parts) != 2 {
			return fmt.Errorf("Improperly formatted filter: %s, expected fname.go:line", parts)
		}
		fname, lnumber := parts[0], parts[1]
		if !fileNameRegexp.MatchString(fname) {
			return fmt.Errorf("Expected filename '%s' to match the regex '%s'", fname, fileNameRegexp)
		}
		if !lineNumberRegexp.MatchString(lnumber) {
			return fmt.Errorf("Expected line number '%s' to match the regex '%s'", lnumber, lineNumberRegexp)
		}
		*l = append(*l, fmt.Sprintf("%s:%s", fname, lnumber))
	}
	return nil
}

// modeFromString parses a '|'-separated list of mode names into a log.Mode
// bitmask. "disabled" resets the mask; NOTE(review): modes listed after
// "disabled" are still OR'd back in, preserving the original behavior.
func modeFromString(value string) (log.Mode, error) {
	var m log.Mode
	for _, mode := range strings.Split(value, "|") {
		switch mode {
		case "info":
			m |= log.InfoMode
		case "debug":
			m |= log.DebugMode
		case "warn":
			m |= log.WarnMode
		case "error":
			m |= log.ErrorMode
		case "disabled":
			m = log.DisabledMode
		default:
			// BUG FIX: report the unrecognized token itself; the original
			// printed the partially accumulated mask, which was useless.
			return m, fmt.Errorf("unrecognized mode: %v", mode)
		}
	}
	return m, nil
}

// modeToString renders a mode bitmask in the "info|warn|error|debug" syntax
// (subset, in that order), or "disabled" for the zero mask.
func modeToString(m log.Mode) string {
	if m == log.DisabledMode {
		return "disabled"
	}
	var parts []string
	if m&log.InfoMode != log.DisabledMode {
		parts = append(parts, "info")
	}
	if m&log.WarnMode != log.DisabledMode {
		parts = append(parts, "warn")
	}
	if m&log.ErrorMode != log.DisabledMode {
		parts = append(parts, "error")
	}
	if m&log.DebugMode != log.DisabledMode {
		parts = append(parts, "debug")
	}
	return strings.Join(parts, "|")
}
package main

import (
	"net/http"
	"time"

	"github.com/BKH7/go-client/fetch"
	"github.com/sirupsen/logrus"
)

// main posts a single hard-coded test message via the fetch client and logs
// any error; it is a minimal manual exercise of fetch.PostMsg.
func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	msg := &fetch.MsgStruct{
		ID:     1,
		Sender: "Tom",
		Msg:    "Hello",
	}
	if err := fetch.PostMsg(client, msg); err != nil {
		logrus.Error(err)
	}
}
/*
 * Created on Thu Feb 28 2019 9:15:33
 * Author: WuLC
 * EMail: liangchaowu5@gmail.com
 */

// numRookCaptures returns how many pawns ('p') the white rook ('R') on the
// given 8x8 board can capture in one move. The rook scans outward along its
// row and column; a bishop ('B') blocks the scan in that direction. The board
// is assumed to contain at most one 'R'; scanning stops at the first one found.
func numRookCaptures(board [][]byte) int {
	captures := 0
	for row := 0; row < 8; row++ {
		for col := 0; col < 8; col++ {
			if board[row][col] != 'R' {
				continue
			}
			// Walk away from the rook in each of the four directions until
			// the edge, a blocking bishop, or a captured pawn.
			for _, dir := range [4][2]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}} {
				r, c := row+dir[0], col+dir[1]
				for r >= 0 && r < 8 && c >= 0 && c < 8 {
					if board[r][c] == 'B' {
						break
					}
					if board[r][c] == 'p' {
						captures++
						break
					}
					r += dir[0]
					c += dir[1]
				}
			}
			return captures
		}
	}
	return captures
}
package vite

import (
	"fmt"
	"testing"
)

// TestWallet initializes the environment via Setup and creates a new mnemonic
// plus entropy store protected by the password "111111".
// NOTE(review): the results are only printed, not asserted — this is a smoke
// test; consider failing on a non-nil err.
func TestWallet(t *testing.T) {
	Setup()
	mnemonic, em, err := NewMnemonicAndEntropyStore("111111")
	fmt.Println(mnemonic, em, err)
}

// TestGetPrimaryAddr initializes the environment via Setup and prints the
// primary address. NOTE(review): no assertions — smoke test only.
func TestGetPrimaryAddr(t *testing.T) {
	Setup()
	addr := GetPrimaryAddr()
	fmt.Println(addr)
}
package mydsl

import (
	"gopkg.in/yaml.v2"
	"io/ioutil"
	"os"
	"testing"
)

// loadTestsuites opens path, reads it fully, and strictly unmarshals it as a
// list of YAML testsuite maps, failing the test on any error. It factors out
// the loading boilerplate previously duplicated in both tests below.
func loadTestsuites(t *testing.T, path string) []map[interface{}]interface{} {
	t.Helper()
	f, err := os.Open(path)
	if err != nil {
		t.Fatalf("open error:%v", err)
	}
	defer f.Close()
	yamlInput, err := ioutil.ReadAll(f)
	if err != nil {
		t.Fatalf("read error:%v", err)
	}
	var testsuites []map[interface{}]interface{}
	// BUG FIX: the original logged `err` (nil at this point, since ReadAll
	// succeeded) instead of the unmarshal error, hiding the YAML problem.
	if yamlError := yaml.UnmarshalStrict(yamlInput, &testsuites); yamlError != nil {
		t.Fatalf("unmarshal error:%v", yamlError)
	}
	return testsuites
}

// TestForCoverage evaluates every testsuite in testsuite.yml and expects each
// evaluation to succeed.
func TestForCoverage(t *testing.T) {
	container := &map[string]interface{}{}
	for _, testsuite := range loadTestsuites(t, "test/yamls/testsuite.yml") {
		evaluated, err := NewArgument(testsuite).Evaluate(container)
		if err != nil {
			t.Errorf("testsuite %s failed: %v", testsuite["testsuite"].([]interface{})[0], evaluated)
		}
	}
}

// TestForTestsuite evaluates every testsuite in testsuite_test.yml; these
// fixtures are expected to FAIL evaluation, so a nil error is reported.
func TestForTestsuite(t *testing.T) {
	container := &map[string]interface{}{}
	for _, testsuite := range loadTestsuites(t, "test/yamls/testsuite_test.yml") {
		evaluated, err := NewArgument(testsuite).Evaluate(container)
		if err == nil {
			t.Errorf("testsuite %s failed: %v", testsuite["testsuite"].([]interface{})[0], evaluated)
		}
	}
}
package routes

import "github.com/labstack/echo/v4"

// InitRoutes registers every route group (auth, todo) on the given Echo
// instance; call once during application startup.
func InitRoutes(e *echo.Echo) {
	initAuthRoutes(e)
	initTodoRoutes(e)
}
package rex

import (
	potato "github.com/rise-worlds/potato-go"
)

// NewDeposit builds the rex contract's "deposit" action for the given owner
// and amount, authorized by the owner's active permission.
func NewDeposit(owner potato.AccountName, amount potato.Asset) *potato.Action {
	return &potato.Action{
		Account: REXAN,
		Name:    ActN("deposit"),
		Authorization: []potato.PermissionLevel{
			{Actor: owner, Permission: potato.PermissionName("active")},
		},
		ActionData: potato.NewActionData(Deposit{
			Owner:  owner,
			Amount: amount,
		}),
	}
}

// Deposit is the serialized payload of the rex "deposit" action.
type Deposit struct {
	Owner  potato.AccountName // account funding the deposit
	Amount potato.Asset       // amount to deposit
}
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package firmware import ( "context" "fmt" "regexp" "strconv" "time" gossh "golang.org/x/crypto/ssh" fwCommon "chromiumos/tast/common/firmware" "chromiumos/tast/common/servo" "chromiumos/tast/errors" "chromiumos/tast/remote/firmware" "chromiumos/tast/remote/firmware/fixture" "chromiumos/tast/remote/firmware/reporters" "chromiumos/tast/ssh" "chromiumos/tast/testing" "chromiumos/tast/testing/hwdep" ) // eventLogParams contains all the data needed to run a single test iteration. type eventLogParams struct { resetType firmware.ResetType bootToMode fwCommon.BootMode suspendResume bool suspendToIdle string hardwareWatchdog bool // All of the regexes in one of the sets must be present. Ex. // [][]string{[]string{`Case 1A`, `Case 1B`}, []string{`Case 2A`, `Case 2[BC]`}} // Any of these events would pass: // Case 1A, Case 1B // Case 2A, Case 2B // Case 2A, Case 2C requiredEventSets [][]string prohibitedEvents string allowedEvents string } func init() { testing.AddTest(&testing.Test{ Func: Eventlog, Desc: "Ensure that eventlog is written on boot and suspend/resume", Contacts: []string{ "gredelston@google.com", // Test author. "cros-fw-engprod@google.com", }, Attr: []string{"group:firmware"}, HardwareDeps: hwdep.D( // Eventlog is broken/wontfix on veyron devices. // See http://b/35585376#comment14 for more info. hwdep.SkipOnPlatform("veyron_fievel"), hwdep.SkipOnPlatform("veyron_tiger"), ), SoftwareDeps: []string{"crossystem", "flashrom"}, Vars: []string{"firmware.skipFlashUSB"}, Params: []testing.Param{ // Test eventlog upon normal->normal reboot. 
{ Name: "normal", ExtraAttr: []string{"firmware_ec"}, // Disable on leona (b/184778308) and coral (b/250684696) ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("leona", "astronaut", "babymega", "babytiger", "blacktiplte", "nasher", "robo360")), Fixture: fixture.NormalMode, Val: eventLogParams{ resetType: firmware.WarmReset, requiredEventSets: [][]string{{`System boot`}}, prohibitedEvents: `Developer Mode|Recovery Mode|Sleep| Wake`, }, }, { // Allow some normally disallowed events on leona. b/184778308 Name: "leona_normal", ExtraAttr: []string{"firmware_ec"}, ExtraHardwareDeps: hwdep.D(hwdep.Model("leona")), Fixture: fixture.NormalMode, Val: eventLogParams{ resetType: firmware.WarmReset, requiredEventSets: [][]string{{`System boot`}}, prohibitedEvents: `Developer Mode|Recovery Mode|Sleep| Wake`, allowedEvents: `^ACPI Wake \| Deep S5$`, }, }, // Test eventlog upon dev->dev reboot. { Name: "dev", ExtraAttr: []string{"firmware_ec"}, ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("leona")), Fixture: fixture.DevModeGBB, Val: eventLogParams{ resetType: firmware.WarmReset, requiredEventSets: [][]string{{`System boot`, `Chrome ?OS Developer Mode|boot_mode=Developer`}}, prohibitedEvents: `Recovery Mode|Sleep| Wake`, }, }, // Allow some normally disallowed events on leona. b/184778308 { Name: "leona_dev", ExtraAttr: []string{"firmware_ec"}, ExtraHardwareDeps: hwdep.D(hwdep.Model("leona")), Fixture: fixture.DevModeGBB, Val: eventLogParams{ resetType: firmware.WarmReset, requiredEventSets: [][]string{{`System boot`, `Chrome ?OS Developer Mode|boot_mode=Developer`}}, prohibitedEvents: `Recovery Mode|Sleep| Wake`, allowedEvents: `^ACPI Wake \| Deep S5$`, }, }, // Test eventlog upon normal->rec reboot. 
{ Name: "normal_rec", ExtraAttr: []string{"firmware_unstable", "firmware_usb"}, Fixture: fixture.NormalMode, Val: eventLogParams{ bootToMode: fwCommon.BootModeRecovery, requiredEventSets: [][]string{{`System boot`, `(?i)Chrome ?OS Recovery Mode \| Recovery Button|boot_mode=Manual recovery`}}, prohibitedEvents: `Developer Mode|Sleep|FW Wake|ACPI Wake \| S3`, }, Timeout: 60 * time.Minute, }, // Test eventlog upon rec->normal reboot. { Name: "rec_normal", ExtraAttr: []string{"firmware_unstable", "firmware_usb"}, Fixture: fixture.RecModeNoServices, Val: eventLogParams{ bootToMode: fwCommon.BootModeNormal, requiredEventSets: [][]string{{`System boot`}}, prohibitedEvents: `Developer Mode|Recovery Mode|Sleep`, }, Timeout: 6 * time.Minute, }, // Test eventlog upon suspend/resume w/ default value of suspend_to_idle. // treeya: ACPI Enter | S3, EC Event | Power Button, ACPI Wake | S3, Wake Source | Power Button | 0 // kindred: S0ix Enter, S0ix Exit, Wake Source | Power Button | 0, EC Event | Power Button // leona: S0ix Enter, S0ix Exit, Wake Source | Power Button | 0, EC Event | Power Button // eldrid: S0ix Enter, S0ix Exit, Wake Source | Power Button | 0, EC Event | Power Button // hayato: Sleep, Wake { Name: "suspend_resume", ExtraAttr: []string{"firmware_unstable"}, Fixture: fixture.NormalMode, Val: eventLogParams{ suspendResume: true, requiredEventSets: [][]string{ {`Sleep`, `^Wake`}, {`ACPI Enter \| S3`, `ACPI Wake \| S3`}, {`S0ix Enter`, `S0ix Exit`}, }, prohibitedEvents: `System |Developer Mode|Recovery Mode`, }, }, // Test eventlog upon suspend/resume w/ suspend_to_idle. // On supported machines, this should go to S0ix or stay in S0. 
// x86 duts: S0ix Enter, S0ix Exit, Wake Source | Power Button | 0, EC Event | Power Button
// hayato: FAIL Sleep, System boot
// treeya: FAIL Nothing logged
{
	Name:      "suspend_resume_idle",
	ExtraAttr: []string{"firmware_unstable"},
	Fixture:   fixture.NormalMode,
	Val: eventLogParams{
		suspendResume: true,
		suspendToIdle: "1",
		requiredEventSets: [][]string{
			{`Sleep`, `^Wake`},
			{`S0ix Enter`, `S0ix Exit`},
		},
		prohibitedEvents: `System |Developer Mode|Recovery Mode`,
	},
},
// Test eventlog upon suspend/resume w/o suspend_to_idle.
// This should power down all the way to S3.
// eldrid: FAIL ACPI Enter | S3, EC Event | Power Button, ACPI Wake | S3, Wake Source | Power Button | 0 -> Gets stuck and doesn't boot.
// hayato: Sleep, Wake
// x86 duts: ACPI Enter | S3, EC Event | Power Button, ACPI Wake | S3, Wake Source | Power Button | 0
{
	Name:      "suspend_resume_noidle",
	ExtraAttr: []string{"firmware_unstable"},
	Fixture:   fixture.NormalMode,
	Val: eventLogParams{
		suspendResume: true,
		suspendToIdle: "0",
		requiredEventSets: [][]string{
			{`Sleep`, `^Wake`},
			{`ACPI Enter \| S3`, `ACPI Wake \| S3`},
		},
		prohibitedEvents: `System |Developer Mode|Recovery Mode`,
	},
},
// Test eventlog with hardware watchdog.
{
	Name:              "watchdog",
	ExtraAttr:         []string{"firmware_ec"},
	Fixture:           fixture.NormalMode,
	ExtraSoftwareDeps: []string{"watchdog"},
	Val: eventLogParams{
		hardwareWatchdog: true,
		requiredEventSets: [][]string{
			{`System boot|Hardware watchdog reset`},
		},
	},
},
},
})
}

// eventMessagesContainReMatch returns true if any event's message matches the regexp.
func eventMessagesContainReMatch(ctx context.Context, events []reporters.Event, re *regexp.Regexp) bool {
	for _, event := range events {
		if re.MatchString(event.Message) {
			return true
		}
	}
	return false
}

// Eventlog triggers a transition on the DUT (warm reset, mode switch,
// suspend/resume, or hardware watchdog reset, chosen by the test param),
// then reads the firmware event log and verifies that at least one of the
// param's requiredEventSets matched in full and that no prohibitedEvents
// (minus allowedEvents exceptions) were logged.
func Eventlog(ctx context.Context, s *testing.State) {
	// Create mode-switcher.
	v := s.FixtValue().(*fixture.Value)
	h := v.Helper
	if err := h.RequireServo(ctx); err != nil {
		s.Fatal("Failed to connect to servod")
	}
	ms, err := firmware.NewModeSwitcher(ctx, h)
	if err != nil {
		s.Fatal("Creating mode switcher: ", err)
	}
	r := h.Reporter
	param := s.Param().(eventLogParams)

	// Remember the newest pre-existing event so that only events produced by
	// this test run are examined later (EventlogListAfter cutoff).
	var cutoffEvent reporters.Event
	oldEvents, err := r.EventlogList(ctx)
	if err != nil {
		s.Fatal("Finding last event: ", err)
	}
	if len(oldEvents) > 0 {
		cutoffEvent = oldEvents[len(oldEvents)-1]
		s.Log("Found previous event: ", cutoffEvent)
	}

	if param.resetType != "" {
		if err := ms.ModeAwareReboot(ctx, param.resetType); err != nil {
			s.Fatal("Error resetting DUT: ", err)
		}
	} else if param.bootToMode != "" {
		// If booting into recovery, check the USB Key.
		if param.bootToMode == fwCommon.BootModeRecovery {
			skipFlashUSB := false
			if skipFlashUSBStr, ok := s.Var("firmware.skipFlashUSB"); ok {
				skipFlashUSB, err = strconv.ParseBool(skipFlashUSBStr)
				if err != nil {
					s.Fatalf("Invalid value for var firmware.skipFlashUSB: got %q, want true/false", skipFlashUSBStr)
				}
			}
			cs := s.CloudStorage()
			if skipFlashUSB {
				// A nil CloudStorage skips reflashing the USB key.
				cs = nil
			}
			if err := h.SetupUSBKey(ctx, cs); err != nil {
				s.Fatal("USBKey not working: ", err)
			}
		}
		if err := ms.RebootToMode(ctx, param.bootToMode); err != nil {
			s.Fatalf("Error during transition to %s: %+v", param.bootToMode, err)
		}
	} else if param.suspendResume {
		if err := h.Servo.WatchdogRemove(ctx, servo.WatchdogCCD); err != nil {
			s.Error("Failed to remove watchdog for ccd: ", err)
		}
		if param.suspendToIdle != "" {
			// Override powerd's suspend_to_idle pref via a bind mount, then
			// restart powerd so it takes effect. Undone by the deferred umount.
			if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c",
				fmt.Sprintf(
					"mkdir -p /tmp/power_manager && "+
						"echo %q > /tmp/power_manager/suspend_to_idle && "+
						"mount --bind /tmp/power_manager /var/lib/power_manager && "+
						"restart powerd", param.suspendToIdle),
			).Run(ssh.DumpLogOnError); err != nil {
				s.Fatal("Failed to set suspend to idle: ", err)
			}
			defer func(ctx context.Context) {
				if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c",
					"umount /var/lib/power_manager && restart powerd",
				).Run(ssh.DumpLogOnError); err != nil {
					s.Log("Failed to restore powerd settings: ", err)
				}
			}(ctx)
			// Suspend will fail right after restarting powerd.
			// NOTE(review): the error from Sleep is ignored here, unlike the
			// checked Sleep below.
			testing.Sleep(ctx, 2*time.Second)
		}
		h.CloseRPCConnection(ctx)
		s.Log("Suspending DUT")
		// Short timeout: powerd_dbus_suspend is expected to not return
		// normally (the DUT suspends under it), so DeadlineExceeded and
		// ExitMissingError are treated as success below.
		shortCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
		defer cancel()
		if err := h.DUT.Conn().CommandContext(shortCtx, "powerd_dbus_suspend").Run(ssh.DumpLogOnError); err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, &gossh.ExitMissingError{}) {
			s.Fatal("Failed to suspend: ", err)
		}
		// Let the DUT stay in suspend a little while. 10s seems to be enough to allow wake up. Shorter times might work also.
		if err := testing.Sleep(ctx, 10*time.Second); err != nil {
			s.Fatal("Failed to sleep: ", err)
		}
		powerState, err := h.Servo.GetECSystemPowerState(ctx)
		if err != nil {
			s.Error("Failed to get power state: ", err)
		}
		s.Log("Power state: ", powerState)
		s.Log("Pressing ENTER key to wake DUT")
		if err := h.Servo.KeypressWithDuration(ctx, servo.Enter, servo.DurTab); err != nil {
			s.Fatal("Failed to press enter key")
		}
		s.Log("Reconnecting to DUT")
		shortCtx, cancel = context.WithTimeout(ctx, 60*time.Second)
		defer cancel()
		if err := h.WaitConnect(shortCtx); err != nil {
			s.Fatal("Failed to reconnect to DUT: ", err)
		}
		s.Log("Reconnected to DUT")
	} else if param.hardwareWatchdog {
		if err := h.Servo.WatchdogRemove(ctx, servo.WatchdogCCD); err != nil {
			s.Error("Failed to remove watchdog for ccd: ", err)
		}
		// Daisydog is the watchdog service.
		// Stop the petting service, then hold /dev/watchdog open without
		// writing so the hardware watchdog fires and resets the DUT.
		cmd := `nohup sh -c 'sleep 2
sync
stop daisydog
sleep 60 > /dev/watchdog' >/dev/null 2>&1 </dev/null &`
		if err := h.DUT.Conn().CommandContext(ctx, "bash", "-c", cmd).Run(); err != nil {
			s.Fatal("Failed to panic DUT: ", err)
		}
		s.Log("Waiting for DUT to become unreachable")
		h.CloseRPCConnection(ctx)
		if err := h.DUT.WaitUnreachable(ctx); err != nil {
			s.Fatal("Failed to wait for DUT to become unreachable: ", err)
		}
		s.Log("DUT became unreachable (as expected)")
		s.Log("Reconnecting to DUT")
		shortCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
		defer cancel()
		if err := h.WaitConnect(shortCtx); err != nil {
			s.Fatal("Failed to reconnect to DUT: ", err)
		}
		s.Log("Reconnected to DUT")
	}

	// Sometimes events are missing if you check too quickly after boot.
	var events []reporters.Event
	if err := testing.Poll(ctx, func(context.Context) error {
		var err error
		events, err = r.EventlogListAfter(ctx, cutoffEvent)
		if err != nil {
			return testing.PollBreak(err)
		}
		if len(events) == 0 {
			return errors.New("no new events found")
		}
		return nil
	}, &testing.PollOptions{
		Timeout:  1 * time.Minute,
		Interval: 5 * time.Second,
	}); err != nil {
		s.Fatal("Gathering events: ", err)
	}

	for _, event := range events {
		s.Log("Found event: ", event)
	}

	// Complicated rules here.
	// One of the param.requiredEventSets must be found.
	// Within that event set, all the regexs need to match to be considered found.
	requiredEventsFound := false
	for _, requiredEventSet := range param.requiredEventSets {
		foundAllRequiredEventsInSet := true
		for _, requiredEvent := range requiredEventSet {
			reRequiredEvent := regexp.MustCompile(requiredEvent)
			if !eventMessagesContainReMatch(ctx, events, reRequiredEvent) {
				foundAllRequiredEventsInSet = false
				break
			}
		}
		if foundAllRequiredEventsInSet {
			requiredEventsFound = true
			break
		}
	}
	if !requiredEventsFound {
		s.Error("Required event missing")
	}

	// Prohibited events fail the test unless explicitly allowed by
	// param.allowedEvents.
	if param.prohibitedEvents != "" {
		reProhibitedEvents := regexp.MustCompile(param.prohibitedEvents)
		var allowedRe *regexp.Regexp
		if param.allowedEvents != "" {
			allowedRe = regexp.MustCompile(param.allowedEvents)
		}
		for _, event := range events {
			if reProhibitedEvents.MatchString(event.Message) && (allowedRe == nil || !allowedRe.MatchString(event.Message)) {
				s.Errorf("Incorrect event logged: %+v", event)
			}
		}
	}
}
package util

import (
	"fmt"
	"testing"

	"github.com/kylelemons/godebug/pretty"
)

// TestGroupNames checks that GroupNames partitions a name list into chunks of
// the requested size, with a shorter final chunk when the list does not divide
// evenly.
func TestGroupNames(t *testing.T) {
	names := make([]string, 0, 5)
	for i := 1; i <= 5; i++ {
		names = append(names, fmt.Sprintf("server%d.loadavg5", i))
	}
	got := GroupNames(names, 2)
	want := [][]string{
		{"server1.loadavg5", "server2.loadavg5"},
		{"server3.loadavg5", "server4.loadavg5"},
		{"server5.loadavg5"},
	}
	if diff := pretty.Compare(got, want); diff != "" {
		t.Fatalf("diff: (-actual +expected)\n%s", diff)
	}
}

// TestSplitName checks that SplitName expands a brace-enclosed list segment
// into one name per alternative.
func TestSplitName(t *testing.T) {
	got := SplitName("roleA.r.{1,2,3,4}.loadavg")
	want := []string{
		"roleA.r.1.loadavg",
		"roleA.r.2.loadavg",
		"roleA.r.3.loadavg",
		"roleA.r.4.loadavg",
	}
	if diff := pretty.Compare(got, want); diff != "" {
		t.Fatalf("diff: (-actual +expected)\n%s", diff)
	}
}
// Copyright The runc Authors.
// Copyright The containerd Authors.
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cgroup

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"math/big"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/cenkalti/backoff"
	"github.com/coreos/go-systemd/v22/dbus"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/pkg/cleanup"
	"gvisor.dev/gvisor/pkg/log"
)

const (
	// subtreeControl is the cgroupfs file that enables controllers for children.
	subtreeControl = "cgroup.subtree_control"
	// controllersFile lists the controllers available at a cgroup level.
	controllersFile = "cgroup.controllers"
	// cgroup2Key is the key used for the unified hierarchy in /proc/<pid>/cgroup maps.
	cgroup2Key = "cgroup2"

	// https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
	defaultPeriod = 100000
)

var (
	// ErrInvalidFormat is returned when a cgroup file cannot be parsed.
	ErrInvalidFormat = errors.New("cgroup: parsing file with invalid format failed")
	// ErrInvalidGroupPath is returned for a malformed cgroup group path.
	ErrInvalidGroupPath = errors.New("cgroup: invalid group path")

	// controllers2 is the group of all supported cgroupv2 controllers
	controllers2 = map[string]controllerv2{
		"cpu":     &cpu2{},
		"cpuset":  &cpuset2{},
		"io":      &io2{},
		"memory":  &memory2{},
		"pids":    &pid2{},
		"hugetlb": &hugeTLB2{},
	}
)

// cgroupV2 represents a cgroup inside supported all cgroupV2 controllers
type cgroupV2 struct {
	// Mountpoint is the unified mount point of cgroupV2
	Mountpoint string `json:"mountpoint"`
	// Path is the relative path to the unified mountpoint
	Path string `json:"path"`
	// Controllers is the list of supported controllers
	Controllers []string `json:"controllers"`
	// Own is the list of paths created when installing this cgroup; only
	// these are removed on Uninstall.
	Own []string `json:"own"`
}

// newCgroupV2 builds a cgroupV2 rooted at mountpoint/group, reading the set of
// available controllers from cgroup.controllers. When useSystemd is set, the
// cgroup is wrapped for management through systemd instead.
func newCgroupV2(mountpoint, group string, useSystemd bool) (Cgroup, error) {
	data, err := ioutil.ReadFile(filepath.Join(mountpoint, "cgroup.controllers"))
	if err != nil {
		return nil, err
	}
	cg := &cgroupV2{
		Mountpoint:  mountpoint,
		Path:        group,
		Controllers: strings.Fields(string(data)),
	}
	if useSystemd {
		return newCgroupV2Systemd(cg)
	}
	// err is nil here; returned for symmetry with the error path.
	return cg, err
}

// createCgroupPaths creates every directory level of c.Path under the
// mountpoint and enables the known controllers for each intermediate level.
// It returns true if the final cgroup directory was created by this call
// (false if it already existed).
func (c *cgroupV2) createCgroupPaths() (bool, error) {
	// setup all known controllers for the current subtree
	// For example, given path /foo/bar and mount /sys/fs/cgroup, we need to write
	// the controllers to:
	//  * /sys/fs/cgroup/cgroup.subtree_control
	//  * /sys/fs/cgroup/foo/cgroup.subtree_control
	val := "+" + strings.Join(c.Controllers, " +")
	elements := strings.Split(c.Path, "/")
	current := c.Mountpoint
	created := false

	for i, e := range elements {
		current = filepath.Join(current, e)
		if i > 0 {
			if err := os.Mkdir(current, 0o755); err != nil {
				if !os.IsExist(err) {
					return false, err
				}
			} else {
				// Newly created: remember it so Uninstall removes only what we own.
				created = true
				c.Own = append(c.Own, current)
			}
		}
		// enable all known controllers for subtree
		if i < len(elements)-1 {
			if err := writeFile(filepath.Join(current, subtreeControl), []byte(val), 0700); err != nil {
				return false, err
			}
		}
	}
	return created, nil
}

// Install creates and configures cgroups.
func (c *cgroupV2) Install(res *specs.LinuxResources) error {
	log.Debugf("Installing cgroup path %q", c.MakePath(""))

	// Clean up partially created cgroups on error. Errors during cleanup itself
	// are ignored.
	clean := cleanup.Make(func() { _ = c.Uninstall() })
	defer clean.Clean()

	created, err := c.createCgroupPaths()
	if err != nil {
		return err
	}
	if created {
		// If we created our final cgroup path then we can set the resources.
		for controllerName, ctrlr := range controllers2 {
			// First check if our controller is found in the system.
			found := false
			for _, knownController := range c.Controllers {
				if controllerName == knownController {
					found = true
				}
			}

			// In case we don't have the controller.
			if found {
				if err := ctrlr.set(res, c.MakePath("")); err != nil {
					return err
				}
				continue
			}
			// Controller is absent: optional controllers may skip (with
			// validation), mandatory ones are a hard error.
			if ctrlr.optional() {
				if err := ctrlr.skip(res); err != nil {
					return err
				}
			} else {
				return fmt.Errorf("mandatory cgroup controller %q is missing for %q", controllerName, c.MakePath(""))
			}
		}
	}

	clean.Release()
	return nil
}

// Uninstall removes the settings done in Install(). If cgroup path already
// existed when Install() was called, Uninstall is a noop.
func (c *cgroupV2) Uninstall() error {
	log.Debugf("Deleting cgroup %q", c.MakePath(""))

	// If we try to remove the cgroup too soon after killing the sandbox we
	// might get EBUSY, so we retry for a few seconds until it succeeds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)

	// Deletion must occur reverse order, because they may contain ancestors.
	for i := len(c.Own) - 1; i >= 0; i-- {
		current := c.Own[i]
		log.Debugf("Removing cgroup for path=%q", current)

		fn := func() error {
			err := unix.Rmdir(current)
			if os.IsNotExist(err) {
				// Already gone; treat as success.
				return nil
			}
			return err
		}
		if err := backoff.Retry(fn, b); err != nil {
			return fmt.Errorf("removing cgroup path %q: %w", current, err)
		}
	}
	return nil
}

// Join adds the current process to the all controllers. Returns function that
// restores cgroup to the original state.
func (c *cgroupV2) Join() (func(), error) {
	// First save the current state so it can be restored.
	paths, err := loadPaths("self")
	if err != nil {
		return nil, err
	}
	// Since this is unified, get the first path of current process's cgroup is
	// enough.
undoPath := filepath.Join(c.Mountpoint, paths[cgroup2Key]) cu := cleanup.Make(func() { log.Debugf("Restoring cgroup %q", undoPath) // Writing the value 0 to a cgroup.procs file causes // the writing process to be moved to the corresponding // cgroup. - cgroups(7). if err := setValue(undoPath, "cgroup.procs", "0"); err != nil { log.Warningf("Error restoring cgroup %q: %v", undoPath, err) } }) defer cu.Clean() // now join the cgroup if err := setValue(c.MakePath(""), "cgroup.procs", "0"); err != nil { return nil, err } return cu.Release(), nil } // CPUQuota returns the CFS CPU quota. func (c *cgroupV2) CPUQuota() (float64, error) { cpuMax, err := getValue(c.MakePath(""), "cpu.max") if err != nil { return -1, err } return parseCPUQuota(cpuMax) } func parseCPUQuota(cpuMax string) (float64, error) { data := strings.SplitN(strings.TrimSpace(cpuMax), " ", 2) if len(data) != 2 { return -1, fmt.Errorf("invalid cpu.max data %q", cpuMax) } // no cpu limit if quota is max if data[0] == "max" { return -1, nil } quota, err := strconv.ParseInt(data[0], 10, 64) if err != nil { return -1, err } period, err := strconv.ParseInt(data[1], 10, 64) if err != nil { return -1, err } if quota <= 0 || period <= 0 { return -1, err } return float64(quota) / float64(period), nil } // CPUUsage returns the total CPU usage of the cgroup. func (c *cgroupV2) CPUUsage() (uint64, error) { cpuStat, err := getValue(c.MakePath(""), "cpu.stat") if err != nil { return 0, err } sc := bufio.NewScanner(strings.NewReader(cpuStat)) for sc.Scan() { key, value, err := parseKeyValue(sc.Text()) if err != nil { return 0, err } if key == "usage_usec" { return value, nil } } return 0, nil } // NumCPU returns the number of CPUs configured in 'cpuset/cpuset.cpus'. func (c *cgroupV2) NumCPU() (int, error) { cpuset, err := getValue(c.MakePath(""), "cpuset.cpus.effective") if err != nil { return 0, err } return countCpuset(strings.TrimSpace(cpuset)) } // MemoryLimit returns the memory limit. 
// MemoryLimit returns the memory limit from "memory.max"; "max" maps to
// MaxUint64 (unlimited).
func (c *cgroupV2) MemoryLimit() (uint64, error) {
	limStr, err := getValue(c.MakePath(""), "memory.max")
	if err != nil {
		return 0, err
	}
	limStr = strings.TrimSpace(limStr)
	if limStr == "max" {
		return math.MaxUint64, nil
	}
	return strconv.ParseUint(limStr, 10, 64)
}

// MakePath builds a path to the given controller.
// In the unified (v2) hierarchy all controllers share one directory, so
// controllerName is intentionally unused.
func (c *cgroupV2) MakePath(controllerName string) string {
	return filepath.Join(c.Mountpoint, c.Path)
}

// controllerv2 extends controller with systemd dbus property generation.
type controllerv2 interface {
	controller
	generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error)
}

type cpu2 struct {
	mandatory
}

// generateProperties translates OCI CPU resources into systemd unit properties
// (CPUWeight, CPUQuotaPeriodUSec, CPUQuotaPerSecUSec).
func (*cpu2) generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error) {
	props := []dbus.Property{}
	if spec == nil || spec.CPU == nil {
		return props, nil
	}
	cpu := spec.CPU
	if cpu.Shares != nil {
		weight := convertCPUSharesToCgroupV2Value(*cpu.Shares)
		if weight != 0 {
			props = append(props, newProp("CPUWeight", weight))
		}
	}
	var (
		period uint64
		quota  int64
	)
	if cpu.Period != nil {
		period = *cpu.Period
	}
	if cpu.Quota != nil {
		quota = *cpu.Quota
	}
	if period != 0 {
		props = append(props, newProp("CPUQuotaPeriodUSec", period))
	}
	if quota != 0 || period != 0 {
		// Corresponds to USEC_INFINITY in systemd.
		cpuQuotaPerSecUSec := uint64(math.MaxUint64)
		if quota > 0 {
			if period == 0 {
				// Assume the default.
				period = defaultPeriod
			}
			// systemd converts CPUQuotaPerSecUSec (microseconds per CPU second) to
			// CPUQuota (integer percentage of CPU) internally. This means that if a
			// fractional percent of CPU is indicated by spec.CPU.Quota, we need to
			// round up to the nearest 10ms (1% of a second) such that child cgroups
			// can set the cpu.cfs_quota_us they expect.
			cpuQuotaPerSecUSec = uint64(quota*1000000) / period
			if cpuQuotaPerSecUSec%10000 != 0 {
				cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000
			}
		}
		props = append(props, newProp("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec))
	}
	return props, nil
}

// set writes CPU weight and "cpu.max" ("$QUOTA $PERIOD" or "max $PERIOD")
// directly into the cgroupfs directory at path.
func (*cpu2) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.CPU == nil {
		return nil
	}
	if spec.CPU.Shares != nil {
		weight := convertCPUSharesToCgroupV2Value(*spec.CPU.Shares)
		if weight != 0 {
			if err := setValue(path, "cpu.weight", strconv.FormatUint(weight, 10)); err != nil {
				return err
			}
		}
	}

	if spec.CPU.Period != nil || spec.CPU.Quota != nil {
		// "max" means no quota; the period defaults when unset or zero.
		v := "max"
		if spec.CPU.Quota != nil && *spec.CPU.Quota > 0 {
			v = strconv.FormatInt(*spec.CPU.Quota, 10)
		}
		var period uint64
		if spec.CPU.Period != nil && *spec.CPU.Period != 0 {
			period = *spec.CPU.Period
		} else {
			period = defaultPeriod
		}
		v += " " + strconv.FormatUint(period, 10)
		if err := setValue(path, "cpu.max", v); err != nil {
			return err
		}
	}

	return nil
}

type cpuset2 struct {
	mandatory
}

// generateProperties translates OCI cpuset resources into systemd
// AllowedCPUs/AllowedMemoryNodes bitmask properties.
func (*cpuset2) generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error) {
	props := []dbus.Property{}
	if spec == nil || spec.CPU == nil {
		return props, nil
	}
	cpu := spec.CPU
	if cpu.Cpus == "" && cpu.Mems == "" {
		return props, nil
	}

	cpus := cpu.Cpus
	mems := cpu.Mems
	if cpus != "" {
		bits, err := RangeToBits(cpus)
		if err != nil {
			return nil, fmt.Errorf("%w: cpus=%q conversion error: %v", ErrBadResourceSpec, cpus, err)
		}
		props = append(props, newProp("AllowedCPUs", bits))
	}
	if mems != "" {
		bits, err := RangeToBits(mems)
		if err != nil {
			return nil, fmt.Errorf("%w: mems=%q conversion error: %v", ErrBadResourceSpec, mems, err)
		}
		props = append(props, newProp("AllowedMemoryNodes", bits))
	}
	return props, nil
}

// set writes cpuset.cpus and cpuset.mems directly into the cgroupfs directory.
func (*cpuset2) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.CPU == nil {
		return nil
	}
	if spec.CPU.Cpus != "" {
		if err := setValue(path, "cpuset.cpus", spec.CPU.Cpus); err != nil {
			return err
		}
	}
	if spec.CPU.Mems != "" {
		if
err := setValue(path, "cpuset.mems", spec.CPU.Mems); err != nil {
			return err
		}
	}
	return nil
}

type memory2 struct {
	mandatory
}

// generateProperties translates OCI memory resources into systemd
// MemorySwapMax/MemoryMax/MemoryLow unit properties.
func (*memory2) generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error) {
	props := []dbus.Property{}
	if spec == nil || spec.Memory == nil {
		return props, nil
	}
	mem := spec.Memory
	if mem.Swap != nil {
		// OCI Swap is memory+swap, so a memory limit is required to derive
		// the v2 swap-only value.
		if mem.Limit == nil {
			return nil, ErrBadResourceSpec
		}
		swap, err := convertMemorySwapToCgroupV2Value(*mem.Swap, *mem.Limit)
		if err != nil {
			return nil, err
		}
		props = append(props, newProp("MemorySwapMax", uint64(swap)))
	}
	if mem.Limit != nil {
		props = append(props, newProp("MemoryMax", uint64(*mem.Limit)))
	}
	if mem.Reservation != nil {
		props = append(props, newProp("MemoryLow", uint64(*mem.Reservation)))
	}
	return props, nil
}

// set writes memory.swap.max, memory.max and memory.low directly into the
// cgroupfs directory at path.
func (*memory2) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.Memory == nil {
		return nil
	}
	if spec.Memory.Swap != nil {
		// in cgroup v2, we set memory and swap separately, but the spec specifies
		// Swap field as memory+swap, so we need memory limit here to be set in
		// order to get the correct swap value.
		if spec.Memory.Limit == nil {
			return errors.New("cgroup: Memory.Swap is set without Memory.Limit")
		}

		swap, err := convertMemorySwapToCgroupV2Value(*spec.Memory.Swap, *spec.Memory.Limit)
		if err != nil {
			// Fix: previously `return nil` here, silently swallowing the
			// conversion error and skipping all remaining limits.
			return err
		}
		swapStr := numToStr(swap)
		// memory and memorySwap set to the same value -- disable swap
		if swapStr == "" && swap == 0 && *spec.Memory.Swap > 0 {
			swapStr = "0"
		}
		// never write empty string to `memory.swap.max`, it means set to 0.
		if swapStr != "" {
			if err := setValue(path, "memory.swap.max", swapStr); err != nil {
				return err
			}
		}
	}
	if spec.Memory.Limit != nil {
		if val := numToStr(*spec.Memory.Limit); val != "" {
			if err := setValue(path, "memory.max", val); err != nil {
				return err
			}
		}
	}
	if spec.Memory.Reservation != nil {
		if val := numToStr(*spec.Memory.Reservation); val != "" {
			if err := setValue(path, "memory.low", val); err != nil {
				return err
			}
		}
	}
	return nil
}

type pid2 struct {
	mandatory
}

// generateProperties maps the OCI pids limit to the systemd TasksMax property.
func (*pid2) generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error) {
	if spec != nil && spec.Pids != nil {
		return []dbus.Property{newProp("TasksMax", uint64(spec.Pids.Limit))}, nil
	}
	return []dbus.Property{}, nil
}

// set writes pids.max directly into the cgroupfs directory at path.
func (*pid2) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.Pids == nil {
		return nil
	}
	if val := numToStr(spec.Pids.Limit); val != "" {
		return setValue(path, "pids.max", val)
	}
	return nil
}

type io2 struct {
	mandatory
}

// generateProperties translates OCI block IO resources into systemd IOWeight,
// IODeviceWeight and the four IO bandwidth/IOPS max properties.
func (*io2) generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error) {
	props := []dbus.Property{}
	if spec == nil || spec.BlockIO == nil {
		return props, nil
	}
	io := spec.BlockIO
	if io.Weight != nil && *io.Weight != 0 {
		ioWeight := convertBlkIOToIOWeightValue(*io.Weight)
		props = append(props, newProp("IOWeight", ioWeight))
	}
	for _, dev := range io.WeightDevice {
		// NOTE(review): dev.Weight is dereferenced without a nil check —
		// a WeightDevice entry with nil Weight would panic; confirm the spec
		// guarantees it is set.
		val := fmt.Sprintf("%d:%d %d", dev.Major, dev.Minor, *dev.Weight)
		props = append(props, newProp("IODeviceWeight", val))
	}
	props = addIOProps(props, "IOReadBandwidthMax", io.ThrottleReadBpsDevice)
	props = addIOProps(props, "IOWriteBandwidthMax", io.ThrottleWriteBpsDevice)
	props = addIOProps(props, "IOReadIOPSMax", io.ThrottleReadIOPSDevice)
	props = addIOProps(props, "IOWriteIOPSMax", io.ThrottleWriteIOPSDevice)
	return props, nil
}

// set writes IO weight and throttle limits directly into the cgroupfs
// directory at path, preferring the BFQ scheduler's weight file when present.
func (*io2) set(spec *specs.LinuxResources, path string) error {
	if spec == nil || spec.BlockIO == nil {
		return nil
	}
	blkio := spec.BlockIO

	var (
		err error
		bfq *os.File
	)

	// If BFQ IO scheduler is available, use it.
	if blkio.Weight != nil || len(blkio.WeightDevice) > 0 {
		bfq, err = os.Open(filepath.Join(path, "io.bfq.weight"))
		if err == nil {
			defer bfq.Close()
		} else if !os.IsNotExist(err) {
			return err
		}
	}

	if blkio.Weight != nil && *blkio.Weight != 0 {
		if bfq != nil {
			if _, err := bfq.WriteString(strconv.FormatUint(uint64(*blkio.Weight), 10)); err != nil {
				return err
			}
		} else {
			// bfq io scheduler is not available, fallback to io.weight with
			// a conversion scheme
			ioWeight := convertBlkIOToIOWeightValue(*blkio.Weight)
			if err = setValue(path, "io.weight", strconv.FormatUint(ioWeight, 10)); err != nil {
				return err
			}
		}
	}

	// Per-device weights are only honored when the kernel supports per-device
	// BFQ weights (see bfqDeviceWeightSupported).
	if bfqDeviceWeightSupported(bfq) {
		// ignore leaf weight, does not apply to cgroupv2
		for _, dev := range blkio.WeightDevice {
			if dev.Weight != nil {
				val := fmt.Sprintf("%d:%d %d\n", dev.Major, dev.Minor, *dev.Weight)
				if _, err := bfq.WriteString(val); err != nil {
					return fmt.Errorf("failed to set device weight %q: %w", val, err)
				}
			}
		}
	}

	if err := setThrottle2(path, "rbps", blkio.ThrottleReadBpsDevice); err != nil {
		return err
	}
	if err := setThrottle2(path, "wbps", blkio.ThrottleWriteBpsDevice); err != nil {
		return err
	}
	if err := setThrottle2(path, "riops", blkio.ThrottleReadIOPSDevice); err != nil {
		return err
	}
	if err := setThrottle2(path, "wiops", blkio.ThrottleWriteIOPSDevice); err != nil {
		return err
	}

	return nil
}

// setThrottle2 writes one "MAJOR:MINOR $name=$rate" line to io.max for each
// throttle device.
func setThrottle2(path, name string, devs []specs.LinuxThrottleDevice) error {
	for _, dev := range devs {
		val := fmt.Sprintf("%d:%d %s=%d", dev.Major, dev.Minor, name, dev.Rate)
		if err := setValue(path, "io.max", val); err != nil {
			return err
		}
	}
	return nil
}

// hugeTLB2 is optional: containers may run without the hugetlb controller as
// long as the spec requests no hugepage limits.
type hugeTLB2 struct {
}

func (*hugeTLB2) optional() bool {
	return true
}

// skip validates that no hugepage limits were requested when the controller
// is absent.
func (*hugeTLB2) skip(spec *specs.LinuxResources) error {
	if spec != nil && len(spec.HugepageLimits) > 0 {
		return fmt.Errorf("HugepageLimits set but hugetlb cgroup controller not found")
	}
	return nil
}

// generateProperties returns nothing: systemd exposes no hugetlb properties.
func (*hugeTLB2) generateProperties(spec *specs.LinuxResources) ([]dbus.Property, error) {
	return nil, nil
}

func
(*hugeTLB2) set(spec *specs.LinuxResources, path string) error {
	if spec == nil {
		return nil
	}
	// Write one hugetlb.<pagesize>.limit_in_bytes file per requested limit.
	for _, limit := range spec.HugepageLimits {
		name := fmt.Sprintf("hugetlb.%s.limit_in_bytes", limit.Pagesize)
		val := strconv.FormatUint(limit.Limit, 10)
		if err := setValue(path, name, val); err != nil {
			return err
		}
	}
	return nil
}

// Since the OCI spec is designed for cgroup v1, in some cases
// there is need to convert from the cgroup v1 configuration to cgroup v2
// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142)
// convert from [2-262144] to [1-10000]
// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)"
func convertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 {
	if cpuShares == 0 {
		return 0
	}
	return (1 + ((cpuShares-2)*9999)/262142)
}

// convertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec
// for use by cgroup v2 drivers. A conversion is needed since
// Resources.MemorySwap is defined as memory+swap combined, while in cgroup v2
// swap is a separate value.
func convertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) {
	// for compatibility with cgroup1 controller, set swap to unlimited in
	// case the memory is set to unlimited, and swap is not explicitly set,
	// treating the request as "set both memory and swap to unlimited".
	if memory == -1 && memorySwap == 0 {
		return -1, nil
	}
	if memorySwap == -1 || memorySwap == 0 {
		// -1 is "max", 0 is "unset", so treat as is.
		return memorySwap, nil
	}
	// sanity checks
	if memory == 0 || memory == -1 {
		return 0, errors.New("unable to set swap limit without memory limit")
	}
	if memory < 0 {
		return 0, fmt.Errorf("invalid memory value: %d", memory)
	}
	if memorySwap < memory {
		return 0, errors.New("memory+swap limit should be >= memory limit")
	}
	// v2 swap value is the swap-only portion.
	return memorySwap - memory, nil
}

// Since the OCI spec is designed for cgroup v1, in some cases
// there is need to convert from the cgroup v1 configuration to cgroup v2
// the formula for BlkIOWeight to IOWeight is y = (1 + (x - 10) * 9999 / 990)
// convert linearly from [10-1000] to [1-10000]
func convertBlkIOToIOWeightValue(blkIoWeight uint16) uint64 {
	if blkIoWeight == 0 {
		return 0
	}
	return 1 + (uint64(blkIoWeight)-10)*9999/990
}

// numToStr converts an int64 value to a string for writing to a
// cgroupv2 files with .min, .max, .low, or .high suffix.
// The value of -1 is converted to "max" for cgroupv1 compatibility
// (which used to write -1 to remove the limit).
func numToStr(value int64) (ret string) {
	switch {
	case value == 0:
		// "unset": callers skip the write entirely for an empty string.
		ret = ""
	case value == -1:
		ret = "max"
	default:
		ret = strconv.FormatInt(value, 10)
	}
	return ret
}

// bfqDeviceWeightSupported checks for per-device BFQ weight support (added
// in kernel v5.4, commit 795fe54c2a8) by reading from "io.bfq.weight".
func bfqDeviceWeightSupported(bfq *os.File) bool {
	if bfq == nil {
		return false
	}
	if _, err := bfq.Seek(0, 0); err != nil {
		return false
	}
	buf := make([]byte, 32)
	if _, err := bfq.Read(buf); err != nil {
		return false
	}
	// If only a single number (default weight) if read back, we have older
	// kernel.
	_, err := strconv.ParseInt(string(bytes.TrimSpace(buf)), 10, 64)
	return err != nil
}

// parseKeyValue parses a space-separated "name value" kind of cgroup
// parameter and returns its key as a string, and its value as uint64
// (ParseUint is used to convert the value). For example,
// "io_service_bytes 1234" will be returned as "io_service_bytes", 1234.
// parseKeyValue splits a "name value" cgroup line into its key and unsigned
// numeric value. Lines that do not consist of exactly two space-separated
// fields are rejected.
func parseKeyValue(t string) (string, uint64, error) {
	fields := strings.SplitN(t, " ", 3)
	if len(fields) != 2 {
		return "", 0, fmt.Errorf("line %q is not in key value format", t)
	}

	v, err := parseUint(fields[1], 10, 64)
	if err != nil {
		return "", 0, err
	}
	return fields[0], v, nil
}

// parseUint converts a string to an uint64 integer.
// Negative values are returned at zero as, due to kernel bugs,
// some of the memory cgroup stats can be negative.
func parseUint(s string, base, bitSize int) (uint64, error) {
	v, err := strconv.ParseUint(s, base, bitSize)
	if err == nil {
		return v, nil
	}

	// Unsigned parse failed; retry as signed to recognize negative inputs.
	iv, ierr := strconv.ParseInt(s, base, bitSize)
	// Treat both representable negatives and negative overflows as zero.
	if (ierr == nil && iv < 0) || (errors.Is(ierr, strconv.ErrRange) && iv < 0) {
		return 0, nil
	}
	// Not a negative value: propagate the original unsigned parse failure.
	return v, err
}

// RangeToBits converts a text representation of a CPU mask (as written to
// or read from cgroups' cpuset.* files, e.g. "1,3-5") to a slice of bytes
// with the corresponding bits set (as consumed by systemd over dbus as
// AllowedCPUs/AllowedMemoryNodes unit property value).
// Copied from runc.
func RangeToBits(str string) ([]byte, error) {
	var mask big.Int

	for _, piece := range strings.Split(str, ",") {
		// allow extra spaces around
		piece = strings.TrimSpace(piece)
		// allow empty elements (extra commas)
		if piece == "" {
			continue
		}
		bounds := strings.SplitN(piece, "-", 2)
		lo, err := strconv.ParseUint(bounds[0], 10, 32)
		if err != nil {
			return nil, err
		}
		hi := lo
		if len(bounds) > 1 {
			hi, err = strconv.ParseUint(bounds[1], 10, 32)
			if err != nil {
				return nil, err
			}
			if lo > hi {
				return nil, errors.New("invalid range: " + piece)
			}
		}
		// Set every bit in [lo, hi]; a single value is the degenerate range.
		for b := lo; b <= hi; b++ {
			mask.SetBit(&mask, int(b), 1)
		}
	}

	out := mask.Bytes()
	if len(out) == 0 {
		// do not allow empty values
		return nil, errors.New("empty value")
	}
	return out, nil
}
package semanticanalyzer

import (
	"compiler/src/types"
	"errors"
	"log"
)

// globalSymbolTable and builtinSymbolTable are installed by SemanticAnalysis
// and consulted by the Check* helpers when resolving identifiers.
var globalSymbolTable = map[string]types.STEntry{}
var builtinSymbolTable = map[string]types.STEntry{}

// SemanticAnalysis type-checks the parse tree rooted at node and terminates
// the process (log.Fatal) on the first semantic error.
func SemanticAnalysis(node *types.ParseNode, parseGlobalSymbolTable map[string]types.STEntry, parseBuiltinSymbolTable map[string]types.STEntry) {
	globalSymbolTable = parseGlobalSymbolTable
	// BUG FIX: this was assigned parseGlobalSymbolTable, so builtin
	// procedures could never be resolved through builtinSymbolTable.
	builtinSymbolTable = parseBuiltinSymbolTable
	if err := CheckNode(node, nil, types.STEntry{}); err != nil {
		log.Fatal(err)
	}
}

// CheckNode walks the tree below node, switching to a procedure's local
// symbol table (and the procedure's own symbol-table entry) whenever it
// enters a procedure declaration, then dispatches statement-level checks.
func CheckNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry, stEntry types.STEntry) error {
	localST := localSymbolTable
	entry := stEntry
	if node.Production == types.ProcedureDeclarationProd {
		localST = node.ProcLocalSymbolTable
		// The procedure identifier is the second child of the header.
		header := node.ChildNodes[0]
		identifier := header.ChildNodes[1]
		entry = localST[identifier.TerminalToken.StringValue]
	}
	// BUG FIX: errors from child nodes were previously computed and then
	// discarded, which silently suppressed every semantic error found
	// below the root node.
	for i := range node.ChildNodes {
		if err := CheckNode(&node.ChildNodes[i], localST, entry); err != nil {
			return err
		}
	}
	switch node.Production {
	case types.AssignmentStatementProd:
		return CheckAssignmentStatementNode(node, localST)
	case types.LoopStatementProd:
		return CheckLoopStatementNode(node, localST, entry)
	case types.IfStatementProd:
		return CheckIfStatementNode(node, localST, entry)
	case types.ReturnStatementProd:
		return CheckReturnStatementNode(node, localST, entry)
	}
	return nil
}

// CheckProcedureCallNode validates a procedure call against the callee's
// declaration and returns the callee's return type.
func CheckProcedureCallNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Procedure call argument types do not match procedure declaration parameter types"
	identifier := node.ChildNodes[0].TerminalToken.StringValue
	// Resolve the callee: local scope first, then global, then builtins.
	var stEntry types.STEntry
	if e, ok := localSymbolTable[identifier]; ok {
		stEntry = e
	} else if e, ok := globalSymbolTable[identifier]; ok {
		stEntry = e
	} else if e, ok := builtinSymbolTable[identifier]; ok {
		stEntry = e
	}
	if node.ChildNodes[2].Production == types.ArgumentListProd {
		// NOTE(review): the production is tested at index 2 but the argument
		// list is taken from index 3, as in the original — confirm against
		// the grammar.
		if err := CheckArgumentListNode(&node.ChildNodes[3], localSymbolTable, stEntry); err != nil {
			return types.STNone, err
		}
	} else if node.ChildNodes[2].TerminalToken.TokenType == types.CloseRoundBracket {
		// Empty argument list: the declaration must take no parameters.
		if len(stEntry.ProcedureArgTypes) != 0 {
			return types.STNone, errors.New(errString)
		}
	}
	return stEntry.ProcedureReturnType, nil
}

// CheckArgumentListNode verifies that each argument's type matches the
// callee's declared parameter types, position by position.
func CheckArgumentListNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry, stEntry types.STEntry) error {
	errString := "Semantic Analysis Error: Procedure call argument types do not match procedure declaration parameter types"
	argTypes := make([]types.STType, 0, len(node.ChildNodes))
	for i := range node.ChildNodes {
		argType, err := CheckExpressionNode(&node.ChildNodes[i], localSymbolTable)
		if err != nil {
			return err
		}
		argTypes = append(argTypes, argType)
	}
	if len(argTypes) != len(stEntry.ProcedureArgTypes) {
		return errors.New(errString)
	}
	for i, at := range argTypes {
		if at != stEntry.ProcedureArgTypes[i] {
			return errors.New(errString)
		}
	}
	return nil
}

// CheckAssignmentStatementNode checks an assignment, allowing the implicit
// conversions bool<->integer and integer<->float (rule 14).
func CheckAssignmentStatementNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) error {
	errString := "Semantic Analysis Error: Expression type is not compatible with destination type"
	destSTType, err := CheckDestinationNode(&node.ChildNodes[0], localSymbolTable)
	if err != nil {
		return err
	}
	exprSTType, err := CheckExpressionNode(&node.ChildNodes[2], localSymbolTable)
	if err != nil {
		return err
	}
	if exprSTType == destSTType {
		return nil
	}
	compatible := (exprSTType == types.STVarBool && destSTType == types.STVarInteger) ||
		(exprSTType == types.STVarInteger && destSTType == types.STVarBool) ||
		(exprSTType == types.STVarInteger && destSTType == types.STVarFloat) ||
		(exprSTType == types.STVarFloat && destSTType == types.STVarInteger)
	if !compatible {
		return errors.New(errString)
	}
	return nil
}

// resolveVariableAccess looks up a (possibly indexed) variable reference and
// returns its type; for name[expr] the index must evaluate to an integer and
// the array's element type is returned. Shared by CheckDestinationNode and
// CheckNameNode, which previously duplicated this logic verbatim.
func resolveVariableAccess(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Index expression does not evaluate to an integer"
	identifier := node.ChildNodes[0].TerminalToken.StringValue
	var stEntry types.STEntry
	if e, ok := localSymbolTable[identifier]; ok {
		stEntry = e
	} else if e, ok := globalSymbolTable[identifier]; ok {
		stEntry = e
	}
	if len(node.ChildNodes) > 1 {
		exprSTType, err := CheckExpressionNode(&node.ChildNodes[2], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		if exprSTType != types.STVarInteger {
			return types.STNone, errors.New(errString)
		}
		switch stEntry.EntryType {
		case types.STVarIntegerArray:
			return types.STVarInteger, nil
		case types.STVarFloatArray:
			return types.STVarFloat, nil
		case types.STVarStringArray:
			return types.STVarString, nil
		case types.STVarBoolArray:
			return types.STVarBool, nil
		}
	}
	return stEntry.EntryType, nil
}

// CheckDestinationNode types the left-hand side of an assignment.
func CheckDestinationNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	return resolveVariableAccess(node, localSymbolTable)
}

// CheckNameNode types a variable reference used inside an expression.
func CheckNameNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	return resolveVariableAccess(node, localSymbolTable)
}

// CheckExpressionNode types `[not] arithOp [expressionPrime]`. Operands of
// the logical operators must both be integer or both be boolean.
func CheckExpressionNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	aopIndex, expIndex := 0, 1
	if node.ChildNodes[0].TerminalToken.TokenType == types.NotOperator {
		aopIndex, expIndex = 1, 2
	}
	stType, err := CheckArithOpNode(&node.ChildNodes[aopIndex], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	// BUG FIX: the bound was `len > 1`, which indexed out of range when a
	// leading `not` shifted expIndex to 2 but no expressionPrime followed.
	if len(node.ChildNodes) > expIndex {
		primeType, err := CheckExpressionPrimeNode(&node.ChildNodes[expIndex], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		if stType == primeType && (stType == types.STVarInteger || stType == types.STVarBool) {
			return stType, nil
		}
		return types.STNone, errors.New(errString)
	}
	return stType, nil
}

// CheckExpressionPrimeNode types the right-recursive tail of an expression.
// NOTE(review): unlike the other *Prime checkers this reads the operand at
// index 0/1 rather than skipping a leading operator token, matching the
// original — confirm against the grammar.
func CheckExpressionPrimeNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	aopIndex, expIndex := 0, 1
	if node.ChildNodes[0].TerminalToken.TokenType == types.NotOperator {
		aopIndex, expIndex = 1, 2
	}
	stType, err := CheckArithOpNode(&node.ChildNodes[aopIndex], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	// BUG FIX: bound was `len > 1` (see CheckExpressionNode).
	if len(node.ChildNodes) > expIndex {
		primeType, err := CheckExpressionPrimeNode(&node.ChildNodes[expIndex], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		if stType == primeType && (stType == types.STVarInteger || stType == types.STVarBool) {
			return stType, nil
		}
		return types.STNone, errors.New(errString)
	}
	if stType != types.STVarInteger && stType != types.STVarBool {
		return types.STNone, errors.New(errString)
	}
	return stType, nil
}

// combineNumeric merges two numeric operand types: matching integer/float
// types keep their type; a mixed integer/float pair promotes to float.
func combineNumeric(a, b types.STType) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	switch {
	case a == b && (a == types.STVarInteger || a == types.STVarFloat):
		return a, nil
	case a == types.STVarInteger && b == types.STVarFloat:
		return b, nil
	case a == types.STVarFloat && b == types.STVarInteger:
		return a, nil
	}
	return types.STNone, errors.New(errString)
}

// CheckArithOpNode types `relation [arithOpPrime]`.
func CheckArithOpNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	stType, err := CheckRelationNode(&node.ChildNodes[0], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	if len(node.ChildNodes) > 1 {
		primeType, err := CheckArithOpPrimeNode(&node.ChildNodes[1], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		return combineNumeric(stType, primeType)
	}
	return stType, nil
}

// CheckArithOpPrimeNode types the tail of an arithmetic expression.
// NOTE(review): the operand is checked with CheckFactorNode and the tail
// with CheckTermPrimeNode, mirroring the original — confirm the grammar
// really shares these productions.
func CheckArithOpPrimeNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	stType, err := CheckFactorNode(&node.ChildNodes[1], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	if len(node.ChildNodes) > 2 {
		primeType, err := CheckTermPrimeNode(&node.ChildNodes[2], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		return combineNumeric(stType, primeType)
	}
	if stType != types.STVarInteger && stType != types.STVarFloat {
		return types.STNone, errors.New(errString)
	}
	return stType, nil
}

// CheckRelationNode types `term [relationPrime]`. Comparisons yield a
// boolean; strings may only be compared with == and !=. (The former
// duplicate float/float arm was unreachable and has been removed.)
func CheckRelationNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	stType, err := CheckTermNode(&node.ChildNodes[0], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	if len(node.ChildNodes) > 1 {
		relPrimeSTType, err := CheckRelationPrimeNode(&node.ChildNodes[1], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		switch {
		case stType == relPrimeSTType && (stType == types.STVarInteger || stType == types.STVarFloat || stType == types.STVarBool):
			return types.STVarBool, nil
		case stType == types.STVarBool && relPrimeSTType == types.STVarInteger:
			return stType, nil
		case stType == types.STVarInteger && relPrimeSTType == types.STVarBool:
			return relPrimeSTType, nil
		case stType == types.STVarString && relPrimeSTType == types.STVarString:
			op := node.ChildNodes[1].ChildNodes[0].TerminalToken.TokenType
			if op == types.EqualOperator || op == types.NotEqualOperator {
				return types.STVarBool, nil
			}
			return types.STNone, errors.New(errString)
		}
		return types.STNone, errors.New(errString)
	}
	return stType, nil
}

// CheckRelationPrimeNode types the tail `relop term [relationPrime]`.
func CheckRelationPrimeNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	stType, err := CheckTermNode(&node.ChildNodes[1], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	if len(node.ChildNodes) > 2 {
		relPrimeSTType, err := CheckRelationPrimeNode(&node.ChildNodes[2], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		switch {
		case stType == relPrimeSTType && (stType == types.STVarInteger || stType == types.STVarFloat || stType == types.STVarBool):
			return stType, nil
		case stType == types.STVarBool && relPrimeSTType == types.STVarInteger:
			return stType, nil
		case stType == types.STVarInteger && relPrimeSTType == types.STVarBool:
			return relPrimeSTType, nil
		case stType == types.STVarString && relPrimeSTType == types.STVarString:
			// NOTE(review): this inspects node.ChildNodes[1] for the
			// operator, matching the original — confirm it should not be
			// ChildNodes[2] in the recursive tail.
			op := node.ChildNodes[1].ChildNodes[0].TerminalToken.TokenType
			if op == types.EqualOperator || op == types.NotEqualOperator {
				return types.STVarBool, nil
			}
			return types.STNone, errors.New(errString)
		}
		return types.STNone, errors.New(errString)
	}
	if stType != types.STVarInteger && stType != types.STVarFloat && stType != types.STVarBool && stType != types.STVarString {
		return types.STNone, errors.New(errString)
	}
	return stType, nil
}

// CheckTermNode types `factor [termPrime]`.
func CheckTermNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	stType, err := CheckFactorNode(&node.ChildNodes[0], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	if len(node.ChildNodes) > 1 {
		primeType, err := CheckTermPrimeNode(&node.ChildNodes[1], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		return combineNumeric(stType, primeType)
	}
	return stType, nil
}

// CheckTermPrimeNode types the tail `(mulop) factor [termPrime]`.
func CheckTermPrimeNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Incompatible types"
	stType, err := CheckFactorNode(&node.ChildNodes[1], localSymbolTable)
	if err != nil {
		return types.STNone, err
	}
	if len(node.ChildNodes) > 2 {
		primeType, err := CheckTermPrimeNode(&node.ChildNodes[2], localSymbolTable)
		if err != nil {
			return types.STNone, err
		}
		return combineNumeric(stType, primeType)
	}
	if stType != types.STVarInteger && stType != types.STVarFloat {
		return types.STNone, errors.New(errString)
	}
	return stType, nil
}

// CheckFactorNode types a factor: negated name/number, parenthesized
// expression, procedure call, name, number literal, string literal, or
// boolean literal.
func CheckFactorNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry) (types.STType, error) {
	errString := "Semantic Analysis Error: Unknown factor type"
	first := &node.ChildNodes[0]
	switch {
	case first.TerminalToken.TokenType == types.SubtractionOperator:
		operand := &node.ChildNodes[1]
		if operand.Production == types.NameProd {
			return CheckNameNode(operand, localSymbolTable)
		}
		if operand.Production == types.NumberProd {
			if operand.TerminalToken.TokenType == types.FloatToken {
				return types.STVarFloat, nil
			}
			if operand.TerminalToken.TokenType == types.IntegerToken {
				return types.STVarInteger, nil
			}
		}
	case first.TerminalToken.TokenType == types.OpenRoundBracket:
		return CheckExpressionNode(&node.ChildNodes[1], localSymbolTable)
	case first.Production == types.ProcedureCallProd:
		return CheckProcedureCallNode(first, localSymbolTable)
	case first.Production == types.NameProd:
		return CheckNameNode(first, localSymbolTable)
	case first.Production == types.NumberProd:
		if first.TerminalToken.TokenType == types.FloatToken {
			return types.STVarFloat, nil
		}
		if first.TerminalToken.TokenType == types.IntegerToken {
			return types.STVarInteger, nil
		}
	case first.Production == types.StringProd:
		return types.STVarString, nil
	case first.TerminalToken.TokenType == types.TrueKeyword || first.TerminalToken.TokenType == types.FalseKeyword:
		return types.STVarBool, nil
	}
	return types.STNone, errors.New(errString)
}

// CheckLoopStatementNode checks a loop statement (rule 15): its init
// assignment, its guard expression, and its body.
func CheckLoopStatementNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry, stEntry types.STEntry) error {
	errString := "Semantic Analysis Error: Loop expression does not evaluate to a boolean value"
	if err := CheckAssignmentStatementNode(&node.ChildNodes[2], localSymbolTable); err != nil {
		return err
	}
	stType, err := CheckExpressionNode(&node.ChildNodes[4], localSymbolTable)
	if err != nil {
		return err
	}
	// BUG FIX: this used `||`, which is true for every type and would have
	// rejected all loop guards; bool or integer guards are accepted.
	if stType != types.STVarBool && stType != types.STVarInteger {
		return errors.New(errString)
	}
	for i := 6; i < len(node.ChildNodes); i++ {
		child := &node.ChildNodes[i]
		if child.TerminalToken.TokenType == types.EndKeyword {
			break
		}
		if err := CheckNode(child, localSymbolTable, stEntry); err != nil {
			return err
		}
	}
	return nil
}

// CheckIfStatementNode checks an if statement: its guard expression and its
// body up to the closing `end`.
func CheckIfStatementNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry, stEntry types.STEntry) error {
	errString := "Semantic Analysis Error: If expression does not evaluate to a boolean value"
	stType, err := CheckExpressionNode(&node.ChildNodes[2], localSymbolTable)
	if err != nil {
		return err
	}
	// BUG FIX: `||` -> `&&`, as in CheckLoopStatementNode.
	if stType != types.STVarBool && stType != types.STVarInteger {
		return errors.New(errString)
	}
	for i := 5; i < len(node.ChildNodes); i++ {
		child := &node.ChildNodes[i]
		if child.TerminalToken.TokenType == types.EndKeyword {
			break
		}
		if err := CheckNode(child, localSymbolTable, stEntry); err != nil {
			return err
		}
	}
	return nil
}

// CheckReturnStatementNode checks that a returned expression matches the
// enclosing procedure's symbol-table entry.
// NOTE(review): this compares against stEntry.EntryType while procedure
// calls use stEntry.ProcedureReturnType — confirm which field procedure
// entries store their return type in.
func CheckReturnStatementNode(node *types.ParseNode, localSymbolTable map[string]types.STEntry, stEntry types.STEntry) error {
	errString := "Semantic Analysis Error: Returned value does not match procedure declaration return type"
	stType, err := CheckExpressionNode(&node.ChildNodes[1], localSymbolTable)
	if err != nil {
		return err
	}
	if stType != stEntry.EntryType {
		return errors.New(errString)
	}
	return nil
}
package main

import (
	"testing"
)

// TestTarget verifies that target() reports success.
func TestTarget(t *testing.T) {
	t.Run("test1", func(t *testing.T) {
		if !target() {
			t.Fatalf("ng detara dou naru no")
		}
	})
}
package cis

// Section 3 (Networking) entries from the CIS Google Cloud Platform
// Foundation Benchmark. Each Recommendation records the benchmark item's
// title, its CIS identifier, whether it is scored, and its profile level.
var (
	// 3.1 — the auto-created "default" VPC network should be removed.
	network1 = Recommendation{
		Name:   "Ensure the default network does not exist in a project",
		CisID:  "3.1",
		Scored: true,
		Level:  1,
	}
	// 3.2 — legacy (non-subnet) networks should not exist.
	network2 = Recommendation{
		Name:   "Ensure legacy networks does not exists for a project",
		CisID:  "3.2",
		Scored: true,
		Level:  1,
	}
	// 3.3 — DNSSEC for Cloud DNS (not scored).
	network3 = Recommendation{
		Name:   "Ensure that DNSSEC is enabled for Cloud DNS",
		CisID:  "3.3",
		Scored: false,
		Level:  1,
	}
	// 3.4 — key-signing key algorithm check (not scored).
	network4 = Recommendation{
		Name:   "Ensure that RSASHA1 is not used for key-signing key in Cloud DNS DNSSEC",
		CisID:  "3.4",
		Scored: false,
		Level:  1,
	}
	// 3.5 — zone-signing key algorithm check (not scored).
	network5 = Recommendation{
		Name:   "Ensure that RSASHA1 is not used for zone-signing key in Cloud DNS DNSSEC",
		CisID:  "3.5",
		Scored: false,
		Level:  1,
	}
	// 3.6 — firewall rules must not allow SSH from 0.0.0.0/0 (level 2).
	network6 = Recommendation{
		Name:   "Ensure that SSH access is restricted from the internet",
		CisID:  "3.6",
		Scored: true,
		Level:  2,
	}
	// 3.7 — firewall rules must not allow RDP from 0.0.0.0/0 (level 2).
	network7 = Recommendation{
		Name:   "Ensure that RDP access is restricted from the internet",
		CisID:  "3.7",
		Scored: true,
		Level:  2,
	}
	// 3.8 — Private Google Access on every subnetwork (level 2).
	network8 = Recommendation{
		Name:   "Ensure Private Google Access is enabled for all subnetwork in VPC Network",
		CisID:  "3.8",
		Scored: true,
		Level:  2,
	}
	// 3.9 — VPC Flow Logs on every subnet.
	network9 = Recommendation{
		Name:   "Ensure VPC Flow logs is enabled for every subnet in VPC Network",
		CisID:  "3.9",
		Scored: true,
		Level:  1,
	}
)
package main

import "fmt"

// Given an integer array nums, sort it in ascending order.

/////////////////////// exchange sort /////////////////////////

// sortArray sorts nums in place with a simple O(n^2) exchange sort and
// returns the same slice. (Debug printing removed from the sort itself.)
func sortArray(nums []int) []int {
	for i := 0; i < len(nums); i++ {
		for j := i + 1; j < len(nums); j++ {
			if nums[i] > nums[j] {
				nums[i], nums[j] = nums[j], nums[i]
			}
		}
	}
	return nums
}

///////////////////// counting (bucket) sort ///////////////////////////////

// sortArray2 sorts nums in place with a counting sort.
// BUG FIX / generalization: the previous version used a fixed 101-slot
// bucket for values in [-50, 50] and panicked on anything outside that
// range; the bucket range is now derived from the data's min and max.
func sortArray2(nums []int) []int {
	if len(nums) == 0 {
		return nums
	}
	lo, hi := nums[0], nums[0]
	for _, v := range nums {
		if v < lo {
			lo = v
		}
		if v > hi {
			hi = v
		}
	}
	counts := make([]int, hi-lo+1)
	for _, v := range nums {
		counts[v-lo]++
	}
	idx := 0
	for i, c := range counts {
		for ; c > 0; c-- {
			nums[idx] = i + lo
			idx++
		}
	}
	return nums
}

///////////////////// quicksort ///////////////////////////////

// sortArray3 sorts nums in place with a recursive quicksort; the first
// element is the pivot and travels with the `left` boundary during
// partitioning. (Per-swap debug printing removed.)
func sortArray3(nums []int) []int {
	if len(nums) <= 1 {
		return nums
	}
	pivot := nums[0]
	left, right := 0, len(nums)-1
	for i := 1; i <= right; {
		if nums[i] > pivot {
			nums[i], nums[right] = nums[right], nums[i]
			right--
		} else {
			nums[i], nums[left] = nums[left], nums[i]
			i++
			left++
		}
	}
	sortArray3(nums[:left])
	sortArray3(nums[left+1:])
	return nums
}

func main() {
	nums := []int{5, 1, 6, 2, 0, 0}
	fmt.Println(sortArray3(nums))
}
package stack

import "testing"

// largestRectangleArea solves LeetCode 84 with a monotonic stack: for every
// bar it finds the nearest lower bar on each side, giving the widest
// rectangle of that bar's height; the answer is the maximum over all bars.
func largestRectangleArea(heights []int) int {
	n := len(heights)
	left, right := make([]int, n), make([]int, n)
	monoStack := []int{}
	// Left-to-right pass: left[i] is the index of the nearest bar to the
	// left that is strictly lower than heights[i] (-1 sentinel if none).
	for i := 0; i < n; i++ {
		for len(monoStack) > 0 && heights[monoStack[len(monoStack)-1]] >= heights[i] {
			monoStack = monoStack[:len(monoStack)-1]
		}
		if len(monoStack) == 0 {
			left[i] = -1
		} else {
			left[i] = monoStack[len(monoStack)-1]
		}
		monoStack = append(monoStack, i)
	}
	// BUG FIX: the stack must be cleared before the right-to-left pass.
	// Leftover indices from the first pass produced wrong right bounds,
	// e.g. [1,3,3] returned 3 instead of the correct 6.
	monoStack = monoStack[:0]
	// Right-to-left pass: right[i] is the index of the nearest lower bar to
	// the right (n sentinel if none).
	for i := n - 1; i >= 0; i-- {
		for len(monoStack) > 0 && heights[monoStack[len(monoStack)-1]] >= heights[i] {
			monoStack = monoStack[:len(monoStack)-1]
		}
		if len(monoStack) == 0 {
			right[i] = n
		} else {
			right[i] = monoStack[len(monoStack)-1]
		}
		monoStack = append(monoStack, i)
	}
	ans := 0
	for i := 0; i < n; i++ {
		ans = max(ans, (right[i]-left[i]-1)*heights[i])
	}
	return ans
}

// max returns the larger of two ints.
func max(x, y int) int {
	if x > y {
		return x
	}
	return y
}

func Test_84(t *testing.T) {
	t.Log(largestRectangleArea([]int{2, 1, 5, 6, 2, 3})) //10
	t.Log(largestRectangleArea([]int{2, 0, 2}))          //2
	t.Log(largestRectangleArea([]int{2, 1, 2}))          //3
}
package pool

import (
	"container/list"
	"sync"
	"time"
)

// PoolObject is a pooled resource.
type PoolObject struct{}

// LongOperation simulates slow work (~100ms).
func (p *PoolObject) LongOperation() {
	time.Sleep(time.Millisecond * 100)
}

// Pool is a fixed-size object pool. Objects move between the available and
// unavailable lists; the embedded mutex guards both lists.
type Pool struct {
	sync.Mutex
	cond        *sync.Cond
	available   *list.List
	unavailable *list.List
}

// NewPool creates a pool pre-filled with total objects.
func NewPool(total int) *Pool {
	l := list.New()
	for i := 0; i < total; i++ {
		l.PushBack(&PoolObject{})
	}
	p := &Pool{
		available:   l,
		unavailable: list.New(),
	}
	// IMPROVEMENT: waiters previously busy-polled every 50ms for a free
	// object; a condition variable wakes them exactly when one is returned.
	p.cond = sync.NewCond(&p.Mutex)
	return p
}

// CallLongOperation takes an object from the pool (blocking until one is
// available), runs its LongOperation asynchronously, and returns the object
// to the pool when the operation completes.
func (p *Pool) CallLongOperation() {
	p.Lock()
	for p.available.Len() == 0 {
		p.cond.Wait() // releases the lock while waiting, reacquires on wake
	}
	back := p.available.Back()
	po := p.available.Remove(back).(*PoolObject)
	ele := p.unavailable.PushBack(po)
	p.Unlock()

	go func() {
		po.LongOperation()
		p.Lock()
		p.unavailable.Remove(ele)
		p.available.PushBack(po)
		p.cond.Signal() // wake one waiter now that an object is free
		p.Unlock()
	}()
}
package handler

import (
	"log"
	"net"
	"regexp"

	"puck-server/db-server/dbservice"
	"puck-server/db-server/user"
	"puck-server/match-server/convert"
)

// nicknameRegexp accepts ASCII letters, digits and Hangul syllables.
// IMPROVEMENT: compiled once at package init instead of on every request.
var nicknameRegexp = regexp.MustCompile("^[A-Za-z0-9가-힣]+$")

// HandleSetNickname processes a SETNICKNAME packet: it leases the user's DB
// record, validates the requested nickname (length and character set),
// persists it, and answers conn with the matching result packet.
// NOTE(review): conn.Write errors are ignored throughout, as in the
// original implementation.
func HandleSetNickname(buf []byte, conn net.Conn, dbService dbservice.Db) {
	log.Printf("SETNICKNAME received")
	// Parse
	recvPacket, err := convert.ParseSetNickname(buf)
	if err != nil {
		log.Printf("HandleSetNickname fail: %v", err.Error())
		return
	}
	var userLeaseDb user.LeaseDb
	userId := convert.IdCuintToByteArray(recvPacket.Id)
	if err := dbService.Lease(&userId, &userLeaseDb); err != nil {
		log.Printf("user db load failed: %v", err.Error())
		return
	}
	var newNickname string
	convert.CCharArrayToGoString(&recvPacket.Nickname, &newNickname)
	newNicknameByteLen := len(newNickname) // len(string) already counts bytes
	switch {
	case newNicknameByteLen >= convert.LWNICKNAMEMAXLEN-4:
		conn.Write(convert.Packet2Buf(convert.NewLwpSetNicknameResult(recvPacket, convert.LWSETNICKNAMERESULTTOOLONG)))
		log.Printf("Nickname '%v' (byte len %v) too long.", newNickname, newNicknameByteLen)
		return
	case newNicknameByteLen < 4:
		conn.Write(convert.Packet2Buf(convert.NewLwpSetNicknameResult(recvPacket, convert.LWSETNICKNAMERESULTTOOSHORT)))
		log.Printf("Nickname '%v' (byte len %v) too short.", newNickname, newNicknameByteLen)
		return
	case !nicknameRegexp.MatchString(newNickname):
		conn.Write(convert.Packet2Buf(convert.NewLwpSetNicknameResult(recvPacket, convert.LWSETNICKNAMERESULTTOONOTALLOWED)))
		log.Printf("Nickname '%v' (byte len %v) contains not allowed characters.", newNickname, newNicknameByteLen)
		return
	}
	// good to go
	oldNickname := userLeaseDb.Db.Nickname
	userLeaseDb.Db.Nickname = newNickname
	var writeReply int
	if err := dbService.Write(&userLeaseDb, &writeReply); err != nil {
		conn.Write(convert.Packet2Buf(convert.NewLwpSetNicknameResult(recvPacket, convert.LWSETNICKNAMERESULTINTERNALERROR)))
		log.Printf("DB service Write failed: %v", err)
		return
	}
	conn.Write(convert.Packet2Buf(convert.NewLwpSetNicknameResult(recvPacket, convert.LWSETNICKNAMERESULTOK)))
	log.Printf("Nickname '%v' changed to '%v'", oldNickname, userLeaseDb.Db.Nickname)
}
package swagger

import (
	"strings"

	"github.com/go-openapi/spec"
)

// NewSwagger builds a wrapper around a fresh Swagger 2.0 spec with its
// Paths, Definitions and Responses containers initialized.
func NewSwagger() *Swagger {
	s := new(spec.Swagger)
	s.Swagger = "2.0"
	// A freshly allocated spec.Swagger always has nil containers, so the
	// previous `if == nil` guards were dead code; initialize unconditionally.
	s.Paths = new(spec.Paths)
	s.Definitions = make(map[string]spec.Schema)
	s.Responses = make(map[string]spec.Response)
	sg := new(Swagger)
	sg.Swagger = s
	return sg
}

// Swagger embeds *spec.Swagger and adds mutation helpers.
type Swagger struct {
	*spec.Swagger
}

// AddOperation registers op under the given HTTP method (case-insensitive)
// and path. Unknown methods are silently ignored.
func (swagger *Swagger) AddOperation(method string, path string, op *spec.Operation) {
	if swagger.Paths.Paths == nil {
		swagger.Paths.Paths = make(map[string]spec.PathItem)
	}
	paths := swagger.Paths
	pathObj := paths.Paths[path]
	switch strings.ToUpper(method) {
	case "GET":
		pathObj.Get = op
	case "POST":
		pathObj.Post = op
	case "PUT":
		pathObj.Put = op
	case "PATCH":
		pathObj.Patch = op
	case "HEAD":
		pathObj.Head = op
	case "DELETE":
		pathObj.Delete = op
	case "OPTIONS":
		pathObj.Options = op
	}
	paths.Paths[path] = pathObj
}

// AddDefinition stores schema under name unless a definition with that name
// already exists. It always returns a $ref schema pointing at the
// definition, plus true when the schema was newly added.
func (swagger *Swagger) AddDefinition(name string, schema spec.Schema) (*spec.Schema, bool) {
	ref := spec.RefProperty("#/definitions/" + name)
	if _, exists := swagger.Definitions[name]; exists {
		return ref, false
	}
	swagger.Definitions[name] = schema
	return ref, true
}
package partition

import (
	"gpartition/common"
)

// PartitionType defines different partition type
type PartitionType int8

const (
	//BdgPartitionType type
	BdgPartitionType PartitionType = iota
	//ShpPartitionType type
	ShpPartitionType
	//TShpPartitionType type
	TShpPartitionType
)

// Config all type of config
// Config bundles the parameters shared by every partitioner implementation.
type Config struct {
	PartitionType         // embedded: the partition algorithm to run (cfg.PartitionType)
	SrcNodesNum   uint64  // presumably the number of source nodes — confirm with callers
	StepNum       uint64  // NOTE(review): step count; exact semantics depend on the algorithm
	VertexSize    uint64  // NOTE(review): per-vertex size parameter — confirm units
	BucketSize    uint64  // NOTE(review): bucket capacity used by bucket-based partitioners
	Prob          float64 // probability parameter; meaning depends on the algorithm
	Graph         *common.Graph // the graph to be partitioned
}
//go:generate mockgen -source=fs/fs.go -package fs -destination=fs/fs_mock.go package infra
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package iw contains utility functions to wrap around the iw program.
package iw

import (
	"chromiumos/tast/common/network/iw"
	"chromiumos/tast/local/network/cmd"
)

// Runner is an alias for common iw Runner but only for local execution.
type Runner = iw.Runner

// NewLocalRunner creates an iw runner for local execution.
// It wires the shared iw implementation to cmd.LocalCmdRunner, so commands
// run on the local machine rather than over a remote connection.
func NewLocalRunner() *Runner {
	return iw.NewRunner(&cmd.LocalCmdRunner{})
}
package activity

import (
	"context"
	"errors"

	"github.com/go-kit/kit/log"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// RepoErr is the generic error returned when a repository operation cannot
// be handled.
var RepoErr = errors.New("Unable to handle Repo Request")

const (
	database   = "buddyApp"
	collection = "sys_activities"
)

// repo is the MongoDB-backed implementation of Repository.
type repo struct {
	db     *mongo.Client
	logger log.Logger
}

// NewRepo wires a Mongo client and a logger into a Repository.
func NewRepo(db *mongo.Client, logger log.Logger) Repository {
	return &repo{
		db:     db,
		logger: log.With(logger, "repo", "mongodb"),
	}
}

// coll returns the activities collection handle.
// BUG FIX: every method previously declared a local variable named
// `collection`, shadowing the package constant of the same name.
func (r *repo) coll() *mongo.Collection {
	return r.db.Database(database).Collection(collection)
}

// CreateActivity inserts one activity document. It returns a human-readable
// message, a success flag, and any driver error.
// BUG FIX: the insert now uses the caller's ctx instead of context.TODO();
// debug printing of the payload has been removed.
func (r *repo) CreateActivity(ctx context.Context, activity Activity) (string, bool, error) {
	if _, err := r.coll().InsertOne(ctx, activity); err != nil {
		return "Something went wrong", false, err
	}
	return "Activity created", true, nil
}

// UpdateActivity renames the activity with the given id; only the name field
// is written. Driver errors are reported through the message string and a
// nil error, preserving the existing API contract.
func (r *repo) UpdateActivity(ctx context.Context, id string, activity Activity) (string, error) {
	if activity.Name == "" || activity.Location == "" {
		return "Some info is missing", nil
	}
	filter := bson.M{"_id": id}
	update := bson.M{"$set": bson.M{"name": activity.Name}}
	result, err := r.coll().UpdateOne(ctx, filter, update)
	if err != nil {
		return "Error", nil
	}
	if result.MatchedCount == 1 {
		return "Activity Updated", nil
	}
	return "Activity Not Found", nil
}

// GetActivity looks up an activity by id and returns its name plus a status
// message. Lookup failures are reported via the message, not the error.
// NOTE(review): this filter uses the "id" field while UpdateActivity filters
// on "_id" — confirm which field the documents actually carry.
func (r *repo) GetActivity(ctx context.Context, id string) (string, string, error) {
	filter := bson.M{"id": id}
	var activity Activity
	if err := r.coll().FindOne(ctx, filter).Decode(&activity); err != nil {
		return "", "Activity not found", nil
	}
	return activity.Name, "Activity found", nil
}

// DeleteActivity removes the activity with the given id.
func (r *repo) DeleteActivity(ctx context.Context, id string) (string, error) {
	filter := bson.M{"id": id}
	if _, err := r.coll().DeleteOne(ctx, filter); err != nil {
		return "", RepoErr
	}
	return "Success", nil
}
package enums //ResponseStatus codes type ResponseStatus int type status struct { SUCCESS ResponseStatus ERROR ResponseStatus } //Status AppResonseStatus codes var Status = &status{ SUCCESS: 204, ERROR: 402, }
package model

import (
	"database/sql"
)

// Message represents one row of the message table.
type Message struct {
	ID   int64  `json:"id"`
	Body string `json:"body"`
	// 1-1. Show the sender's user name
	SenderName string `json:"sender_name"`
}

// MessagesAll returns every message.
func MessagesAll(db *sql.DB) ([]*Message, error) {
	// 1-1. Show the sender's user name
	rows, err := db.Query(`select id, body, sender_name from message`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var ms []*Message
	for rows.Next() {
		m := &Message{}
		if err := rows.Scan(&m.ID, &m.Body, &m.SenderName); err != nil {
			return nil, err
		}
		ms = append(ms, m)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return ms, nil
}

// MessageByID returns the message with the given id.
func MessageByID(db *sql.DB, id string) (*Message, error) {
	m := &Message{}
	// BUG FIX: sender_name was not selected here, so SenderName was always
	// empty while MessagesAll populated it; fetch the same columns.
	if err := db.QueryRow(`select id, body, sender_name from message where id = ?`, id).Scan(&m.ID, &m.Body, &m.SenderName); err != nil {
		return nil, err
	}
	return m, nil
}

// Insert adds one row to the message table and returns the stored message
// with its generated id.
func (m *Message) Insert(db *sql.DB) (*Message, error) {
	// 1-2. Persist the sender's user name as well
	res, err := db.Exec(`insert into message (body, sender_name) values (?, ?)`, m.Body, m.SenderName)
	if err != nil {
		return nil, err
	}
	id, err := res.LastInsertId()
	if err != nil {
		return nil, err
	}
	return &Message{
		ID:         id,
		Body:       m.Body,
		SenderName: m.SenderName,
	}, nil
}

// 1-3. Edit a message
// ...

// 1-4. Delete a message
// ...
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information

package main

import (
	"encoding/json"
	"log"
	"os"
)

// cmdDefaults implements the "defaults" subcommand, which rewrites the
// config file.
var cmdDefaults = &Command{
	UsageLine: "defaults",
	Short:     "Reset to the default settings",
}

func init() {
	cmdDefaults.Run = runDefaults
}

// runDefaults handles "defaults": it accepts no arguments and rewrites the
// config file from the package-level config.
// NOTE(review): this serializes the current in-memory `config`; presumably
// it still holds the default values when this runs — confirm.
func runDefaults(args []string) {
	if len(args) > 0 {
		log.Fatalf("No arguments allowed")
	}
	writeConfig(&config)
}

// writeConfig serializes c as tab-indented JSON to configPath(), terminating
// the process on any failure.
func writeConfig(c *userConfig) {
	f, err := os.Create(configPath())
	if err != nil {
		log.Fatalf("Error when creating config file: %v", err)
	}
	defer f.Close()
	b, err := json.MarshalIndent(c, "", "\t")
	if err != nil {
		log.Fatalf("Error when encoding config file: %v", err)
	}
	if _, err := f.Write(b); err != nil {
		log.Fatalf("Error when writing config file: %v", err)
	}
}
package dockerfile

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tilt-dev/tilt/internal/container"
)

// An untagged FROM matching the selector is pinned to the given tag.
func TestInjectUntagged(t *testing.T) {
	df := Dockerfile(`
FROM gcr.io/windmill/foo
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		assert.Equal(t, `
FROM gcr.io/windmill/foo:deadbeef
ADD . .
`, string(newDf))
	}
}

// An existing tag on a matching FROM is replaced.
func TestInjectTagged(t *testing.T) {
	df := Dockerfile(`
FROM gcr.io/windmill/foo:v1
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		assert.Equal(t, `
FROM gcr.io/windmill/foo:deadbeef
ADD . .
`, string(newDf))
	}
}

// A non-matching image name leaves the Dockerfile untouched.
func TestInjectNoMatch(t *testing.T) {
	df := Dockerfile(`
FROM gcr.io/windmill/bar:v1
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.False(t, modified)
		assert.Equal(t, df, newDf)
	}
}

// COPY --from references are rewritten as well as FROM lines.
func TestInjectCopyFrom(t *testing.T) {
	df := Dockerfile(`
FROM golang:1.10
COPY --from=gcr.io/windmill/foo /src/package.json /src/package.json
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		assert.Equal(t, `
FROM golang:1.10
COPY --from=gcr.io/windmill/foo:deadbeef /src/package.json /src/package.json
ADD . .
`, string(newDf))
	}
}

// A tagged COPY --from reference has its tag replaced.
func TestInjectCopyFromWithTag(t *testing.T) {
	df := Dockerfile(`
FROM golang:1.10
COPY --from=gcr.io/windmill/foo:bar /src/package.json /src/package.json
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		assert.Equal(t, `
FROM golang:1.10
COPY --from=gcr.io/windmill/foo:deadbeef /src/package.json /src/package.json
ADD . .
`, string(newDf))
	}
}

// Docker Hub short names ("vandelay/common") are matched against the
// normalized ref form.
func TestInjectCopyNormalizedNames(t *testing.T) {
	df := Dockerfile(`
FROM golang:1.10
COPY --from=vandelay/common /usr/src/common/package.json /usr/src/common/yarn.lock /usr/src/common/
ADD . .
`)
	ref := container.MustParseNamedTagged("vandelay/common:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		assert.Equal(t, `
FROM golang:1.10
COPY --from=vandelay/common:deadbeef /usr/src/common/package.json /usr/src/common/yarn.lock /usr/src/common/
ADD . .
`, string(newDf))
	}
}

// Injecting twice into the same AST is idempotent (quotes are also dropped
// during normalization).
func TestInjectTwice(t *testing.T) {
	df := Dockerfile(`
FROM golang:1.10
COPY --from="vandelay/common" /usr/src/common/package.json /usr/src/common/yarn.lock
ADD . .
`)
	ref := container.MustParseNamedTagged("vandelay/common:deadbeef")
	ast, err := ParseAST(df)
	if err != nil {
		t.Fatal(err)
	}
	modified, err := ast.InjectImageDigest(container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		newDf, err := ast.Print()
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, `
FROM golang:1.10
COPY --from=vandelay/common:deadbeef /usr/src/common/package.json /usr/src/common/yarn.lock
ADD . .
`, string(newDf))
	}
	modified, err = ast.InjectImageDigest(container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		newDf, err := ast.Print()
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, `
FROM golang:1.10
COPY --from=vandelay/common:deadbeef /usr/src/common/package.json /usr/src/common/yarn.lock
ADD . .
`, string(newDf))
	}
}

// A FROM using an ARG with a default value is resolved and rewritten; the
// ARG line itself is preserved as written.
func TestInjectBuildArgDefault(t *testing.T) {
	df := Dockerfile(`
ARG TAG="latest"

FROM gcr.io/windmill/foo:${TAG}
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, nil)
	if assert.NoError(t, err) {
		assert.True(t, modified)
		assert.Equal(t, `
ARG TAG="latest"

FROM gcr.io/windmill/foo:deadbeef
ADD . .
`, string(newDf))
	}
}

// A defaultless ARG is resolved from the supplied buildArgs.
func TestInjectBuildArgNoDefault(t *testing.T) {
	df := Dockerfile(`
ARG TAG

FROM gcr.io/windmill/foo:${TAG}
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, []string{"TAG=latest"})
	if assert.NoError(t, err) {
		assert.True(t, modified)
		// N.B. the rendered AST should still maintain the original value for the build arg
		assert.Equal(t, `
ARG TAG

FROM gcr.io/windmill/foo:deadbeef
ADD . .
`, string(newDf))
	}
}

// A buildArg overriding the ARG default is used for matching; the ARG line
// still keeps its written default.
func TestInjectBuildArgOverride(t *testing.T) {
	df := Dockerfile(`
ARG TAG="latest"

FROM gcr.io/windmill/foo:${TAG}
ADD . .
`)
	ref := container.MustParseNamedTagged("gcr.io/windmill/foo:deadbeef")
	newDf, modified, err := InjectImageDigest(df, container.NameSelector(ref), ref, []string{"TAG=v2.0.1"})
	if assert.NoError(t, err) {
		assert.True(t, modified)
		// N.B. the rendered AST should still maintain the original value for the build arg
		assert.Equal(t, `
ARG TAG="latest"

FROM gcr.io/windmill/foo:deadbeef
ADD . .
`, string(newDf))
	}
}
package main

// foox writes 9 into the first element of sl, demonstrating that a slice
// argument shares its backing array with the caller.
func foox(sl []int) {
	sl[0] = 9
}

// fooy demonstrates that deferred calls still run during a panic: the
// deferred println fires before the panic unwinds past this function.
// It never returns normally.
// Fix: removed the unreachable `return "fooy func"` that followed the panic,
// and the dead commented-out slice experiment that was left in main.
func fooy() string {
	defer println("fooy defer")
	panic("fooy panic")
}

func main() {
	fooy()
}
package backend

import (
	"net/http"

	"github.com/goadesign/goa"

	"github.com/MiCHiLU/goapp-scaffold/app"
)

// itemController handles the "items" resource endpoints.
type itemController struct {
	*goa.Controller
}

// newItemsController mounts a new item controller on the given goa service.
func newItemsController(service *goa.Service) *itemController {
	return &itemController{Controller: service.NewController("itemController")}
}

// Get responds to GET requests for items with an empty OK body.
func (c *itemController) Get(ctx *app.GetItemsContext) error {
	return ctx.OK(nil)
}

// handleFunc is a no-op http.HandlerFunc placeholder.
func handleFunc(
	w http.ResponseWriter,
	r *http.Request,
) {
	return
}
package countchars

import (
	"math/rand"
	"testing"

	"github.com/ninedraft/huffy"
)

// TestDiv property-tests Div as ceiling division: for random x in [2, 101]
// and y in [1, x-1], Div(x, y) must equal ceil(x / y).
func TestDiv(test *testing.T) {
	type TestCase struct {
		X, Y     int
		Expected int
	}
	huffy.Tester{
		Generator: func(rnd *rand.Rand, id int) interface{} {
			var x = rnd.Intn(100) + 2
			var y = rnd.Intn(x-1) + 1
			// Expected is integer division rounded up.
			var expected = x / y
			if x%y != 0 {
				expected++
			}
			return TestCase{
				X:        x,
				Y:        y,
				Expected: expected,
			}
		},
		Unit: func(test *testing.T, v interface{}) {
			var tc = v.(TestCase)
			var got = Div(tc.X, tc.Y)
			if tc.Expected != got {
				test.Fatalf("%d/%d: expected %d, got %d", tc.X, tc.Y, tc.Expected, got)
			}
		},
	}.R(test)
}
package store import ( "github.com/golang/protobuf/proto" "github.com/syndtr/goleveldb/leveldb" ) // ErrNotFound ... var ErrNotFound = leveldb.ErrNotFound // Store ... type Store interface { Save(*Route) error Delete(string) error Load(string, *Route) error LoadAll() ([]*Route, error) Close() error } // Store ... type store struct { db *leveldb.DB } // Open ... func Open(path string) (Store, error) { db, err := leveldb.OpenFile(path, nil) if err != nil { return nil, err } return &store{ db: db, }, nil } // Save ... func (s *store) Save(r *Route) error { b, err := proto.Marshal(r) if err != nil { return err } return s.db.Put([]byte(r.Name), b, nil) } // Load ... func (s *store) Load(name string, r *Route) error { b, err := s.db.Get([]byte(name), nil) if err != nil { return err } return proto.Unmarshal(b, r) } // LoadAll ... func (s *store) LoadAll() ([]*Route, error) { var rts []*Route it := s.db.NewIterator(nil, nil) defer it.Release() for it.Next() { r := &Route{} if err := proto.Unmarshal(it.Value(), r); err != nil { return nil, err } rts = append(rts, r) } if err := it.Error(); err != nil { return nil, err } return rts, nil } // Delete ... func (s *store) Delete(name string) error { return s.db.Delete([]byte(name), nil) } // Close ... func (s *store) Close() error { return s.db.Close() }
package main import ( "log" "os" ) func main() { filename := os.Args[1] f, err := os.Open(filename) if err != nil { log.Fatal(err) } parser := NewParder(f) codeWriter := NewCodeWriter(os.Stdout) codeWriter.SetFileName(filename) for parser.HasMoreCommands() { log.Printf("%s", parser.line) switch parser.CommandType() { case C_ARITHMETIC: codeWriter.WriteArithmetic(parser.Arg1()) case C_PUSH, C_POP: codeWriter.WritePushPop(parser.Command(), parser.Arg1(), parser.Arg2()) } } }
package auth import ( "errors" userModel "go_simpleweibo/app/models/user" "go_simpleweibo/config" "github.com/gin-gonic/gin" ) // SaveCurrentUserToContext : 保存用户数据到 context 中 func SaveCurrentUserToContext(c *gin.Context) { user, err := getCurrentUserFromSession(c) if err != nil { return } c.Keys[config.AppConfig.ContextCurrentUserDataKey] = user } // GetCurrentUserFromContext : 从 context 中获取用户模型 func GetCurrentUserFromContext(c *gin.Context) (*userModel.User, error) { err := errors.New("没有获取到用户数据") userDataFromContext := c.Keys[config.AppConfig.ContextCurrentUserDataKey] if userDataFromContext == nil { return nil, err } user, ok := userDataFromContext.(*userModel.User) if !ok { return nil, err } return user, nil } // GetUserFromContextOrDataBase : 从 context 或者从数据库中获取用户模型 func GetUserFromContextOrDataBase(c *gin.Context, id int) (*userModel.User, error) { // 当前用户存在并且就是想要获取的那个用户 currentUser, err := GetCurrentUserFromContext(c) if currentUser != nil && err == nil { if int(currentUser.ID) == id { return currentUser, nil } } // 获取的是其他指定 id 的用户 otherUser, err := userModel.Get(id) if err != nil { return nil, err } return otherUser, nil }
package repository_test

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/sesha04/test_kumparan/article/repository"
	"github.com/sesha04/test_kumparan/domain"
	"github.com/stretchr/testify/assert"
)

// TestStore verifies that Store prepares and executes the INSERT and writes
// the new row id back onto the article.
// NOTE(review): the sqlmock db handles are never closed in these tests;
// consider `defer db.Close()` throughout.
func TestStore(t *testing.T) {
	ar := &domain.Article{
		Title:  "Judul",
		Body:   "Body",
		Author: "Sesha Andipa",
	}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	// sqlmock matches expected SQL as a regexp, hence the escaped "?".
	query := "INSERT article SET title=\\? , body=\\? , author=\\?, updated_at=\\? , created_at=\\?"
	prep := mock.ExpectPrepare(query)
	prep.ExpectExec().WithArgs(ar.Title, ar.Body, ar.Author, sqlmock.AnyArg(), sqlmock.AnyArg()).WillReturnResult(sqlmock.NewResult(12, 1))
	a := repository.NewArticleRepository(db)
	err = a.Store(context.TODO(), ar)
	assert.NoError(t, err)
	assert.Equal(t, int64(12), ar.ID)
}

// With an empty query object, GetArticles selects everything ordered by
// created_at descending.
func TestGetArticleWithoutParams(t *testing.T) {
	q := domain.ArticleQuery{}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	mockArticles := []domain.Article{
		{
			ID:        1,
			Title:     "title 1",
			Body:      "Body 1",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
		{
			ID:        2,
			Title:     "title 2",
			Body:      "Body 2",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
	}
	rows := sqlmock.NewRows([]string{"id", "title", "body", "author", "updated_at", "created_at"}).
		AddRow(mockArticles[0].ID, mockArticles[0].Title, mockArticles[0].Body, mockArticles[0].Author, mockArticles[0].UpdatedAt, mockArticles[0].CreatedAt).
		AddRow(mockArticles[1].ID, mockArticles[1].Title, mockArticles[1].Body, mockArticles[1].Author, mockArticles[1].UpdatedAt, mockArticles[1].CreatedAt)
	query := "SELECT id, title, body, author, updated_at, created_at FROM article ORDER BY created_at DESC"
	mock.ExpectQuery(query).WillReturnRows(rows)
	a := repository.NewArticleRepository(db)
	list, err := a.GetArticles(context.TODO(), q)
	assert.NoError(t, err)
	assert.Len(t, list, 2)
}

// Filtering by author adds a bound "author = ?" predicate.
func TestGetArticleByAuthor(t *testing.T) {
	author := "Sesha Andipa"
	q := domain.ArticleQuery{
		Author: author,
	}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	mockArticles := []domain.Article{
		{
			ID:        1,
			Title:     "title 1",
			Body:      "Body 1",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
		{
			ID:        2,
			Title:     "title 2",
			Body:      "Body 2",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
	}
	rows := sqlmock.NewRows([]string{"id", "title", "body", "author", "updated_at", "created_at"}).
		AddRow(mockArticles[0].ID, mockArticles[0].Title, mockArticles[0].Body, mockArticles[0].Author, mockArticles[0].UpdatedAt, mockArticles[0].CreatedAt).
		AddRow(mockArticles[1].ID, mockArticles[1].Title, mockArticles[1].Body, mockArticles[1].Author, mockArticles[1].UpdatedAt, mockArticles[1].CreatedAt)
	query := "SELECT id, title, body, author, updated_at, created_at FROM article WHERE author = \\? ORDER BY created_at DESC"
	mock.ExpectQuery(query).WithArgs(q.Author).WillReturnRows(rows)
	a := repository.NewArticleRepository(db)
	list, err := a.GetArticles(context.TODO(), q)
	assert.NoError(t, err)
	assert.Len(t, list, 2)
}

// A free-text query matches body via LIKE (wrapped in %) or title via
// exact equality.
func TestGetArticleByBodyOrTitle(t *testing.T) {
	words := "word1 word2"
	q := domain.ArticleQuery{
		Query: words,
	}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	mockArticles := []domain.Article{
		{
			ID:        1,
			Title:     "title 1",
			Body:      "word1 word2",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
		{
			ID:        2,
			Title:     "title 2",
			Body:      "word1 word2 word3",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
	}
	rows := sqlmock.NewRows([]string{"id", "title", "body", "author", "updated_at", "created_at"}).
		AddRow(mockArticles[0].ID, mockArticles[0].Title, mockArticles[0].Body, mockArticles[0].Author, mockArticles[0].UpdatedAt, mockArticles[0].CreatedAt).
		AddRow(mockArticles[1].ID, mockArticles[1].Title, mockArticles[1].Body, mockArticles[1].Author, mockArticles[1].UpdatedAt, mockArticles[1].CreatedAt)
	query := "SELECT id, title, body, author, updated_at, created_at FROM article WHERE \\(body LIKE \\? OR title = \\?\\) ORDER BY created_at DESC"
	mock.ExpectQuery(query).WithArgs(fmt.Sprint("%", q.Query, "%"), q.Query).WillReturnRows(rows)
	a := repository.NewArticleRepository(db)
	list, err := a.GetArticles(context.TODO(), q)
	assert.NoError(t, err)
	assert.Len(t, list, 2)
}

// Author filter and free-text filter combine with AND.
func TestGetArticleByBodyOrTitleAndAuthor(t *testing.T) {
	author := "Sesha Andipa"
	words := "word1 word2"
	q := domain.ArticleQuery{
		Author: author,
		Query:  words,
	}
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	mockArticles := []domain.Article{
		{
			ID:        1,
			Title:     "title 1",
			Body:      "word1 word2",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
		{
			ID:        2,
			Title:     "title 2",
			Body:      "word1 word2 word3",
			Author:    "Sesha Andipa",
			UpdatedAt: time.Now(),
			CreatedAt: time.Now(),
		},
	}
	rows := sqlmock.NewRows([]string{"id", "title", "body", "author", "updated_at", "created_at"}).
		AddRow(mockArticles[0].ID, mockArticles[0].Title, mockArticles[0].Body, mockArticles[0].Author, mockArticles[0].UpdatedAt, mockArticles[0].CreatedAt).
		AddRow(mockArticles[1].ID, mockArticles[1].Title, mockArticles[1].Body, mockArticles[1].Author, mockArticles[1].UpdatedAt, mockArticles[1].CreatedAt)
	query := "SELECT id, title, body, author, updated_at, created_at FROM article WHERE author = \\? AND \\(body LIKE \\? OR title = \\?\\) ORDER BY created_at DESC"
	mock.ExpectQuery(query).WithArgs(q.Author, fmt.Sprint("%", q.Query, "%"), q.Query).WillReturnRows(rows)
	a := repository.NewArticleRepository(db)
	list, err := a.GetArticles(context.TODO(), q)
	assert.NoError(t, err)
	assert.Len(t, list, 2)
}
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package treeprinter

import (
	"bytes"
	"fmt"
	"strings"
)

// Drawing characters shared by all styles.
var (
	edgeLinkChr = rune('│')
	edgeMidChr  = rune('├')
	edgeLastChr = rune('└')
	horLineChr  = rune('─')
	bulletChr   = rune('•')
)

// Node is a handle associated with a specific depth in a tree. See below for
// sample usage.
type Node struct {
	tree  *tree
	level int
}

// New creates a tree printer and returns a sentinel node reference which
// should be used to add the root. Sample usage:
//
//   tp := New()
//   root := tp.Child("root")
//   root.Child("child-1")
//   root.Child("child-2").Child("grandchild\ngrandchild-more-info")
//   root.Child("child-3")
//
//   fmt.Print(tp.String())
//
// Output:
//
//   root
//    ├── child-1
//    ├── child-2
//    │    └── grandchild
//    │        grandchild-more-info
//    └── child-3
//
// Note that the Child calls can't be rearranged arbitrarily; they have
// to be in the order they need to be displayed (depth-first pre-order).
func New() Node {
	return NewWithStyle(DefaultStyle)
}

// NewWithStyle creates a tree printer like New, permitting customization of
// the style of the resulting tree.
func NewWithStyle(style Style) Node {
	t := &tree{style: style}

	// Each style uses different edge glyph sequences; the default style pads
	// them with a leading space.
	switch style {
	case CompactStyle:
		t.edgeLink = []rune{edgeLinkChr}
		t.edgeMid = []rune{edgeMidChr, ' '}
		t.edgeLast = []rune{edgeLastChr, ' '}

	case BulletStyle:
		t.edgeLink = []rune{edgeLinkChr}
		t.edgeMid = []rune{edgeMidChr, horLineChr, horLineChr, ' '}
		t.edgeLast = []rune{edgeLastChr, horLineChr, horLineChr, ' '}

	default:
		t.edgeLink = []rune{' ', edgeLinkChr}
		t.edgeMid = []rune{' ', edgeMidChr, horLineChr, horLineChr, ' '}
		t.edgeLast = []rune{' ', edgeLastChr, horLineChr, horLineChr, ' '}
	}

	return Node{
		tree:  t,
		level: 0,
	}
}

// Style is one of the predefined treeprinter styles.
type Style int

const (
	// DefaultStyle is the default style. Example:
	//
	//   foo
	//    ├── bar1
	//    │   bar2
	//    │    └── baz
	//    └── qux
	//
	DefaultStyle Style = iota

	// CompactStyle is a compact style, for deeper trees. Example:
	//
	//   foo
	//   ├ bar1
	//   │ bar2
	//   │ └ baz
	//   └ qux
	//
	CompactStyle

	// BulletStyle is a style that shows a bullet for each node, and groups any
	// other lines under that bullet. Example:
	//
	//   • foo
	//   │
	//   ├── • bar1
	//   │   │ bar2
	//   │   │
	//   │   └── • baz
	//   │
	//   └── • qux
	//
	BulletStyle
)

// tree implements the tree printing machinery.
//
// All Nodes hold a reference to the tree and Node calls result in modification
// of the tree. At any point in time, tree.rows contains the formatted tree that
// was described by the Node calls performed so far.
//
// When new nodes are added, some of the characters of the previous formatted
// tree need to be updated. Here is an example stepping through the state:
//
//   API call                       Rows
//
//
//   tp := New()                    <empty>
//
//
//   root := tp.Child("root")       root
//
//
//   root.Child("child-1")          root
//                                   └── child-1
//
//
//   c2 := root.Child("child-2")    root
//                                   ├── child-1
//                                   └── child-2
//
//     Note: here we had to go back up and change └─ into ├─ for child-1.
//
//
//   c2.Child("grandchild")         root
//                                   ├── child-1
//                                   └── child-2
//                                        └── grandchild
//
//
//   root.Child("child-3")          root
//                                   ├── child-1
//                                   ├── child-2
//                                   │    └── grandchild
//                                   └── child-3
//
//     Note: here we had to go back up and change └─ into ├─ for child-2, and
//     add a │ on the grandchild row. In general, we may need to add an
//     arbitrary number of vertical bars.
//
// In order to perform these character changes, we maintain information about
// the nodes on the bottom-most path.
type tree struct {
	style Style

	// rows maintains the rows accumulated so far, as rune arrays.
	rows [][]rune

	// stack contains information pertaining to the nodes on the bottom-most path
	// of the tree.
	stack []nodeInfo

	edgeLink []rune
	edgeMid  []rune
	edgeLast []rune
}

type nodeInfo struct {
	// firstChildConnectRow is the index (in tree.rows) of the row up to which we
	// have to connect the first child of this node.
	firstChildConnectRow int

	// nextSiblingConnectRow is the index (in tree.rows) of the row up to which we
	// have to connect the next sibling of this node. Typically this is the same
	// with firstChildConnectRow, except when the node has multiple rows. For
	// example:
	//
	//   foo
	//    └── bar1     <---- nextSiblingConnectRow
	//        bar2     <---- firstChildConnectRow
	//
	// firstChildConnectRow is used when adding "baz", nextSiblingConnectRow
	// is used when adding "qux":
	//   foo
	//    ├── bar1
	//    │   bar2
	//    │    └── baz
	//    └── qux
	//
	nextSiblingConnectRow int
}

// set copies the string of runes into a given row, at a specific position. The
// row is extended with spaces if needed.
func (t *tree) set(rowIdx int, colIdx int, what []rune) {
	// Extend the line if necessary.
	for len(t.rows[rowIdx]) < colIdx+len(what) {
		t.rows[rowIdx] = append(t.rows[rowIdx], ' ')
	}
	copy(t.rows[rowIdx][colIdx:], what)
}

// addRow adds a row with a given text, with the proper indentation for the
// given level.
func (t *tree) addRow(level int, text string) (rowIdx int) {
	runes := []rune(text)
	// Each level indents by this much.
	k := len(t.edgeLast)
	indent := level * k
	row := make([]rune, indent+len(runes))
	for i := 0; i < indent; i++ {
		row[i] = ' '
	}
	copy(row[indent:], runes)
	t.rows = append(t.rows, row)
	return len(t.rows) - 1
}

// Childf adds a node as a child of the given node.
func (n Node) Childf(format string, args ...interface{}) Node {
	return n.Child(fmt.Sprintf(format, args...))
}

// Child adds a node as a child of the given node. Multi-line strings are
// supported with appropriate indentation.
func (n Node) Child(text string) Node {
	if strings.ContainsRune(text, '\n') {
		// First line becomes the node; subsequent lines are attached without
		// an edge via AddLine.
		splitLines := strings.Split(text, "\n")
		node := n.childLine(splitLines[0])
		for _, l := range splitLines[1:] {
			node.AddLine(l)
		}
		return node
	}
	return n.childLine(text)
}

// AddLine adds a new line to a node without an edge.
func (n Node) AddLine(text string) {
	t := n.tree
	if t.style == BulletStyle {
		text = "  " + text
	}
	rowIdx := t.addRow(n.level-1, text)
	if t.style != BulletStyle {
		// The node's first child must now connect below this extra line.
		t.stack[n.level-1].firstChildConnectRow = rowIdx
	}
}

// childLine adds a node as a child of the given node.
func (n Node) childLine(text string) Node {
	t := n.tree
	if t.style == BulletStyle {
		text = fmt.Sprintf("%c %s", bulletChr, text)
		if n.level > 0 {
			n.AddEmptyLine()
		}
	}
	rowIdx := t.addRow(n.level, text)
	edgePos := (n.level - 1) * len(t.edgeLast)
	if n.level == 0 {
		// Case 1: root.
		if len(t.stack) != 0 {
			panic("multiple root nodes")
		}
	} else if len(t.stack) <= n.level {
		// Case 2: first child. Connect to parent.
		if len(t.stack) != n.level {
			panic("misuse of node")
		}
		parentRow := t.stack[n.level-1].firstChildConnectRow
		for i := parentRow + 1; i < rowIdx; i++ {
			t.set(i, edgePos, t.edgeLink)
		}
		t.set(rowIdx, edgePos, t.edgeLast)
	} else {
		// Case 3: non-first child. Connect to sibling.
		siblingRow := t.stack[n.level].nextSiblingConnectRow
		t.set(siblingRow, edgePos, t.edgeMid)
		for i := siblingRow + 1; i < rowIdx; i++ {
			t.set(i, edgePos, t.edgeLink)
		}
		t.set(rowIdx, edgePos, t.edgeLast)
		// Pop any deeper entries so this node becomes the new tip of the
		// bottom-most path at its level.
		t.stack = t.stack[:n.level]
	}
	t.stack = append(t.stack, nodeInfo{
		firstChildConnectRow:  rowIdx,
		nextSiblingConnectRow: rowIdx,
	})

	// Return a TreePrinter that can be used for children of this node.
	return Node{
		tree:  t,
		level: n.level + 1,
	}
}

// AddEmptyLine adds an empty line to the output; used to introduce vertical
// spacing as needed.
func (n Node) AddEmptyLine() {
	n.tree.rows = append(n.tree.rows, []rune{})
}

// FormattedRows returns the formatted rows. Can only be called on the result of
// treeprinter.New.
func (n Node) FormattedRows() []string {
	if n.level != 0 {
		panic("Only the root can be stringified")
	}
	res := make([]string, len(n.tree.rows))
	for i, r := range n.tree.rows {
		res[i] = string(r)
	}
	return res
}

// String renders the entire tree as a newline-terminated string. Can only be
// called on the result of treeprinter.New.
func (n Node) String() string {
	if n.level != 0 {
		panic("Only the root can be stringified")
	}
	var buf bytes.Buffer
	for _, r := range n.tree.rows {
		buf.WriteString(string(r))
		buf.WriteByte('\n')
	}
	return buf.String()
}
/*
 * Package sieve implements the Sieve interface with a bit array.
 * This is *NOT* a threadsafe package.
 */
package sieve

import (
	"fmt"
	"math"
	"math/bits"
)

const (
	constUint64BitCount = 64
	constUint64MaxValue = math.MaxUint64
)

// bitarray is a struct that maintains state of a bit array.
type bitarray struct {
	blks []uint64
	size int
}

// returns the index of the blk and the position of the bit within that blk
func bitPosition(index int) (int, int) {
	return index / constUint64BitCount, index % constUint64BitCount
}

// returns the number of blocks used within sieve for supplied @index
func blkCount(index int) int {
	blkIndex, bitIndex := bitPosition(index)
	if bitIndex > 0 {
		blkIndex++
	}
	return blkIndex
}

// newBitArray allocates enough 64-bit blocks to cover @capacity bits but
// reports a logical size of @size.
// NOTE(review): size > capacity is not validated here — confirm callers
// guarantee size <= capacity.
func newBitArray(size, capacity int) *bitarray {
	return &bitarray{make([]uint64, blkCount(capacity)), size}
}

// NewSieve creates a new instance of a bit array of size @size.
func NewSieve(size, capacity int) Sieve {
	return newBitArray(size, capacity)
}

// Capacity reports the total number of bits the allocated blocks can hold.
func (ba *bitarray) Capacity() int {
	return len(ba.blks) * constUint64BitCount
}

// Size reports the logical number of bits in use.
func (ba *bitarray) Size() int {
	return ba.size
}

// Resize changes the logical size, clearing all active blocks. Fails when
// the requested size exceeds the allocated capacity.
func (ba *bitarray) Resize(size int) error {
	// throw error if new size greater available capacity
	if capacity := ba.Capacity(); capacity < size {
		return ResizeOutOfRangeError{capacity, size}
	}
	// only clear active blocks
	blkCount := blkCount(size)
	for i := 0; i < blkCount; i++ {
		ba.blks[i] = 0
	}
	// resize
	ba.size = size
	return nil
}

// Count returns the number of set bits within the logical size.
func (ba *bitarray) Count() int {
	count := 0
	blkIndex, bitIndex := bitPosition(ba.size)
	// Full blocks are counted with the popcount intrinsic.
	for i := 0; i < blkIndex; i++ {
		count += bits.OnesCount64(ba.blks[i])
	}
	// The final partial block is scanned bit by bit.
	if bitIndex > 0 {
		blk := ba.blks[blkIndex]
		for i := 0; i < bitIndex; i++ {
			if (blk & (1 << uint(i))) != 0 {
				count++
			}
		}
	}
	return count
}

// Set turns on the bit at @index; errors when index >= size.
// NOTE(review): negative indexes are not rejected here — confirm callers
// never pass them (a negative index would panic on the slice access).
func (ba *bitarray) Set(index int) error {
	if index >= ba.size {
		return PositionOutOfRangeError{index, ba.size}
	}
	blkIndex, bitIndex := bitPosition(index)
	ba.blks[blkIndex] |= (1 << uint(bitIndex))
	return nil
}

// Clear turns off the bit at @index; errors when index >= size.
func (ba *bitarray) Clear(index int) error {
	if index >= ba.size {
		return PositionOutOfRangeError{index, ba.size}
	}
	blkIndex, bitIndex := bitPosition(index)
	ba.blks[blkIndex] &= ^(1 << uint(bitIndex))
	return nil
}

// Toggle flips the bit at @index; errors when index >= size.
func (ba *bitarray) Toggle(index int) error {
	if index >= ba.size {
		return PositionOutOfRangeError{index, ba.size}
	}
	blkIndex, bitIndex := bitPosition(index)
	ba.blks[blkIndex] ^= (1 << uint(bitIndex))
	return nil
}

// SetAll sets every bit in the active blocks.
// NOTE(review): bits beyond size in the last partial block are set too;
// Count masks them out, but IsSet will report them as set.
func (ba *bitarray) SetAll() {
	blkCount := blkCount(ba.Size())
	for i := 0; i < blkCount; i++ {
		ba.blks[i] = constUint64MaxValue
	}
}

// ClearAll clears every bit in the active blocks.
func (ba *bitarray) ClearAll() {
	blkCount := blkCount(ba.Size())
	for i := 0; i < blkCount; i++ {
		ba.blks[i] = 0
	}
}

// ToggleAll inverts every bit in the active blocks.
func (ba *bitarray) ToggleAll() {
	blkCount := blkCount(ba.Size())
	for i := 0; i < blkCount; i++ {
		ba.blks[i] = ^ba.blks[i]
	}
}

// IsSet reports whether the bit at @index is set.
// NOTE(review): unlike Set/Clear/Toggle there is no bounds check here, so an
// out-of-range index panics — confirm this is the intended contract.
func (ba *bitarray) IsSet(index int) bool {
	blkIndex, bitIndex := bitPosition(index)
	return (ba.blks[blkIndex] & (1 << uint(bitIndex))) != 0
}

// ToNums returns the indexes of all set bits, in ascending order.
func (ba *bitarray) ToNums() []int {
	blkIndex, bitIndex := bitPosition(ba.size)
	nums := make([]int, 0, (blkIndex+1)*8)
	// whole blocks
	for blkInd := 0; blkInd < blkIndex; blkInd++ {
		blk, blkPos := ba.blks[blkInd], blkInd*constUint64BitCount
		if blk == 0 {
			continue
		}
		// TODO use leading and trailing zeros in bits package
		// debugNums1, debugNums2 := make([]int, 0, 30), make([]int, 0, 30)
		// Only scan between the first and last set bit of the block.
		bitLead, bitTail := constUint64BitCount-bits.LeadingZeros64(blk), bits.TrailingZeros64(blk)
		for bitInd := bitTail; bitInd < bitLead; bitInd++ {
			if blk&(1<<uint(bitInd)) != 0 {
				nums = append(nums, blkPos+bitInd)
				// debugNums1 = append(debugNums1, bitInd)
			}
		}
		// for bitInd := 0; bitInd < constUint64BitCount; bitInd++ {
		// 	if blk&(1<<uint(bitInd)) != 0 {
		// 		nums = append(nums, blkPos+bitInd)
		// 		// debugNums2 = append(debugNums2, bitInd)
		// 	}
		// }
		// fmt.Printf("%064b ; Tail:%d, Lead:%d, Count:%d\n", blk, bitTail, bitLead, bits.OnesCount64(blk))
		// fmt.Printf("%d %v\n", len(debugNums1), debugNums1)
		// fmt.Printf("%d %v\n\n", len(debugNums2), debugNums2)
	}
	// last part block
	if bitIndex > 0 {
		blk, blkPos := ba.blks[blkIndex], blkIndex*constUint64BitCount
		for bitInd := 0; bitInd < bitIndex; bitInd++ {
			if blk&(1<<uint(bitInd)) != 0 {
				nums = append(nums, blkPos+bitInd)
			}
		}
	}
	return nums
}

// SubsetOf reports whether every set bit of ba is also set in super.
// NOTE(review): assumes super has at least as many active blocks as ba.
func (ba *bitarray) SubsetOf(super *bitarray) bool {
	isSubset := true
	blkCount := blkCount(ba.Size())
	for i := 0; i < blkCount && isSubset; i++ {
		isSubset = isSubset && (ba.blks[i]&super.blks[i] == ba.blks[i])
	}
	return isSubset
}

// SetSeries sets every stepIndex-th bit starting at startIndex.
// NOTE(review): Set's error return is deliberately ignored — the loop bound
// already keeps i < size.
func (ba *bitarray) SetSeries(startIndex, stepIndex int) {
	for i := startIndex; i < ba.size; i += stepIndex {
		ba.Set(i)
	}
}

// ClearSeries clears every stepIndex-th bit starting at startIndex.
func (ba *bitarray) ClearSeries(startIndex, stepIndex int) {
	for i := startIndex; i < ba.size; i += stepIndex {
		ba.Clear(i)
	}
}

// PrintRange renders the blocks spanning [frIndex, toIndex] as binary rows.
// NOTE(review): the upper loop bound is inclusive of blkCount(toIndex), which
// can index one past the last allocated block when toIndex == capacity —
// confirm callers' ranges.
func (ba *bitarray) PrintRange(frIndex, toIndex int) string {
	blkString, output := fmt.Sprintf("%%0%db\n", constUint64BitCount), ""
	blkCountStt, blkCountEnd := blkCount(frIndex), blkCount(toIndex)
	for i := blkCountStt; i <= blkCountEnd; i++ {
		output += fmt.Sprintf(blkString, ba.blks[i])
	}
	return output
}
package commands import ( "encoding/xml" "log" "testing" ) func TestFullCallsListParse(t *testing.T) { cmd := []byte(`<NCC from="naubuddy-17.node.domain" to="naucrm-68.node.domain"> <FullCallsList time_t="1513702855"/></NCC>`) var rs FullCallsList err := xml.Unmarshal(cmd, &rs) if err != nil { log.Fatalf("%v\n", err) } assertEqual(t, rs.FullCallsList.TimeT, uint64(1513702855)) }
package middleware import ( "crud-using-chi/internal/models" "crud-using-chi/pkg/common" "fmt" "github.com/dgrijalva/jwt-go" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" "github.com/spf13/viper" "net/http" ) type ( MiddlewareUser struct { Conf *viper.Viper Logger *logrus.Logger DB *sqlx.DB } ) func NewMiddlerwareUser(conf *viper.Viper, logger *logrus.Logger, db *sqlx.DB) (mu *MiddlewareUser) { mu = &MiddlewareUser{} mu.Conf = conf mu.Logger = logger mu.DB = db return } func (mu *MiddlewareUser) IsAuthorized(endpoint http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var ( modelAuth = models.NewAuthModel(mu.DB) session models.Session ) if r.Header["Token"] != nil { token, err := jwt.Parse(r.Header["Token"][0], func(token *jwt.Token) (interface{}, error) { if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { return nil, fmt.Errorf("Something went wrong") } return []byte(mu.Conf.GetString("jwt.jwt_signing_key")), nil }) if err != nil { mu.Logger.Errorf("Error occured : %v", err) } session, err = modelAuth.GetSessionByToken(token.Raw) if token.Valid && session.Valid { token, _ := jwt.ParseWithClaims(token.Raw, &models.MyCustomClaims{}, func(token *jwt.Token) (interface{}, error) { return []byte(mu.Conf.GetString("jwt.jwt_signing_key")), nil }) models.Claims = token.Claims.(*models.MyCustomClaims) endpoint.ServeHTTP(w, r) } else { common.RespondError(w, http.StatusUnauthorized, "Unauthorized") return } } else { common.RespondError(w, http.StatusUnauthorized, "Unauthorized") return } }) }
package cmd import ( "sort" "github.com/object88/cprofile" ) func createGlobalsCommand(o *globalOptions) *astCmd { astSetup := &astSetup{ "globals", "Returns list of instances of global variables.", "Returns the list of global variables for a program, with file name and offsets.", func(p *cprofile.Program) { globals := []string{} pkgs := p.Imports() if len(pkgs) == 0 { return } for _, pkg := range pkgs { gs := pkg.Globals(p.FileSet()) for _, v := range gs { globals = append(globals, v) } } sort.Strings(globals) stdout := cprofile.Stdout() for _, v := range globals { stdout.Printf("%s\n", v) } }, nil, } globalsCmd := createAstCommand(o, astSetup) return globalsCmd }
package deferTrap

import "net/http"

// Do demonstrates a classic defer pitfall: the response body is
// deferred-closed BEFORE the error is checked. When the request fails, res
// may be nil, so the deferred res.Body.Close() dereferences a nil pointer
// and panics. (The bug is kept intentionally, for illustration.)
func Do() error {
	res, err := http.Get("http://www.google.com")
	defer res.Body.Close()
	if err != nil {
		return err
	}
	// ..code...
	return nil
}

// Solution shows the safe pattern. Note that checking err alone is not
// sufficient — a documented caveat of http.Get: on most errors the response
// is nil and only the error is returned, but on a redirect error http.Get
// returns BOTH a non-nil response and a non-nil error. Guarding the deferred
// Close with a nil check guarantees the Body is closed whenever one exists.
// If the received data is not going to be used, it should also be drained
// and discarded so the underlying connection can be reused.
func Solution() error {
	res, err := http.Get("http://xxxxxxxxxx")
	if res != nil {
		defer res.Body.Close()
	}
	if err != nil {
		return err
	}
	// ..code...
	return nil
}
package slice

import (
	"fmt"
	"reflect"
	"sort"

	"github.com/xiagoo/gosort/consts"
)

// baseSort adapts a precomputed length plus less/swap closures to the
// standard sort.Interface.
type baseSort struct {
	length int
	less   func(i, j int) bool
	swap   func(i, j int)
}

// Len returns the number of elements.
func (bs *baseSort) Len() int {
	return bs.length
}

// Less reports whether element i sorts before element j.
func (bs *baseSort) Less(i, j int) bool {
	return bs.less(i, j)
}

// Swap exchanges elements i and j.
func (bs *baseSort) Swap(i, j int) {
	bs.swap(i, j)
}

// SortAscByKey sorts slice ascending by key; key should be a struct field name.
func SortAscByKey(slice interface{}, sortKey string) {
	sortByKey(slice, sortKey, consts.Asc)
}

// SortDescByKey sorts slice descending by key; key should be a struct field name.
func SortDescByKey(slice interface{}, sortKey string) {
	sortByKey(slice, sortKey, consts.Desc)
}

// sortByKey sorts slice by key in the given direction (consts.Asc/Desc).
// slice must be a []*T where T is a struct, and sortKey must name a field of
// T; any other input panics. Fields of unsupported kinds (anything other
// than the signed integers, floats, or string) leave the order unchanged,
// because less always reports false for them.
func sortByKey(slice interface{}, sortKey string, sortType int) {
	st := reflect.TypeOf(slice)
	// Validate the shape of the input: slice -> pointer -> struct.
	if st.Kind() != reflect.Slice {
		panic(fmt.Sprintf("type must be slice , you type is %s", st.Kind()))
	}
	if st.Elem().Kind() != reflect.Ptr {
		panic(fmt.Sprintf("type must be ptr, you type is %s", st.Elem().Kind()))
	}
	if st.Elem().Elem().Kind() != reflect.Struct {
		panic(fmt.Sprintf("type must be struct, you type is %s", st.Elem().Elem().Kind()))
	}
	field, flag := st.Elem().Elem().FieldByName(sortKey)
	if !flag {
		panic(fmt.Sprintf("struct field doesn't exist %s", sortKey))
	}
	sv := reflect.ValueOf(slice)
	// less compares the sortKey field of the structs pointed to by elements
	// i and j, honoring the requested direction.
	less := func(i, j int) bool {
		svi := sv.Index(i).Elem().FieldByName(sortKey)
		svj := sv.Index(j).Elem().FieldByName(sortKey)
		switch field.Type.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			if consts.Asc == sortType {
				return svi.Int() < svj.Int()
			}
			return svi.Int() > svj.Int()
		case reflect.Float32, reflect.Float64:
			if consts.Asc == sortType {
				return svi.Float() < svj.Float()
			}
			return svi.Float() > svj.Float()
		case reflect.String:
			if consts.Asc == sortType {
				return svi.String() < svj.String()
			}
			return svi.String() > svj.String()
		default:
			// Unsupported field kind: treat all elements as equal.
			return false
		}
	}
	sort.Sort(sortSlice(slice, less))
}

// sortSlice wraps slice and less into a sort.Interface implementation.
func sortSlice(slice interface{}, less func(i, j int) bool) sort.Interface {
	sv := reflect.ValueOf(slice)
	if sv.Kind() != reflect.Slice {
		panic(fmt.Sprintf("slice.Sort need slice value of type %T", slice))
	}
	return &baseSort{
		length: sv.Len(),
		less:   less,
		swap:   swapper(sv),
	}
}

// swapper returns a function that swaps elements i and j of v, using a
// single scratch value of the element type as temporary storage.
func swapper(v reflect.Value) func(i, j int) {
	tmp := reflect.New(v.Type().Elem()).Elem()
	return func(i, j int) {
		v1 := v.Index(i)
		v2 := v.Index(j)
		tmp.Set(v1)
		v1.Set(v2)
		v2.Set(tmp)
	}
}
package overmount

import (
	"time"
)

// ImageConfig is a portable, non-standard format used by overmount for the
// generation of other configuration formats used in images. It is an attempt
// to be abstract from the formats themselves. It is intentionally flat to
// avoid merging problems with newer editions of overmount.
//
// NOTE: some portions of this code are taken from docker/docker and opencontainers/image-spec.
type ImageConfig struct {
	// ID is a unique 64 character identifier of the image
	ID string `json:"id,omitempty"`

	// Parent is the ID of the parent image
	Parent string `json:"parent,omitempty"`

	// Comment is the commit message that was set when committing the image
	Comment string `json:"comment,omitempty"`

	// Created is the timestamp at which the image was created
	Created time.Time `json:"created"`

	// Container is the id of the container used to commit
	Container string `json:"container,omitempty"`

	// ContainerConfig is the configuration of the container that is committed
	// into the image. It is deliberately untyped (interface{}) so any
	// format-specific structure can be carried through unchanged.
	ContainerConfig interface{} `json:"container_config,omitempty"`

	// DockerVersion specifies the version of Docker that was used to build the image
	DockerVersion string `json:"docker_version,omitempty"`

	// Author is the name of the author that was specified when committing the image
	Author string `json:"author,omitempty"`

	// Architecture is the hardware that the image is built and runs on
	Architecture string `json:"architecture,omitempty"`

	// OS is the operating system used to build and run the image
	OS string `json:"os,omitempty"`

	// User defines the username or UID which the process in the container should run as.
	User string `json:"user,omitempty"`

	// ExposedPorts a set of ports to expose from a container running this image.
	ExposedPorts map[string]struct{} `json:"exposed_ports,omitempty"`

	// Env is a list of environment variables to be used in a container.
	Env []string `json:"env,omitempty"`

	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
	Entrypoint []string `json:"entrypoint,omitempty"`

	// Cmd defines the default arguments to the entrypoint of the container.
	Cmd []string `json:"cmd,omitempty"`

	// Volumes is a set of directories which should be created as data volumes in a container running this image.
	Volumes map[string]struct{} `json:"volumes,omitempty"`

	// WorkingDir sets the current working directory of the entrypoint process in the container.
	WorkingDir string `json:"working_dir,omitempty"`

	// Labels contains arbitrary metadata for the container.
	Labels map[string]string `json:"labels,omitempty"`

	// StopSignal contains the system call signal that will be sent to the container to exit.
	// NOTE(review): the tag is "stopsignal" (no underscore), unlike the other
	// snake_case keys here — confirm before "fixing", as it is wire format.
	StopSignal string `json:"stopsignal,omitempty"`
}
package main import ( "github.com/stretchr/testify/assert" "testing" ) func TestRotationCase0(t *testing.T) { arr := []int{1, 2, 3, 4, 5} shift := 2 exp := "4 5 1 2 3 " assert.Equal(t, exp, SolveRotation(arr, shift)) } func TestRotationCase1(t *testing.T) { arr := []int{1, 2, 3, 4, 5} shift := 10 exp := "1 2 3 4 5 " assert.Equal(t, exp, SolveRotation(arr, shift)) } func TestRotationCase2(t *testing.T) { arr := []int{1, 2, 3, 4, 5} shift := 3 exp := "3 4 5 1 2 " assert.Equal(t, exp, SolveRotation(arr, shift)) } func TestRotationCase3(t *testing.T) { arr := []int{1, 2, 3, 4, 5} shift := 5 exp := "1 2 3 4 5 " assert.Equal(t, exp, SolveRotation(arr, shift)) }
package utils type Semaphore interface { Down() Up() } type semaphore struct { sem chan struct{} } func (s *semaphore) Down() { s.sem <- struct{}{} } func (s *semaphore) Up() { _ = <-s.sem } func NewSemaphore(capacity int) Semaphore { return &semaphore{ sem: make(chan struct{}, capacity), } }
package proteus

import (
	"context"
	"database/sql"
)

// Executor runs queries that modify the data store.
type Executor interface {
	// Exec executes a query without returning any rows.
	// The args are for any placeholder parameters in the query.
	Exec(query string, args ...interface{}) (sql.Result, error)
}

// Querier runs queries that return Rows from the data store
type Querier interface {
	// Query executes a query that returns rows, typically a SELECT.
	// The args are for any placeholder parameters in the query.
	Query(query string, args ...interface{}) (*sql.Rows, error)
}

// Wrapper combines Executor and Querier into a single interface.
type Wrapper interface {
	Executor
	Querier
}

// ParamAdapter maps to valid positional parameters in a DBMS.
// For example, MySQL uses ? for every parameter, while Postgres uses $NUM and Oracle uses :NUM
type ParamAdapter func(pos int) string

// QueryMapper maps from a query name to an actual query
// It is used to support the proq struct tag, when it contains q:name
type QueryMapper interface {
	// Maps the supplied name to a query string
	// returns an empty string if there is no query associated with the supplied name
	Map(name string) string
}

// ContextQuerier defines the interface of a type that runs a SQL query with a context
type ContextQuerier interface {
	// QueryContext executes a query that returns rows, typically a SELECT.
	// The args are for any placeholder parameters in the query.
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
}

// ContextExecutor defines the interface of a type that runs a SQL exec with a context
type ContextExecutor interface {
	// ExecContext executes a query without returning any rows.
	// The args are for any placeholder parameters in the query.
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}

// ContextWrapper is an interface that contains both ContextQuerier and
// ContextExecutor. It represents all of the operations that are performed
// by Proteus.
type ContextWrapper interface {
	ContextQuerier
	ContextExecutor
}
package store

// Store is the minimal string key/value persistence contract used by the
// application.
type Store interface {
	// Get returns the value stored under the given key.
	Get(string) (string, error)
	// Set stores a value under the given key.
	Set(string, string)
	// Delete removes the given key (and its value) from the store.
	Delete(string)
	// Close releases any resources held by the store.
	Close()
	//All() map[string]string
}
package main

// Leetcode 977. (easy)

// sortedSquares squares each element of the sorted input A and returns the
// squares in non-decreasing order. Because A is sorted, the largest square
// is always at one of the two ends, so the result can be filled back-to-front
// with two pointers in O(n).
func sortedSquares(A []int) []int {
	res := make([]int, len(A))
	lo, hi := 0, len(A)-1
	for pos := len(A) - 1; pos >= 0; pos-- {
		left, right := abs(A[lo]), abs(A[hi])
		if left > right {
			res[pos] = left * left
			lo++
		} else {
			res[pos] = right * right
			hi--
		}
	}
	return res
}

// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// SPDX-License-Identifier: MIT

// Package apidoc is a documentation generator for RESTful APIs.
//
// It extracts specially formatted content from comments in source files and
// generates RESTful API documentation; most mainstream programming languages
// are supported.
package apidoc

import (
	"bytes"
	"log"
	"net/http"
	"path/filepath"
	"regexp"
	"time"

	"golang.org/x/text/language"

	"github.com/caixw/apidoc/v7/build"
	"github.com/caixw/apidoc/v7/core"
	"github.com/caixw/apidoc/v7/internal/ast"
	"github.com/caixw/apidoc/v7/internal/docs"
	"github.com/caixw/apidoc/v7/internal/locale"
	"github.com/caixw/apidoc/v7/internal/lsp"
)

const (
	// LSPVersion is the currently supported language server protocol version.
	LSPVersion = lsp.Version

	// DocVersion is the version of the document format.
	DocVersion = ast.Version
)

// Config represents the content of the apidoc.yaml configuration file.
type Config = build.Config

// SetLocale sets the current locale ID.
//
// If this function is never called, internal/locale.DefaultLocaleID is used
// by default. To adopt the system's locale information, the
// github.com/issue9/localeutil.SystemLanguageTag function can be used.
func SetLocale(tag language.Tag) { locale.SetTag(tag) }

// Locale returns the currently configured locale ID.
func Locale() language.Tag { return locale.Tag() }

// Locales returns all currently supported locales.
func Locales() []language.Tag { return locale.Tags() }

// Version returns the version number of this program.
//
// full indicates whether the build date and build-time Git commit ID should
// be included in the version string.
func Version(full bool) string {
	if full {
		return core.FullVersion()
	}
	return core.Version()
}

// Build parses the documentation and writes out the result.
//
// Documentation syntax errors are reported to h, which is responsible for
// handling them; problems with the options (o and i) are returned as a
// *core.Error.
//
// NOTE: to build documentation from a configuration file, use Config.Build.
func Build(h *core.MessageHandler, o *build.Output, i ...*build.Input) error {
	return build.Build(h, o, i...)
}

// Buffer generates the documentation content and returns it.
//
// Documentation syntax errors are reported to h, which is responsible for
// handling them; problems with the options (o and i) are returned as a
// *core.Error.
//
// NOTE: to build documentation from a configuration file, use Config.Buffer.
func Buffer(h *core.MessageHandler, o *build.Output, i ...*build.Input) (*bytes.Buffer, error) {
	return build.Buffer(h, o, i...)
}

// CheckSyntax validates the syntax of the documentation.
func CheckSyntax(h *core.MessageHandler, i ...*build.Input) error {
	return build.CheckSyntax(h, i...)
}

// ServeLSP serves the language server protocol.
//
// header indicates whether the transmitted payload carries headers;
// t is the connection type, currently tcp, udp, stdio or unix;
// timeout is the server's timeout for each read from the client, 0 meaning
// no timeout. A timeout is not treated as an error — the read simply starts
// over, which prevents reads from blocking forever and keeps the process
// able to terminate.
func ServeLSP(header bool, t, addr string, timeout time.Duration, info, erro *log.Logger) error {
	return lsp.Serve(header, t, addr, timeout, info, erro)
}

// Static builds a static file server for the content under dir.
//
// dir is the root of the static files, normally pointing at /docs;
// it can be used to host a local copy of https://apidoc.tools, with
// index.xml as the default page. If dir is empty, the embedded documentation
// content is served instead.
//
// stylesheet indicates whether only the XSL and related content is exposed.
//
// A simple local https://apidoc.tools site can be set up with:
//
//	http.Handle("/apidoc", apidoc.Static(...))
func Static(dir core.URI, stylesheet bool, erro *log.Logger) http.Handler {
	return docs.Handler(dir, stylesheet, erro)
}

// Server holds the options of the documentation-viewing middleware.
type Server struct {
	Status      int         // HTTP status for the document; defaults to 200
	Path        string      // route at which the document is served; defaults to /apidoc.xml
	ContentType string      // ContentType of the document; empty means application/xml
	Dir         core.URI    // location of the extras besides the document (xsl, css, ...); empty means the embedded data
	Stylesheet  bool        // only use the xsl/css style data from Dir, ignoring its other files
	Erro        *log.Logger // destination for server errors; defaults to log.Default()
}

// sanitize fills in default values for any unset fields.
func (srv *Server) sanitize() {
	if srv.Status == 0 {
		srv.Status = http.StatusOK
	}

	if srv.Path == "" {
		srv.Path = "/apidoc.xml"
	}

	if srv.ContentType == "" {
		srv.ContentType = "application/xml"
	}

	if srv.Erro == nil {
		srv.Erro = log.Default()
	}
}

// Buffer builds middleware that serves buf as the document content.
func (srv *Server) Buffer(buf []byte) http.Handler {
	srv.sanitize()
	buf = addStylesheet(buf)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == srv.Path {
			w.Header().Set("Content-Type", srv.ContentType)
			w.WriteHeader(srv.Status)
			w.Write(buf)
			return
		}
		Static(srv.Dir, srv.Stylesheet, srv.Erro).ServeHTTP(w, r)
	})
}

// File builds middleware that serves the content at path as the document.
func (srv *Server) File(path core.URI) (http.Handler, error) {
	data, err := path.ReadAll(nil)
	if err != nil {
		return nil, err
	}

	if srv.Path == "" {
		file, err := path.File()
		if err != nil {
			return nil, err
		}
		srv.Path = "/" + filepath.Base(file)
	}

	return srv.Buffer(data), nil
}

// procInst locates the <?xml ... ?> processing instruction.
var procInst = regexp.MustCompile(`<\?xml .+ ?>`)

// addStylesheet injects an xml-stylesheet processing instruction into data,
// placed right after the <?xml ?> directive when one exists, otherwise
// prepended to the document.
func addStylesheet(data []byte) []byte {
	pi := `
<?xml-stylesheet type="text/xsl" href="` + docs.StylesheetURL("./") + `"?>`

	if rslt := procInst.Find(data); len(rslt) > 0 {
		return procInst.ReplaceAll(data, append(rslt, []byte(pi)...))
	}

	ret := make([]byte, 0, len(data)+len(pi))
	return append(append(ret, pi...), data...)
}
package sshd

import (
	"testing"

	"golang.org/x/crypto/ssh"
)

const (
	// testingClientKey is a throwaway RSA private key used only by these
	// tests; it grants access to nothing.
	testingClientKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEArjUDq6/7ljVzoa7unbdSRMNIwfFd7S0YM931w7YstZXFvnuN
eavoAxDkL0mdWxV0Pi6f+FFi31oY3YHUBaWdvkZHXCY9L3zWRKz00SRNnyeQG8tO
GGhvhvgC6iGIE6A9IJlLxDm6scylp6JaN27P4CUNXy8gT0GnxvdwMGujgbMbPU2x
XAC5JnT1SP++7wJeZDwM1TXGn386EuTBN1epIsIvmsriW7eOXvOOf+7eQy/RbCJo
51rmF9kMnUqRvY5Be0Ur4D842JiFGRIVyon/IGoBD2d/JOPi36npJxz47LdTWsL0
dLREL009U8ttC00QuKoScHq4G39Jw8NmXQhhXwIDAQABAoIBAEhkW2wjK2dWOwD7
UslTfup4RGnjxWZkEOSs3g5ATAABhzUK3tWq7DUp9cj4zF0nYzDb6zojh/TM2fxi
kRrvoceKKOlQMqjjNZ9ASFQIxADZTfde2sslywLJWVy2JngRZJWBXozieISeSFCL
FPZoJBY/D3l4efK1k+UIuiRE9qNUfKq/XlZuh57nXJ/FoNdPeCh68Q0EcxlDiEmZ
cqpgKmRSnQDZBj+gaVDy6LQQ353ZlLkArPvaJFHo4pjUEsmvmG6GWkh2yR3MTdIA
LiN7MS8bk6zcL/1grQ6/R5we12A9V+XVvou4VPAwRacv74nIbbU8Y4UztrGentsN
+Bq5B0ECgYEA4I2ABmWdWluUrpUsXZEIgCVu3ee1FQmrqXUSeanGS+uaXXBJGbV5
kxvZqRaZ+mHxtOwAhmmHl/2M+hKc64m9018TX4BcDyXA/6HND1TJxxStYw92lCMG
tBT1NPoNKnHH9goJgLO24y3qu2Aax7FuF7YPLR8r0agfRUiWg9B2yQ8CgYEAxpqP
HCP4V1peElVtqppqTBz4S9Li9dnR+JuMkQZxt2nUuy29hR0SJlbIXRohjN3UHIOK
c1wMy0EIXmcLxApRpEDKcm1zcDF3LL7l0hFFX4+JYTcVNn5VLeUUQtFGmDaSQ/Y5
dT59kfSu8zVX9ZecUAUV9CLJC4MF7F0gCJzWwrECgYBKcp9ff5ELxBEnUI3E97C5
y69WItwGfY5MQGQ/sensgdBL6k5SF7iW7UTcqoGiYZahRR1nctVhrs5umn0sGh61
VXA22XesDfhOyHYT/yhmuJRDo3zM4E/4pHondj+nMtH44JsF8I9SAocwWEyIqGq3
scSWUR9WA0da0RYV3aeEQQKBgC9mCcukhguLBLKJcu/phH7/1v55qTMVtjgIH6cp
C5DDkELP6tBPHNrLkWwu5VzyQEJB3pQjnuYPckjdfQBfmhaCZA6lMozPMWsbcEwP
VSg2YIo0FDr6MagPaSN9QMTpGUVhCVuC+4MPC4X98C0r7uFmJVQrzSGTNqGvpAqK
K/MxAoGAX6SsKbBlANkAm4bhHRj7xse31n8mUT/ewp4h6TmknL4aSoPH+PqgDQQ5
WauSC6B2gAKgYogsDa+Ij8ck2NFFlPyeCuW88FOUXXBbOTj+S2dscJ85OIiZX7MV
hnpuSad2mCqNaqwU+/9ANrycBpaQtyHBspAYuO3/UUbilmJKgLo=
-----END RSA PRIVATE KEY-----`

	// testingClientFingerprint is the known fingerprint of the public half
	// of testingClientKey.
	testingClientFingerprint = `fa:61:1a:1f:45:6a:fa:32:5f:18:c4:4b:a5:b3:99:a3`
)

// sshTestingClientKey parses testingClientKey into an ssh.Signer.
func sshTestingClientKey() (ssh.Signer, error) {
	return ssh.ParsePrivateKey([]byte(testingClientKey))
}

// TestFingerprint verifies that fingerprint() produces the expected value
// for a known public key.
func TestFingerprint(t *testing.T) {
	key, _ := sshTestingClientKey()

	fp := fingerprint(key.PublicKey())
	if fp != testingClientFingerprint {
		t.Errorf("Expected fingerprint %s to match %s.", fp, testingClientFingerprint)
	}
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package vkb

import (
	"io/ioutil"
	"os"
	"reflect"
	"testing"

	"chromiumos/tast/local/coords"
)

// TestNewStrokeGroup verifies that readSvg + newStrokeGroup turn an SVG
// <path> of line segments into a single stroke of points (here the path
// M10 10 L20 20 L30 30 with a second argument of 5 yields 5 evenly spaced
// points along the diagonal — presumably the per-stroke sample count;
// confirm against newStrokeGroup's definition).
func TestNewStrokeGroup(t *testing.T) {
	want := &strokeGroup{
		width:  0.0,
		height: 0.0,
		strokes: []stroke{
			{
				points: []point{
					{x: 10.0, y: 10.0},
					{x: 15.0, y: 15.0},
					{x: 20.0, y: 20.0},
					{x: 25.0, y: 25.0},
					{x: 30.0, y: 30.0},
				},
			},
		},
	}

	// Write a minimal SVG fixture to a temp file for readSvg to parse.
	file, err := ioutil.TempFile("", "handwriting_test_")
	if err != nil {
		t.Fatal("TempFile() failed: ", err)
	}
	defer os.Remove(file.Name())
	defer file.Close()

	if _, err := file.Write([]byte(`<svg><defs><path d="M10 10L20 20L30 30"></path></defs></svg>`)); err != nil {
		t.Fatal("Write() failed: ", err)
	}

	svgFile, err := readSvg(file.Name())
	if err != nil {
		t.Fatal("readSvg() failed", err)
	}

	got := newStrokeGroup(svgFile, 5)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("newStrokeGroup() = %+v; want %+v", got, want)
	}
}

// TestScale verifies that strokeGroup.scale maps stroke coordinates into the
// given canvas rectangle, updating both the group's dimensions and each point.
func TestScale(t *testing.T) {
	want := &strokeGroup{
		width:  141.0,
		height: 141.0,
		strokes: []stroke{
			{
				points: []point{
					{x: 441.5, y: 512.0},
					{x: 586.025, y: 649.475},
				},
			},
		},
	}

	canvasLoc := coords.Rect{
		Left:   97,
		Top:    465,
		Width:  830,
		Height: 235,
	}

	sg := &strokeGroup{
		width:  0.0,
		height: 0.0,
		strokes: []stroke{
			{
				points: []point{
					{x: 100.0, y: 110.5},
					{x: 120.5, y: 130.0},
				},
			},
		},
	}

	sg.scale(canvasLoc)
	if !reflect.DeepEqual(sg, want) {
		t.Errorf("scale() = %+v; want %+v", sg, want)
	}
}
// Copyright 2022 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package cellular import ( "context" "time" "chromiumos/tast/local/cellular" "chromiumos/tast/local/chrome" "chromiumos/tast/local/chrome/uiauto/ossettings" "chromiumos/tast/local/network/netconfig" "chromiumos/tast/testing" ) type testParameters struct { roamingSubLabel string } func init() { testing.AddTest(&testing.Test{ Func: RoamingStatusLabel, LacrosStatus: testing.LacrosVariantUnneeded, Desc: "Checks the roaming label status on a roaming and non roaming SIM", Contacts: []string{ "nikhilcn@chromium.org", "cros-connectivity@google.com", }, SoftwareDeps: []string{"chrome"}, Attr: []string{"group:cellular", "cellular_unstable"}, Params: []testing.Param{ { Name: "on_roaming_sim", ExtraAttr: []string{"cellular_sim_roaming"}, Val: testParameters{ roamingSubLabel: "Currently roaming", }, }, { Name: "on_non_roaming_sim", ExtraAttr: []string{"cellular_sim_prod_esim"}, Val: testParameters{ roamingSubLabel: "Not currently roaming", }, }, }, Timeout: 3 * time.Minute, }) } func RoamingStatusLabel(ctx context.Context, s *testing.State) { cr, err := chrome.New(ctx) if err != nil { s.Fatal("Failed to start Chrome: ", err) } tconn, err := cr.TestAPIConn(ctx) if err != nil { s.Fatal("Failed to create Test API connection: ", err) } err = cellular.SetRoamingPolicy(ctx, true, false) if err != nil { s.Fatal("Failed to set roaming property: ", err) } err = cellular.ConnectToCellularNetwork(ctx) if err != nil { s.Fatal("Failed to set roaming property: ", err) } networkName, err := cellular.GetCellularNetwork(ctx) if err != nil { s.Fatal("Failed to get a cellular network: ", err) } app, err := ossettings.OpenNetworkDetailPage(ctx, tconn, cr, networkName, netconfig.Cellular) if err != nil { s.Fatal("Failed to open network detail page: ", networkName) } defer app.Close(ctx) roamingSubLabel, err := app.RoamingSubLabel(ctx, cr) if err != nil { 
s.Fatal("Failed to fetch sublabel: ", err) } if roamingSubLabel != s.Param().(testParameters).roamingSubLabel { s.Fatalf("Roaming sub-label is incorrect: got %q, want %q", roamingSubLabel, s.Param().(testParameters).roamingSubLabel) } }
package data import ( "math/rand" "time" ) type Player struct { Name string Deck []Card } func (h *Player)AddToDeck(c Card) { h.Deck = append(h.Deck, c) } func (h *Player)GetNextCard() Card { if len(h.Deck) == 0 { return Card{ Rank: "", Suit: "", } } card := h.Deck[0]; h.Deck = h.Deck[1:len(h.Deck)] return card } func (h *Player)ShuffleDeck() { dest := make([]Card, len(h.Deck)) perm := rand.Perm(len(h.Deck)) rand.Seed(time.Now().UTC().UnixNano()) for i, v := range perm { dest[v] = h.Deck[i] } h.Deck = dest } func (h Player)IsDeckEmpty() bool { return len(h.Deck) == 0 } func (h Player)DeckSize() int { return len(h.Deck) }
package messaging

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/boltdb/bolt"
	"github.com/influxdb/influxdb/raft"
)

// DefaultPollInterval is the default amount of time a topic reader will wait
// between checks for new segments or new data on an existing segment. This
// only occurs when the reader is at the end of all the data.
const DefaultPollInterval = 100 * time.Millisecond

// Broker represents distributed messaging system segmented into topics.
// Each topic represents a linear series of events.
type Broker struct {
	mu     sync.RWMutex      // guards the unexported fields below
	path   string            // data directory
	index  uint64            // highest applied index
	meta   *bolt.DB          // metadata
	topics map[uint64]*Topic // topics by id

	// Log is the distributed raft log that commands are applied to.
	Log interface {
		URL() url.URL
		URLs() []url.URL
		Leader() (uint64, url.URL)
		IsLeader() bool
		ClusterID() uint64
		Apply(data []byte) (index uint64, err error)
	}

	Logger *log.Logger // destination for broker log output; set via SetLogOutput
}

// NewBroker returns a new instance of a Broker with default values.
func NewBroker() *Broker {
	b := &Broker{
		topics: make(map[uint64]*Topic),
	}
	b.SetLogOutput(os.Stderr)
	return b
}

// Path returns the path used when opening the broker.
// Returns empty string if the broker is not open.
func (b *Broker) Path() string { return b.path }

// metaPath returns the file path to the broker's metadata file.
func (b *Broker) metaPath() string {
	if b.path == "" {
		return ""
	}
	return filepath.Join(b.path, "meta")
}

// URL returns the URL of the broker.
func (b *Broker) URL() url.URL { return b.Log.URL() }

// URLs returns a list of all broker URLs in the cluster.
func (b *Broker) URLs() []url.URL { return b.Log.URLs() }

// IsLeader returns true if the broker is the current cluster leader.
func (b *Broker) IsLeader() bool { return b.Log.IsLeader() }

// LeaderURL returns the URL to the leader broker.
func (b *Broker) LeaderURL() url.URL {
	_, u := b.Log.Leader()
	return u
}

// ClusterID returns the identifier for the cluster.
func (b *Broker) ClusterID() uint64 { return b.Log.ClusterID() }

// TopicPath returns the file path to a topic's data.
// Returns a blank string if the broker is closed.
func (b *Broker) TopicPath(id uint64) string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.topicPath(id)
}

// topicPath is the lock-free variant of TopicPath; callers must hold b.mu.
func (b *Broker) topicPath(id uint64) string {
	if b.path == "" {
		return ""
	}
	return filepath.Join(b.path, strconv.FormatUint(id, 10))
}

// Topic returns a topic on a broker by id.
// Returns nil if the topic doesn't exist or the broker is closed.
func (b *Broker) Topic(id uint64) *Topic {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.topics[id]
}

// Index returns the highest index seen by the broker across all topics.
// Returns 0 if the broker is closed.
func (b *Broker) Index() (uint64, error) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.index, nil
}

// opened returns true if the broker is in an open and running state.
func (b *Broker) opened() bool { return b.path != "" }

// SetLogOutput sets writer for all Broker log output.
func (b *Broker) SetLogOutput(w io.Writer) {
	b.Logger = log.New(w, "[broker] ", log.LstdFlags)
}

// Open initializes the log.
// The broker then must be initialized or join a cluster before it can be used.
func (b *Broker) Open(path string) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Require a non-blank path.
	if path == "" {
		return ErrPathRequired
	}

	// The work is wrapped in a closure so any failure can roll back the
	// partially opened state via close().
	if err := func() error {
		b.path = path

		// Ensure root directory exists.
		if err := os.MkdirAll(path, 0777); err != nil {
			return fmt.Errorf("mkdir: %s", err)
		}

		// Open meta file.
		meta, err := bolt.Open(b.metaPath(), 0666, &bolt.Options{Timeout: 1 * time.Second})
		if err != nil {
			return fmt.Errorf("open meta: %s", err)
		}
		b.meta = meta

		// Initialize data from meta store.
		if err := b.meta.Update(func(tx *bolt.Tx) error {
			tx.CreateBucketIfNotExists([]byte("meta"))

			// Read in index from meta store, if set.
			if v := tx.Bucket([]byte("meta")).Get([]byte("index")); v != nil {
				b.index = btou64(v)
			}

			return nil
		}); err != nil {
			return err
		}

		// Read all topic metadata into memory.
		if err := b.openTopics(); err != nil {
			return fmt.Errorf("open topics: %s", err)
		}

		return nil
	}(); err != nil {
		_ = b.close()
		return err
	}

	return nil
}

// openTopics reads all topic metadata into memory.
func (b *Broker) openTopics() error {
	// Read all topics from the broker directory.
	topics, err := ReadTopics(b.path)
	if err != nil {
		return fmt.Errorf("read topics: %s", err)
	}

	// Open each topic and append to the map.
	b.topics = make(map[uint64]*Topic)
	for _, t := range topics {
		if err := t.Open(); err != nil {
			return fmt.Errorf("open topic: id=%d, err=%s", t.id, err)
		}
		b.topics[t.id] = t
	}

	// Retrieve the highest index across all topics.
	for _, t := range b.topics {
		if t.index > b.index {
			b.index = t.index
		}
	}

	return nil
}

// Close closes the broker and all topics.
func (b *Broker) Close() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.close()
}

// close is the lock-free variant of Close; callers must hold b.mu.
func (b *Broker) close() error {
	// Return error if the broker is already closed.
	if !b.opened() {
		return ErrClosed
	}
	b.path = ""

	// Close meta data.
	if b.meta != nil {
		_ = b.meta.Close()
		b.meta = nil
	}

	// Close all topics.
	b.closeTopics()

	return nil
}

// closeTopics closes all topic files and clears the topics map.
func (b *Broker) closeTopics() {
	for _, t := range b.topics {
		_ = t.Close()
	}
	b.topics = make(map[uint64]*Topic)
}

// SetMaxIndex sets the highest index applied by the broker.
// This is only used for internal log messages. Topics may have a higher index.
func (b *Broker) SetMaxIndex(index uint64) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setMaxIndex(index)
}

// setMaxIndex persists index to the meta store and mirrors it in memory;
// callers must hold b.mu.
func (b *Broker) setMaxIndex(index uint64) error {
	// Update index in meta database.
	if err := b.meta.Update(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("meta")).Put([]byte("index"), u64tob(index))
	}); err != nil {
		return err
	}

	// Set in-memory index.
	b.index = index

	return nil
}

// Snapshot streams the current state of the broker and returns the index.
func (b *Broker) Snapshot(w io.Writer) (uint64, error) {
	// TODO: Prevent truncation during snapshot.

	// Calculate header under lock.
	b.mu.RLock()
	hdr, err := b.createSnapshotHeader()
	b.mu.RUnlock()
	if err != nil {
		return 0, fmt.Errorf("create snapshot: %s", err)
	}

	// Encode snapshot header.
	buf, err := json.Marshal(&hdr)
	if err != nil {
		return 0, fmt.Errorf("encode snapshot header: %s", err)
	}

	// Write header frame.
	if err := binary.Write(w, binary.BigEndian, uint32(len(buf))); err != nil {
		return 0, fmt.Errorf("write header size: %s", err)
	}
	if _, err := w.Write(buf); err != nil {
		return 0, fmt.Errorf("write header: %s", err)
	}

	// Stream each topic sequentially.
	for _, t := range hdr.Topics {
		for _, s := range t.Segments {
			if _, err := copyFileN(w, s.path, s.Size); err != nil {
				return 0, err
			}
		}
	}

	// Return the snapshot and its last applied index.
	return hdr.Index, nil
}

// createSnapshotHeader creates a snapshot header.
func (b *Broker) createSnapshotHeader() (*snapshotHeader, error) {
	// Create parent header.
	sh := &snapshotHeader{Index: b.index}

	// Append topics.
	for _, t := range b.topics {
		// Create snapshot topic.
		st := &snapshotTopic{ID: t.id}

		// Read segments from disk.
		segments, err := ReadSegments(t.path)
		if err != nil && !os.IsNotExist(err) {
			return nil, fmt.Errorf("read segments: %s", err)
		}

		// Add segments to topic.
		for _, s := range segments {
			// Retrieve current segment file size from disk.
			var size int64
			fi, err := os.Stat(s.Path)
			if os.IsNotExist(err) {
				size = 0
			} else if err == nil {
				size = fi.Size()
			} else {
				return nil, fmt.Errorf("stat segment: %s", err)
			}

			// Append segment.
			st.Segments = append(st.Segments, &snapshotTopicSegment{
				Index: s.Index,
				Size:  size,
				path:  s.Path,
			})
		}

		// Append topic to the snapshot.
		sh.Topics = append(sh.Topics, st)
	}

	return sh, nil
}

// copyFileN copies n bytes from a path to a writer.
func copyFileN(w io.Writer, path string, n int64) (int64, error) {
	// Open file for reading.
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer func() { _ = f.Close() }()

	// Copy file up to n bytes.
	return io.CopyN(w, f, n)
}

// Restore reads the broker state.
func (b *Broker) Restore(r io.Reader) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Remove and recreate broker path.
	if err := b.reset(); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("reset: %s", err)
	} else if err = os.MkdirAll(b.path, 0777); err != nil {
		return fmt.Errorf("mkdir: %s", err)
	}

	// Read header frame.
	var sz uint32
	if err := binary.Read(r, binary.BigEndian, &sz); err != nil {
		return fmt.Errorf("read header size: %s", err)
	}
	buf := make([]byte, sz)
	if _, err := io.ReadFull(r, buf); err != nil {
		return fmt.Errorf("read header: %s", err)
	}

	// Decode header.
	sh := &snapshotHeader{}
	if err := json.Unmarshal(buf, &sh); err != nil {
		return fmt.Errorf("decode header: %s", err)
	}

	// Close any topics which might be open and clear them out.
	b.closeTopics()

	// Copy topic files from snapshot to local disk.
	for _, st := range sh.Topics {
		t := NewTopic(st.ID, b.topicPath(st.ID))

		// Create topic directory.
		if err := os.MkdirAll(t.Path(), 0777); err != nil {
			return fmt.Errorf("make topic dir: %s", err)
		}

		// Copy data from snapshot into segment files.
		// We don't instantiate the segments because that will be done
		// automatically when calling Open() on the topic.
		for _, ss := range st.Segments {
			if err := func() error {
				// Create a new file with the starting index.
				f, err := os.Create(t.segmentPath(ss.Index))
				if err != nil {
					return fmt.Errorf("open segment: %s", err)
				}
				defer func() { _ = f.Close() }()

				// Copy from stream into file.
				if _, err := io.CopyN(f, r, ss.Size); err != nil {
					return fmt.Errorf("copy segment: %s", err)
				}

				return nil
			}(); err != nil {
				return err
			}
		}

		// Open topic.
		if err := t.Open(); err != nil {
			return fmt.Errorf("open topic: %s", err)
		}
		b.topics[t.id] = t
	}

	// Set the highest seen index.
	if err := b.setMaxIndex(sh.Index); err != nil {
		return fmt.Errorf("set max index: %s", err)
	}
	b.index = sh.Index

	return nil
}

// reset removes all files in the broker directory besides the raft directory.
func (b *Broker) reset() error {
	// Open handle to directory.
	f, err := os.Open(b.path)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()

	// Read directory items.
	fis, err := f.Readdir(0)
	if err != nil {
		return err
	}

	// Remove all files & directories besides raft.
	for _, fi := range fis {
		if fi.Name() == "raft" {
			continue
		}

		// NOTE(review): fi.Name() is bare, so RemoveAll resolves it against
		// the process working directory rather than b.path — this looks like
		// it should be filepath.Join(b.path, fi.Name()); confirm.
		if err := os.RemoveAll(fi.Name()); err != nil {
			return fmt.Errorf("remove: %s", fi.Name())
		}
	}

	return nil
}

// Publish writes a message.
// Returns the index of the message. Otherwise returns an error.
func (b *Broker) Publish(m *Message) (uint64, error) {
	buf, err := m.MarshalBinary()
	assert(err == nil, "marshal binary error: %s", err)
	return b.Log.Apply(buf)
}

// TopicReader returns a new topic reader for a topic starting from a given index.
func (b *Broker) TopicReader(topicID, index uint64, streaming bool) io.ReadCloser {
	return NewTopicReader(b.TopicPath(topicID), index, streaming)
}

// SetTopicMaxIndex updates the highest replicated index for a topic.
// If a higher index is already set on the topic then the call is ignored.
// This index is only held in memory and is used for topic segment reclamation.
func (b *Broker) SetTopicMaxIndex(topicID, index uint64) error {
	_, err := b.Publish(&Message{
		Type: SetTopicMaxIndexMessageType,
		Data: marshalTopicIndex(topicID, index),
	})
	return err
}

// applySetTopicMaxIndex handles a SetTopicMaxIndexMessageType message
// during Apply.
func (b *Broker) applySetTopicMaxIndex(m *Message) {
	topicID, index := unmarshalTopicIndex(m.Data)

	// Set index if it's not already set higher.
	t := b.topics[topicID]
	if t != nil && t.index < index {
		t.index = index
	}
}

// marshalTopicIndex encodes topicID and index as 16 big-endian bytes.
func marshalTopicIndex(topicID, index uint64) []byte {
	b := make([]byte, 16)
	binary.BigEndian.PutUint64(b[0:8], topicID)
	binary.BigEndian.PutUint64(b[8:16], index)
	return b
}

// unmarshalTopicIndex decodes the 16-byte payload built by marshalTopicIndex.
func unmarshalTopicIndex(b []byte) (topicID, index uint64) {
	topicID = binary.BigEndian.Uint64(b[0:8])
	index = binary.BigEndian.Uint64(b[8:16])
	return
}

// Apply executes a message against the broker.
func (b *Broker) Apply(m *Message) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Exit if broker isn't open.
	if !b.opened() {
		return ErrClosed
	}

	// Ensure messages with old indexes aren't re-applied.
	assert(m.Index > b.index, "stale apply: msg=%d, broker=%d", m.Index, b.index)

	// Process internal commands separately than the topic writes.
	switch m.Type {
	case SetTopicMaxIndexMessageType:
		b.applySetTopicMaxIndex(m)
	default:
		// Create topic if not exists.
		t := b.topics[m.TopicID]
		if t == nil {
			t = NewTopic(m.TopicID, b.topicPath(m.TopicID))
			if err := t.Open(); err != nil {
				return fmt.Errorf("open topic: %s", err)
			}
			b.topics[t.id] = t
		}

		// Write message to topic.
		if err := t.WriteMessage(m); err != nil {
			return fmt.Errorf("write message: %s", err)
		}
	}

	// Save highest applied index in memory.
	// Only internal messages need to have their indexes saved to disk.
	b.index = m.Index

	return nil
}

// snapshotHeader represents the header of a snapshot.
type snapshotHeader struct {
	Topics []*snapshotTopic `json:"topics"`
	Index  uint64           `json:"index"`
}

// snapshotTopic describes one topic inside a snapshot.
type snapshotTopic struct {
	ID       uint64                  `json:"id"`
	Segments []*snapshotTopicSegment `json:"segments"`
}

// snapshotTopicSegment describes one on-disk segment inside a snapshot.
type snapshotTopicSegment struct {
	Index uint64 `json:"index"`
	Size  int64  `json:"size"`

	path string // local segment path; not serialized
}

// RaftFSM is a wrapper struct around the broker that implements the raft.FSM interface.
// It will panic for any errors that occur during Apply.
type RaftFSM struct {
	Broker interface {
		Apply(m *Message) error
		Index() (uint64, error)
		SetMaxIndex(uint64) error
		Snapshot(w io.Writer) (uint64, error)
		Restore(r io.Reader) error
	}
}

// Index, Snapshot and Restore delegate directly to the wrapped broker.
func (fsm *RaftFSM) Index() (uint64, error) { return fsm.Broker.Index() }

func (fsm *RaftFSM) Snapshot(w io.Writer) (uint64, error) { return fsm.Broker.Snapshot(w) }

func (fsm *RaftFSM) Restore(r io.Reader) error { return fsm.Broker.Restore(r) }

// MustApply applies a raft command to the broker. Panic on error.
func (fsm *RaftFSM) MustApply(e *raft.LogEntry) {
	switch e.Type {
	case raft.LogEntryCommand:
		// Decode message.
		m := &Message{}
		if err := m.UnmarshalBinary(e.Data); err != nil {
			panic("message unmarshal: " + err.Error())
		}
		// Stamp the message with the raft log index before applying.
		m.Index = e.Index

		// Apply message.
		if err := fsm.Broker.Apply(m); err != nil {
			panic(err.Error())
		}

	default:
		// Move internal index forward if it's an internal raft comand.
		if err := fsm.Broker.SetMaxIndex(e.Index); err != nil {
			panic(fmt.Sprintf("set max index: idx=%d, err=%s", e.Index, err))
		}
	}
}

// DefaultMaxSegmentSize is the largest a segment can get before starting a new segment.
const DefaultMaxSegmentSize = 10 * 1024 * 1024 // 10MB

// topic represents a single named queue of messages.
// Each topic is identified by a unique path.
//
// Topics write their entries to segmented log files which contain a
// contiguous range of entries.
type Topic struct {
	mu     sync.Mutex // guards all fields below
	id     uint64     // unique identifier
	index  uint64     // highest index replicated
	path   string     // on-disk path
	file   *os.File   // last segment writer
	opened bool       // set by Open, cleared by close

	// The largest a segment can get before splitting into a new segment.
	MaxSegmentSize int64
}

// NewTopic returns a new instance of Topic.
func NewTopic(id uint64, path string) *Topic {
	return &Topic{
		id:             id,
		path:           path,
		MaxSegmentSize: DefaultMaxSegmentSize,
	}
}

// ID returns the topic identifier.
func (t *Topic) ID() uint64 { return t.id }

// Path returns the topic path.
func (t *Topic) Path() string { return t.path }

// Index returns the highest replicated index for the topic.
func (t *Topic) Index() uint64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.index
}

// SegmentPath returns the path to a segment starting with a given log index.
func (t *Topic) SegmentPath(index uint64) string {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.segmentPath(index)
}

// segmentPath is SegmentPath without locking; callers must hold t.mu.
// Segment files are named by the decimal starting index.
func (t *Topic) segmentPath(index uint64) string {
	if t.path == "" {
		return ""
	}
	return filepath.Join(t.path, strconv.FormatUint(index, 10))
}

// Open opens a topic for writing.
//
// It creates the topic directory if needed, recovers the highest written
// index from the last existing segment, and leaves an append handle open on
// that segment. On any failure the topic is rolled back to the closed state.
func (t *Topic) Open() error {
	t.mu.Lock()
	defer t.mu.Unlock()

	// Ensure topic is not already open and it has a path.
	if t.opened {
		return ErrTopicOpen
	} else if t.path == "" {
		return ErrPathRequired
	}

	if err := func() error {
		t.opened = true

		// Ensure the parent directory exists.
		if err := os.MkdirAll(t.path, 0777); err != nil {
			return err
		}

		// Read available segments.
		segments, err := ReadSegments(t.path)
		if err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("read segments: %s", err)
		}

		// Read max index and open file handle if we have segments.
		if len(segments) > 0 {
			s := segments.Last()

			// Read the last segment and extract the last message index.
			index, err := ReadSegmentMaxIndex(s.Path)
			if err != nil {
				return fmt.Errorf("read segment max index: %s", err)
			}
			t.index = index

			// Open file handle on the segment.
			f, err := os.OpenFile(s.Path, os.O_RDWR|os.O_APPEND, 0666)
			if err != nil {
				return fmt.Errorf("open segment: %s", err)
			}
			t.file = f
		}

		return nil
	}(); err != nil {
		// Partial open: release anything acquired above.
		_ = t.close()
		return err
	}

	return nil
}

// Close closes the topic and segment writer.
func (t *Topic) Close() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.close()
}

// close is Close without locking; callers must hold t.mu.
func (t *Topic) close() error {
	if t.file != nil {
		_ = t.file.Close()
		t.file = nil
	}
	t.opened = false
	t.index = 0
	return nil
}

// ReadIndex reads the highest available index for a topic from disk.
// Returns 0 when the topic has no segments.
func (t *Topic) ReadIndex() (uint64, error) {
	// Read a list of all segments.
	segments, err := ReadSegments(t.path)
	if err != nil && !os.IsNotExist(err) {
		return 0, fmt.Errorf("read segments: %s", err)
	}

	// Ignore if there are no available segments.
	if len(segments) == 0 {
		return 0, nil
	}

	// Read highest index on the last segment.
	index, err := ReadSegmentMaxIndex(segments.Last().Path)
	if err != nil {
		return 0, fmt.Errorf("read segment max index: %s", err)
	}

	return index, nil
}

// WriteMessage writes a message to the end of the topic.
// Messages must arrive with strictly increasing indexes; a message at or
// below the current high-water mark is rejected with ErrStaleWrite.
func (t *Topic) WriteMessage(m *Message) error {
	t.mu.Lock()
	defer t.mu.Unlock()

	// Return error if message index is lower than the topic's highest index.
	if m.Index <= t.index {
		return ErrStaleWrite
	}

	// Close the current file handle if it's too large.
	if t.file != nil {
		if fi, err := t.file.Stat(); err != nil {
			return fmt.Errorf("stat: %s", err)
		} else if fi.Size() > t.MaxSegmentSize {
			_ = t.file.Close()
			t.file = nil
		}
	}

	// Create a new segment if we have no handle.
	// The new segment is named after this message's index.
	if t.file == nil {
		f, err := os.OpenFile(t.segmentPath(m.Index), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
		if err != nil {
			return fmt.Errorf("create segment file: %s", err)
		}
		t.file = f
	}

	// Encode message.
	b := make([]byte, messageHeaderSize+len(m.Data))
	copy(b, m.marshalHeader())
	copy(b[messageHeaderSize:], m.Data)

	// Write to last segment.
	if _, err := t.file.Write(b); err != nil {
		return fmt.Errorf("write segment: %s", err)
	}

	return nil
}

// Topics represents a list of topics sorted by id.
type Topics []*Topic

func (a Topics) Len() int           { return len(a) }
func (a Topics) Less(i, j int) bool { return a[i].id < a[j].id }
func (a Topics) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// ReadTopics reads all topics from a directory path.
// Each subdirectory with a purely numeric name becomes one topic.
func ReadTopics(path string) (Topics, error) {
	// Open handle to directory.
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()

	// Read directory items.
	fis, err := f.Readdir(0)
	if err != nil {
		return nil, err
	}

	// Create a topic for each directory with a numeric name.
	var a Topics
	for _, fi := range fis {
		// Skip non-directory paths.
		if !fi.IsDir() {
			continue
		}

		topicID, err := strconv.ParseUint(fi.Name(), 10, 64)
		if err != nil {
			continue
		}

		a = append(a, NewTopic(topicID, filepath.Join(path, fi.Name())))
	}
	sort.Sort(a)

	return a, nil
}

// Segment represents a contiguous section of a topic log.
type Segment struct {
	Index uint64 // starting index of the segment and name
	Path  string // path to the segment file.
}

// Size returns the file size of the segment.
func (s *Segment) Size() (int64, error) {
	fi, err := os.Stat(s.Path)
	if err != nil {
		return 0, err
	}
	return fi.Size(), nil
}

// Segments represents a list of segments sorted by index.
type Segments []*Segment

// Last returns the last segment in the slice.
// Returns nil if there are no segments.
func (a Segments) Last() *Segment {
	if len(a) == 0 {
		return nil
	}
	return a[len(a)-1]
}

func (a Segments) Len() int           { return len(a) }
func (a Segments) Less(i, j int) bool { return a[i].Index < a[j].Index }
func (a Segments) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// ReadSegments reads all segments from a directory path.
// Each file with a purely numeric name becomes one segment.
func ReadSegments(path string) (Segments, error) {
	// Open handle to directory.
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()

	// Read directory items.
	fis, err := f.Readdir(0)
	if err != nil {
		return nil, err
	}

	// Create a segment for each file with a numeric name.
	var a Segments
	for _, fi := range fis {
		index, err := strconv.ParseUint(fi.Name(), 10, 64)
		if err != nil {
			continue
		}
		a = append(a, &Segment{
			Index: index,
			Path:  filepath.Join(path, fi.Name()),
		})
	}
	sort.Sort(a)

	return a, nil
}

// ReadSegmentByIndex returns the segment that contains a given index.
// Returns (nil, nil) when the topic has no segments.
func ReadSegmentByIndex(path string, index uint64) (*Segment, error) {
	// Find a list of all segments.
	segments, err := ReadSegments(path)
	if os.IsNotExist(err) {
		return nil, err
	} else if err != nil {
		return nil, fmt.Errorf("read segments: %s", err)
	}

	// If there are no segments then ignore.
	// If index is zero then start from the first segment.
	// If index is less than the first segment range then return error.
	if len(segments) == 0 {
		return nil, nil
	} else if index == 0 {
		return segments[0], nil
	} else if index < segments[0].Index {
		return nil, ErrSegmentReclaimed
	}

	// Find segment that contains index.
	for i := range segments[:len(segments)-1] {
		if index >= segments[i].Index && index < segments[i+1].Index {
			return segments[i], nil
		}
	}

	// If no segment ranged matched then return the last segment.
	return segments[len(segments)-1], nil
}

// ReadSegmentMaxIndex returns the highest index recorded in a segment.
// It scans every message in the file, so cost is proportional to segment size.
func ReadSegmentMaxIndex(path string) (uint64, error) {
	// Open segment file.
	f, err := os.Open(path)
	if os.IsNotExist(err) {
		return 0, err
	} else if err != nil {
		return 0, fmt.Errorf("open: %s", err)
	}
	defer func() { _ = f.Close() }()

	// Read all messages until the end.
	dec := NewMessageDecoder(f)
	index := uint64(0)
	for {
		var m Message
		if err := dec.Decode(&m); err == io.EOF {
			return index, nil
		} else if err != nil {
			return 0, fmt.Errorf("decode: %s", err)
		}
		index = m.Index
	}
}

// TopicReader reads data on a single topic from a given index.
type TopicReader struct {
	mu        sync.Mutex
	path      string // topic directory path
	index     uint64 // starting index
	streaming bool   // true if reader should hang and wait for new messages

	file   *os.File // current segment file handler
	closed bool     // set by Close, or by nextSegment at end of a non-streaming read

	// The time between file system polling to check for new segments.
	PollInterval time.Duration
}

// NewTopicReader returns a new instance of TopicReader that reads segments
// from a path starting from a given index.
func NewTopicReader(path string, index uint64, streaming bool) *TopicReader {
	return &TopicReader{
		path:         path,
		index:        index,
		streaming:    streaming,
		PollInterval: DefaultPollInterval,
	}
}

// Read reads the next bytes from the reader into the buffer.
func (r *TopicReader) Read(p []byte) (int, error) {
	for {
		// Retrieve current segment file handle.
		// If the reader is closed then return EOF.
		// If we don't have a file and we're streaming then sleep and retry.
		f, err := r.File()
		if err == ErrReaderClosed {
			return 0, io.EOF
		} else if err != nil {
			return 0, fmt.Errorf("file: %s", err)
		} else if f == nil {
			if r.streaming {
				time.Sleep(r.PollInterval)
				continue
			}
			return 0, io.EOF
		}

		// Read under lock so the underlying file cannot be closed.
		r.mu.Lock()
		n, err := f.Read(p)
		r.mu.Unlock()

		// Read into buffer.
		// If no more data is available, then retry with the next segment.
		if err == io.EOF {
			if err := r.nextSegment(); err != nil {
				return 0, fmt.Errorf("next segment: %s", err)
			}
			// NOTE(review): this sleeps one PollInterval even when not
			// streaming and even when nextSegment already opened the next
			// file — looks like it adds avoidable latency; confirm intent.
			time.Sleep(r.PollInterval)
			continue
		} else {
			return n, err
		}
	}
}

// File returns the current segment file handle.
// Returns nil when there is no more data left.
func (r *TopicReader) File() (*os.File, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Exit if closed.
	if r.closed {
		return nil, ErrReaderClosed
	}

	// If the first file hasn't been opened then open it and seek.
	if r.file == nil {
		// Find the segment containing the index.
		// Exit if no segments are available or if path not found.
		segment, err := ReadSegmentByIndex(r.path, r.index)
		if os.IsNotExist(err) {
			return nil, nil
		} else if err != nil {
			return nil, fmt.Errorf("segment by index: %s", err)
		} else if segment == nil {
			return nil, nil
		}

		// Open that segment file.
		f, err := os.Open(segment.Path)
		if err != nil {
			return nil, fmt.Errorf("open: %s", err)
		}

		// Seek to index.
		if err := r.seekAfterIndex(f, r.index); err != nil {
			_ = f.Close()
			return nil, fmt.Errorf("seek to index: %s", err)
		}

		// Save file handle and segment name.
		r.file = f
	}

	return r.file, nil
}

// seekAfterIndex moves a segment file to the message after a given index.
// It decodes forward until it sees a message at or past seek, then rewinds
// the file position to that message's header so the next read returns it.
func (r *TopicReader) seekAfterIndex(f *os.File, seek uint64) error {
	dec := NewMessageDecoder(f)
	for {
		var m Message
		if err := dec.Decode(&m); err == io.EOF {
			return nil
		} else if err != nil {
			return err
		} else if m.Index >= seek {
			// Seek to message start.
			if _, err := f.Seek(-int64(messageHeaderSize+len(m.Data)), os.SEEK_CUR); err != nil {
				return fmt.Errorf("seek: %s", err)
			}
			return nil
		}
	}
}

// nextSegment closes the current segment's file handle and opens the next segment.
// For non-streaming readers that are already on the last segment, the reader
// is marked closed so the next Read returns EOF.
func (r *TopicReader) nextSegment() error {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Skip if the reader is closed.
	if r.closed {
		return nil
	}

	// Find current segment index.
	// Segment files are named by their decimal starting index.
	index, err := strconv.ParseUint(filepath.Base(r.file.Name()), 10, 64)
	if err != nil {
		return fmt.Errorf("parse current segment index: %s", err)
	}

	// Read current segment list.
	// If no segments exist then exit.
	// If current segment is the last segment then ignore.
	segments, err := ReadSegments(r.path)
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return fmt.Errorf("read segments: %s", err)
	} else if len(segments) == 0 {
		return nil
	} else if segments[len(segments)-1].Index == index {
		if !r.streaming {
			r.closed = true
		}
		return nil
	}

	// Loop over segments and find the next one.
	for i := range segments[:len(segments)-1] {
		if segments[i].Index == index {
			// Clear current file.
			if r.file != nil {
				r.file.Close()
				r.file = nil
			}

			// Open next segment.
			f, err := os.Open(segments[i+1].Path)
			if err != nil {
				return fmt.Errorf("open next segment: %s", err)
			}
			r.file = f
			return nil
		}
	}

	// This should only occur if our current segment was deleted.
	r.closed = true
	return nil
}

// Close closes the reader.
func (r *TopicReader) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Close current handle.
	if r.file != nil {
		_ = r.file.Close()
		r.file = nil
	}

	// Mark reader as closed.
	r.closed = true
	return nil
}

// MessageType represents the type of message.
type MessageType uint16

// BrokerMessageType is a flag set on broker messages to prevent them
// from being passed through to topics.
const BrokerMessageType = 0x8000

const (
	SetTopicMaxIndexMessageType = BrokerMessageType | MessageType(0x00)
)

// The size of the encoded message header, in bytes:
// type (2) + topic id (8) + index (8) + data length (4).
const messageHeaderSize = 2 + 8 + 8 + 4

// Message represents a single item in a topic.
type Message struct {
	Type    MessageType
	TopicID uint64
	Index   uint64
	Data    []byte
}

// WriteTo encodes and writes the message to a writer. Implements io.WriterTo.
func (m *Message) WriteTo(w io.Writer) (n int64, err error) {
	if n, err := w.Write(m.marshalHeader()); err != nil {
		return int64(n), err
	}
	if n, err := w.Write(m.Data); err != nil {
		return int64(messageHeaderSize + n), err
	}
	return int64(messageHeaderSize + len(m.Data)), nil
}

// MarshalBinary returns a binary representation of the message.
// This implements encoding.BinaryMarshaler. An error cannot be returned.
func (m *Message) MarshalBinary() ([]byte, error) {
	b := make([]byte, messageHeaderSize+len(m.Data))
	copy(b, m.marshalHeader())
	copy(b[messageHeaderSize:], m.Data)
	return b, nil
}

// UnmarshalBinary reads a message from a binary encoded slice.
// This implements encoding.BinaryUnmarshaler.
func (m *Message) UnmarshalBinary(b []byte) error {
	// unmarshalHeader allocates m.Data to the length recorded in the header.
	m.unmarshalHeader(b)
	if len(b[messageHeaderSize:]) < len(m.Data) {
		return fmt.Errorf("message data too short: %d < %d", len(b[messageHeaderSize:]), len(m.Data))
	}
	copy(m.Data, b[messageHeaderSize:])
	return nil
}

// marshalHeader returns a byte slice with the message header.
func (m *Message) marshalHeader() []byte {
	b := make([]byte, messageHeaderSize)
	binary.BigEndian.PutUint16(b[0:2], uint16(m.Type))
	binary.BigEndian.PutUint64(b[2:10], m.TopicID)
	binary.BigEndian.PutUint64(b[10:18], m.Index)
	binary.BigEndian.PutUint32(b[18:22], uint32(len(m.Data)))
	return b
}

// unmarshalHeader reads message header data from binary encoded slice.
// The data field is appropriately sized but is not filled.
func (m *Message) unmarshalHeader(b []byte) {
	m.Type = MessageType(binary.BigEndian.Uint16(b[0:2]))
	m.TopicID = binary.BigEndian.Uint64(b[2:10])
	m.Index = binary.BigEndian.Uint64(b[10:18])
	m.Data = make([]byte, binary.BigEndian.Uint32(b[18:22]))
}

// MessageDecoder decodes messages from a reader.
type MessageDecoder struct {
	r io.Reader
}

// NewMessageDecoder returns a new instance of the MessageDecoder.
func NewMessageDecoder(r io.Reader) *MessageDecoder {
	return &MessageDecoder{r: r}
}

// Decode reads a message from the decoder's reader.
// Returns io.EOF unwrapped when the stream ends cleanly at a message boundary.
func (dec *MessageDecoder) Decode(m *Message) error {
	// Read header bytes.
	var b [messageHeaderSize]byte
	if _, err := io.ReadFull(dec.r, b[:]); err == io.EOF {
		return err
	} else if err != nil {
		return fmt.Errorf("read header: %s", err)
	}
	m.unmarshalHeader(b[:])

	// Read data.
	if _, err := io.ReadFull(dec.r, m.Data); err != nil {
		return fmt.Errorf("read body: %s", err)
	}

	return nil
}

// UnmarshalMessage decodes a byte slice into a message.
func UnmarshalMessage(data []byte) (*Message, error) {
	m := &Message{}
	if err := m.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	return m, nil
}

type flusher interface {
	Flush()
}

// uint64Slice attaches the methods of sort.Interface to []uint64,
// sorting in increasing order.
type uint64Slice []uint64

func (p uint64Slice) Len() int           { return len(p) }
func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// mustMarshalJSON encodes a value to JSON.
// This will panic if an error occurs. This should only be used internally when
// an invalid marshal will cause corruption and a panic is appropriate.
func mustMarshalJSON(v interface{}) []byte {
	b, err := json.Marshal(v)
	if err != nil {
		panic("marshal: " + err.Error())
	}
	return b
}

// mustUnmarshalJSON decodes a value from JSON.
// This will panic if an error occurs. This should only be used internally when
// an invalid unmarshal will cause corruption and a panic is appropriate.
func mustUnmarshalJSON(b []byte, v interface{}) {
	if err := json.Unmarshal(b, v); err != nil {
		panic("unmarshal: " + err.Error())
	}
}

// assert will panic with a given formatted message if the given condition is false.
func assert(condition bool, msg string, v ...interface{}) {
	if !condition {
		panic(fmt.Sprintf("assert failed: "+msg, v...))
	}
}

// u64tob converts a uint64 into an 8-byte slice.
func u64tob(v uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, v)
	return b
}

// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }

// warn and warnf are ad-hoc debugging helpers writing to stderr.
func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
package monitor import ( "time" "themis/config" "themis/database" ) const ( flagManage uint = 1 << 2 flagStorage uint = 1 << 1 flagNetwork uint = 1 << 0 // state stateTransitionInterval = 60 ) var ( flagTagMap = map[string]uint{ "manage": flagManage, "storage": flagStorage, "network": flagNetwork, } doFenceStatus = []string{ HostFailedStatus, HostFailedStatus, } openstackDecisionMatrix = []bool{ /* +------------+----------+-----------+--------------+ */ /* | Management | Storage | Network | Fence | */ /* +------------+----------+-----------+--------------+ */ /* | good | good | good | */ false, /* | */ /* | good | good | bad | */ true, /* | */ /* | good | bad | good | */ true, /* | */ /* | good | bad | bad | */ true, /* | */ /* | bad | good | good | */ false, /* | */ /* | bad | good | bad | */ true, /* | */ /* | bad | bad | good | */ true, /* | */ /* | bad | bad | bad | */ true, /* | */ /* +-----------------------------------+--------------+ */ } ) type PolicyEngine struct { config *config.ThemisConfig decisionMatrix []bool } func NewPolicyEngine(config *config.ThemisConfig) *PolicyEngine { return &PolicyEngine{ config: config, decisionMatrix: openstackDecisionMatrix, } } func saveHost(host *database.Host) { host.UpdatedAt = time.Now() database.HostUpdateFields(host, "status", "disabled", "updated_at") } func isAllActive(states []*database.HostState) bool { allActive := true for _, state := range states { if state.FailedTimes > 0 { allActive = false break } } return allActive } func hasAnyFailure(states []*database.HostState) bool { hasFailure := false for _, state := range states { if state.FailedTimes > 0 { hasFailure = true break } } return hasFailure } func hasFatalFailure(states []*database.HostState) bool { keyStates := make([]*database.HostState, 0) for _, s := range states { if s.Tag == "network" || s.Tag == "storage" { keyStates = append(keyStates, s) } } hasFailure := false for _, state := range keyStates { if state.FailedTimes > 0 { hasFailure = true 
break } } return hasFailure } func updateHostFSM(host *database.Host, states []*database.HostState) { duration := time.Since(host.UpdatedAt).Seconds() switch host.Status { case HostActiveStatus: if hasAnyFailure(states) { host.Status = HostCheckingStatus saveHost(host) } case HostInitialStatus: if duration >= stateTransitionInterval { if isAllActive(states) { host.Status = HostActiveStatus saveHost(host) } } case HostCheckingStatus: if duration >= stateTransitionInterval { if isAllActive(states) { host.Status = HostActiveStatus saveHost(host) } else if hasFatalFailure(states) { host.Status = HostFailedStatus saveHost(host) } } } } func (p *PolicyEngine) HandleEvents(events Events) { // group by hostname hostTags := map[string]map[string]string{} for _, e := range events { tags := hostTags[e.Hostname] if tags != nil { tags[e.NetworkTag] = e.Status } else { tags = map[string]string{ e.NetworkTag: e.Status, } } hostTags[e.Hostname] = tags } for hostname, tags := range hostTags { plog.Debugf("Handle %s's events.", hostname) var host *database.Host host, err := database.HostGetByName(hostname) if err != nil { plog.Warningf("Can't find Host %s.", hostname) return } else if host == nil { // save to database host = &database.Host{ Name: hostname, Status: HostInitialStatus, Disabled: false, } if err := database.HostInsert(host); err != nil { plog.Warning("Save host failed", err) continue } } // update host states var states []*database.HostState states, err = database.StateGetAll(host.Id) if err != nil { plog.Warning("Can't find Host states") continue } for tag, status := range tags { var state *database.HostState for i := range states { if states[i].Tag == tag { state = states[i] break } } if state == nil { // if we don't find matched state state = &database.HostState{ HostId: host.Id, Tag: tag, FailedTimes: 0, } if err := database.StateInsert(state); err != nil { plog.Warning("Save host state failed", err) continue } } if !host.Disabled { if status == "active" && 
state.FailedTimes > 0 { state.FailedTimes -= 1 } else if status == "failed" { state.FailedTimes += 1 } } database.StateUpdateFields(state, "failed_times") } states, err = database.StateGetAll(host.Id) if err != nil { plog.Warning("Can't find Host states") return } // update host status plog.Debugf("update %s's FSM.", hostname) updateHostFSM(host, states) // judge if a host is down if p.getDecision(host, states) { p.fenceHost(host, states) } } } func (p *PolicyEngine) getDecision(host *database.Host, states []*database.HostState) bool { if host.Disabled { return false } statusDecision := false for _, status := range doFenceStatus { if host.Status == status { statusDecision = true } } var decision uint = 0 for _, s := range states { // judge if one network is down. if s.FailedTimes >= 6 { decision |= flagTagMap[s.Tag] } } return statusDecision && p.decisionMatrix[decision] } func (p *PolicyEngine) fenceHost(host *database.Host, states []*database.HostState) { defer func() { if err := recover(); err != nil { plog.Warning("unexpected error during HandleEvents: ", err) } }() // check if we have disabled fence operation globally if p.config.Fence.DisableFenceOps { plog.Info("fence operation have been disabled.") return } plog.Infof("Begin fence host %s", host.Name) // update host status host.Status = HostFencingStatus saveHost(host) // execute power off through IPMI fencers, err := database.FencerGetByHost(host.Id) if err != nil || len(fencers) < 1 { plog.Warning("Can't find fencers with given host: ", host.Name) return } var IPMIFencers []FencerInterface for _, fencer := range fencers { IPMIFencers = append(IPMIFencers, NewFencer(fencer)) } plog.Debug("Begin execute fence operation") for _, fencer := range IPMIFencers { if err := fencer.Fence(); err != nil { plog.Warningf("Fence operation failed on host %s", host.Name) continue } plog.Infof("Fence operation successed on host: %s", host.Name) break } // evacuate all virtual machine on that host nova, err := 
NewNovaClient(&p.config.Openstack) if err != nil { plog.Warning("Can't create nova client: ", err) return } services, err := nova.ListServices() if err != nil { plog.Warning("Can't get service list", err) return } for _, service := range services { if host.Name == service.Host && service.Binary == "nova-compute" { nova.ForceDownService(service) nova.DisableService(service, "disabled by themis monitor") } } servers, err := nova.ListServers(host.Name) if err != nil { plog.Warning("Can't get service list: ", err) return } for _, server := range servers { id := server.ID plog.Infof("Try to evacuate instance: %s", id) nova.Evacuate(id) } // disable host status host.Status = HostFencedStatus host.Disabled = true saveHost(host) }
package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.

// MongoClusterSpec defines the desired state of MongoCluster
// +k8s:openapi-gen=true
type MongoClusterSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
	Mongo MongoSettings `json:"mongo,omitempty"`
}

// MongoSettings define the specification of the mongo cluster
type MongoSettings struct {
	Image               string              `json:"image,omitempty"`               // container image for the mongod pods
	ImagePullPolicy     corev1.PullPolicy   `json:"imagePullPolicy,omitempty"`     // pull policy for that image
	Replicas            int32               `json:"replicas,omitempty"`            // desired replica-set member count
	ReplSet             string              `json:"replSet,omitempty"`             // mongod --replSet name
	WiredTigerCacheSize string              `json:"wiredTigerCacheSize,omitempty"` // mongod --wiredTigerCacheSizeGB value
	BindIp              string              `json:"bindIp,omitempty"`              // mongod --bind_ip value
	SmallFiles          bool                `json:"smallfiles,omitempty"`          // mongod --smallfiles flag
	Noprealloc          bool                `json:"noprealloc,omitempty"`          // mongod --noprealloc flag
	Resources           *MongoResources     `json:"resources,omitempty"`           // container resource requests/limits
	Storage             MongoStorage        `json:"storage"`                       // persistent volume settings (required)
	Tolerations         []corev1.Toleration `json:"tolerations,omitempty"`         // pod scheduling tolerations
}

// MongoResources sets the limits and requests for a container
type MongoResources struct {
	Requests CPUAndMem `json:"requests,omitempty"`
	Limits   CPUAndMem `json:"limits,omitempty"`
}

// CPUAndMem defines how many cpu and ram the container will request/limit
type CPUAndMem struct {
	CPU    string `json:"cpu"`
	Memory string `json:"memory"`
}

// MongoStorage describes the persistent volume claim created per member.
type MongoStorage struct {
	StorageClassName string                      `json:"storageClassName,omitempty"`
	Resources        corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
}

// MongoClusterStatus defines the observed state of MongoCluster
// +k8s:openapi-gen=true
type MongoClusterStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
	// +optional
	ObservedGeneration *int64   `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
	Replicas           int32    `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
	UpdateAt           string   `json:"updateAt,omitempty"`      // timestamp of the last status update
	ServiceName        string   `json:"serviceName,omitempty"`   // headless service created for the cluster
	ConfigMapName      string   `json:"configMapName,omitempty"` // config map created for the cluster
	PodsFQDN           []string `json:"podsFQDN,omitempty"`      // FQDNs of all member pods
	HealthMembers      []string `json:"healthMembers,omitempty"` // members currently reported healthy
	IssueMembers       []string `json:"issueMembers,omitempty"`  // members currently reported unhealthy
	PrimaryFQDN        string   `json:"primaryFQDN,omitempty"`   // FQDN of the current replica-set primary
	IsReady            bool     `json:"isReady,omitempty"`       // true when the replica set is serving
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// MongoCluster is the Schema for the mongoclusters API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
type MongoCluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   MongoClusterSpec   `json:"spec,omitempty"`
	Status MongoClusterStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// MongoClusterList contains a list of MongoCluster
type MongoClusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []MongoCluster `json:"items"`
}

// init registers both CRD types with the scheme builder.
func init() {
	SchemeBuilder.Register(&MongoCluster{}, &MongoClusterList{})
}
// Package producer implements a single partition Kafka producer. package producer import ( "fmt" "time" "github.com/mkocikowski/libkafka/api/Metadata" "github.com/mkocikowski/libkafka/api/Produce" "github.com/mkocikowski/libkafka/batch" "github.com/mkocikowski/libkafka/client" ) func parseResponse(r *Produce.Response) (*Response, error) { if n := len(r.TopicResponses); n != 1 { return nil, fmt.Errorf("unexpected number of topic responses: %d", n) } tr := &(r.TopicResponses[0]) if n := len(tr.PartitionResponses); n != 1 { return nil, fmt.Errorf("unexpected number of partition responses: %d", n) } pr := &(tr.PartitionResponses[0]) return &Response{ ThrottleTimeMs: r.ThrottleTimeMs, Topic: tr.Topic, Partition: pr.Partition, ErrorCode: pr.ErrorCode, BaseOffset: pr.BaseOffset, LogAppendTime: pr.LogAppendTime, LogStartOffset: pr.LogStartOffset, }, nil } type Response struct { Broker *Metadata.Broker Topic string Partition int32 ThrottleTimeMs int32 ErrorCode int16 BaseOffset int64 LogAppendTime int64 LogStartOffset int64 } type PartitionProducer struct { client.PartitionClient Acks int16 // 0: no, 1: leader only, -1: all ISRs (as specified by min.insync.replicas) TimeoutMs int32 } // ProduceStrings with Nop compression. func (p *PartitionProducer) ProduceStrings(now time.Time, values ...string) (*Response, error) { b, err := batch.NewBuilder(now).AddStrings(values...).Build(now) if err != nil { return nil, err } return p.Produce(b) } func produce(c *client.PartitionClient, args *Produce.Args, rs batch.RecordSet) (*Response, error) { resp, err := c.Produce(args, rs) if err != nil { return nil, err } return parseResponse(resp) } // Produce (send) batch to Kafka. Single request is made (no retries). The call // is blocking. See documentation for client.PartitionClient for general // description on how request errors are handled. Specific to Produce requests: // it is possible that the batch was successfuly produced even when the call // returns an error. 
This can happen when the connection is interrupted while // the client is reading the response. This is an edge case but possible. func (p *PartitionProducer) Produce(b *batch.Batch) (*Response, error) { args := &Produce.Args{ ClientId: p.ClientId, Topic: p.Topic, Partition: p.Partition, Acks: p.Acks, TimeoutMs: p.TimeoutMs, } recordSet := b.Marshal() resp, err := produce(&(p.PartitionClient), args, recordSet) if err != nil { if leader := p.Leader(); leader != nil { err = fmt.Errorf("error calling %+v: %w", leader, err) } return nil, err } resp.Broker = p.Leader() return resp, nil }
package uptime

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"reflect"
	"testing"
)

// TestTagList verifies that Tags.List issues a GET and decodes the paginated
// result list.
func TestTagList(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()
	mux.HandleFunc("/check-tags", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "GET")
		fmt.Fprint(w, `{"count": 1, "results": [{"pk": 1}]}`)
	})
	opt := &TagListOptions{Page: 1, PageSize: 1000}
	tags, _, err := client.Tags.List(context.Background(), opt)
	if err != nil {
		t.Errorf("Tags.List returned error: %v", err)
	}
	want := []*Tag{{PK: 1}}
	if !reflect.DeepEqual(tags, want) {
		// Fixed copy/paste bug: this message previously said "Checks.List".
		t.Errorf("Tags.List returned %+v, want %+v", tags, want)
	}
}

// TestTagCreate verifies that Tags.Create POSTs the tag body and decodes the
// created resource from the "results" envelope.
func TestTagCreate(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()
	input := &Tag{
		Tag:      "test tag",
		ColorHex: "#000000",
	}
	mux.HandleFunc("/check-tags", func(w http.ResponseWriter, r *http.Request) {
		v := new(Tag)
		json.NewDecoder(r.Body).Decode(v)
		testMethod(t, r, "POST")
		if !reflect.DeepEqual(v, input) {
			t.Errorf("Request body = %+v, want %+v", v, input)
		}
		fmt.Fprint(w, `{"results": {"tag": "test tag", "color_hex": "#000000"}}`)
	})
	tag, _, err := client.Tags.Create(context.Background(), input)
	if err != nil {
		t.Errorf("Tags.Create returned error: %v", err)
	}
	want := &Tag{Tag: "test tag", ColorHex: "#000000"}
	if !reflect.DeepEqual(tag, want) {
		t.Errorf("Tags.Create returned %+v, want %+v", tag, want)
	}
}

// TestTagGet verifies that Tags.Get issues a GET for a single tag by primary key.
func TestTagGet(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()
	mux.HandleFunc("/check-tags/1", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "GET")
		fmt.Fprint(w, `{"pk": 1}`)
	})
	tag, _, err := client.Tags.Get(context.Background(), 1)
	if err != nil {
		t.Errorf("Tags.Get returned error: %v", err)
	}
	want := &Tag{
		PK: 1,
	}
	if !reflect.DeepEqual(tag, want) {
		t.Errorf("Tags.Get returned %+v, want %+v", tag, want)
	}
}

// TestTagUpdate verifies that Tags.Update PATCHes the tag and decodes the
// updated resource from the "results" envelope.
func TestTagUpdate(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()
	input := &Tag{
		PK:       1,
		Tag:      "test tag",
		ColorHex: "#000000",
	}
	mux.HandleFunc("/check-tags/1", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "PATCH")
		v := &Tag{}
		json.NewDecoder(r.Body).Decode(v)
		if !reflect.DeepEqual(v, input) {
			t.Errorf("Request body = %+v, want %+v", v, input)
		}
		fmt.Fprint(w, `{"results": {"pk": 1}}`)
	})
	tag, _, err := client.Tags.Update(context.Background(), input)
	if err != nil {
		t.Errorf("Tags.Update returned error: %v", err)
	}
	want := &Tag{PK: 1}
	if !reflect.DeepEqual(tag, want) {
		t.Errorf("Tags.Update returned %+v, want %+v", tag, want)
	}
}

// TestTagDelete verifies that Tags.Delete issues a DELETE for the tag.
func TestTagDelete(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()
	mux.HandleFunc("/check-tags/1", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "DELETE")
	})
	_, err := client.Tags.Delete(context.Background(), 1)
	if err != nil {
		t.Errorf("Tags.Delete returned error: %v", err)
	}
}
package main // type ListNode struct { // Val int // Next *ListNode // } func partition(head *ListNode, x int) *ListNode { dummy1 := &ListNode{Val: -1} cur1 := dummy1 dummy2 := &ListNode{Val: -1} cur2 := dummy2 for head != nil { if head.Val < x { cur1.Next = head cur1 = cur1.Next } else { cur2.Next = head cur2 = cur2.Next } temp := head.Next head.Next = nil head = temp } cur1.Next = dummy2.Next return dummy1.Next }
package modules

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"sync/atomic"
	"time"

	"github.com/buguang01/Logger"
	"github.com/buguang01/bige/messages"
	"github.com/buguang01/util/threads"
)

// WebSetIpPort sets the listen address of the web module (e.g. ":8080").
func WebSetIpPort(ipPort string) options {
	return func(mod IModule) {
		mod.(*WebModule).ipPort = ipPort
	}
}

// WebSetTimeout sets the request timeout. The argument is interpreted as a
// number of seconds (it is multiplied by time.Second here).
func WebSetTimeout(timeout time.Duration) options {
	return func(mod IModule) {
		mod.(*WebModule).timeout = timeout * time.Second
	}
}

// WebSetTimeoutFunc sets the callback invoked when handling a message times
// out; the timed-out message is passed in.
func WebSetTimeoutFunc(timeoutfunc func(webmsg messages.IHttpMessageHandle, w http.ResponseWriter, req *http.Request)) options {
	return func(mod IModule) {
		mod.(*WebModule).timeoutFun = timeoutfunc
	}
}

// WebSetRoute sets the message router used to decode incoming requests.
func WebSetRoute(route messages.IMessageHandle) options {
	return func(mod IModule) {
		mod.(*WebModule).RouteHandle = route
	}
}

// WebModule serves HTTP requests, decodes them through RouteHandle, and
// dispatches the resulting messages, enforcing a per-request timeout.
type WebModule struct {
	ipPort      string                  // listen address
	timeout     time.Duration           // request timeout
	RouteHandle messages.IMessageHandle // message router
	// timeoutFun is called with the message whose handling timed out.
	timeoutFun func(webmsg messages.IHttpMessageHandle, w http.ResponseWriter, req *http.Request)
	getnum     int64             // total number of messages received (atomic)
	runing     int64             // number of messages currently in flight (atomic)
	httpServer *http.Server      // the underlying HTTP server
	thgo       *threads.ThreadGo // goroutine manager
}

// NewWebModule builds a WebModule with defaults (":8080", 30s timeout,
// JSON routing, webTimeoutRun as the timeout callback), then applies opts.
func NewWebModule(opts ...options) *WebModule {
	result := &WebModule{
		ipPort:      ":8080",
		timeout:     30 * time.Second,
		timeoutFun:  webTimeoutRun,
		getnum:      0,
		runing:      0,
		thgo:        threads.NewThreadGo(),
		RouteHandle: messages.JsonMessageHandleNew(),
	}
	for _, opt := range opts {
		opt(result)
	}
	return result
}

// Init 初始化 — builds the HTTP server and routes every path to Handle.
func (mod *WebModule) Init() {
	mod.httpServer = &http.Server{
		Addr:         mod.ipPort,
		WriteTimeout: mod.timeout,
	}
	// More server parameters can be added here later, as needed.
	mux := http.NewServeMux()
	// All request handling goes through Handle.
	mux.HandleFunc("/", mod.Handle)
	mod.httpServer.Handler = mux
}

// Start runs ListenAndServe on a managed goroutine; ErrServerClosed is
// treated as a normal shutdown, anything else is logged as an error.
// NOTE(review): the "unexpecteed" typo below is in a runtime log string and
// is preserved here; fix it in a behavior-changing commit if desired.
func (mod *WebModule) Start() {
	mod.thgo.Go(func(ctx context.Context) {
		Logger.PStatus("Web Module Start!")
		err := mod.httpServer.ListenAndServe()
		if err != nil {
			if err == http.ErrServerClosed {
				Logger.PStatus("Web run Server closed under requeset!!")
			} else {
				Logger.PError(err, "Server closed unexpecteed.")
			}
		}
	})
}

// Stop closes the HTTP server, then waits for all in-flight handler
// goroutines to finish.
func (mod *WebModule) Stop() {
	if err := mod.httpServer.Close(); err != nil {
		Logger.PError(err, "Close Web Module.")
	}
	mod.thgo.CloseWait()
	Logger.PStatus("Web Module Stop.")
}

// PrintStatus returns a one-line received/in-flight counter summary.
func (mod *WebModule) PrintStatus() string {
	return fmt.Sprintf(
		"\r\n\t\tWeb Module\t:%d/%d\t(get/runing)",
		atomic.LoadInt64(&mod.getnum),
		atomic.LoadInt64(&mod.runing))
}

// Handle receives every HTTP request: it decodes the body into a message,
// runs the message handler on its own goroutine, and races it against a
// timer slightly shorter than the server write timeout so a timeout
// response can still be written.
func (mod *WebModule) Handle(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	mod.thgo.Wg.Add(1)
	defer mod.thgo.Wg.Done()
	atomic.AddInt64(&mod.getnum, 1)
	atomic.AddInt64(&mod.runing, 1)
	defer atomic.AddInt64(&mod.runing, -1)
	// Fire 2s before WriteTimeout so there is still time to respond.
	// NOTE(review): assumes WriteTimeout > 2s; a smaller setting yields a
	// non-positive timer duration — confirm against allowed configs.
	timeout := time.NewTimer(mod.httpServer.WriteTimeout - 2*time.Second)
	// NOTE(review): read error from the request body is silently ignored.
	buff, _ := ioutil.ReadAll(req.Body)
	// fmt.Println(string(buff))
	msg, err := mod.RouteHandle.Unmarshal(buff)
	if err != nil {
		Logger.PInfo("web RouteHandle Unmarshal Error:%s", err.Error())
		return
	}
	modmsg, ok := msg.(messages.IHttpMessageHandle)
	if !ok {
		Logger.PInfo("Not is Web Msg:%+v", msg)
		return
	} else {
		Logger.PInfo("Web Get Msg:%+v", msg)
	}
	threads.Try(
		func() {
			g := threads.NewGoRun(
				func() {
					modmsg.HttpDirectCall(w, req)
				}, nil)
			select {
			case <-g.Chanresult:
				// Handler finished in time.
				timeout.Stop()
				break
			case <-timeout.C:
				// Handler may still be running, but we must answer now.
				Logger.PDebug("web timeout msg:%+v", modmsg)
				if mod.timeoutFun != nil {
					mod.timeoutFun(modmsg, w, req)
				}
				break
			}
			// The delegated message handler has been invoked above.
		},
		func(err interface{}) {
			// Panic recovery path: log and send a marker response.
			Logger.PFatal(err)
			w.Write([]byte("catch!"))
		}, nil)
}

// webTimeoutRun is the default timeout callback: it just writes a marker body.
func webTimeoutRun(webmsg messages.IHttpMessageHandle, w http.ResponseWriter, req *http.Request) {
	w.Write([]byte("timeout Run!"))
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package power

import (
	"context"
	"regexp"
	"strings"
	"time"

	"chromiumos/tast/ctxutil"
	"chromiumos/tast/dut"
	"chromiumos/tast/errors"
	"chromiumos/tast/remote/firmware"
	"chromiumos/tast/remote/firmware/fixture"
	"chromiumos/tast/remote/powercontrol"
	"chromiumos/tast/testing"
	"chromiumos/tast/testing/hwdep"
)

// init registers the BootWithOnlyBattery test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         BootWithOnlyBattery,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies DUT boots with battery after unplugging AC power supply",
		Contacts:     []string{"ambalavanan.m.m@intel.com", "intel-chrome-system-automation-team@intel.com"},
		SoftwareDeps: []string{"chrome"},
		ServiceDeps:  []string{"tast.cros.security.BootLockboxService"},
		Vars:         []string{"servo"},
		HardwareDeps: hwdep.D(hwdep.ChromeEC(), hwdep.Battery()),
		Fixture:      fixture.NormalMode,
	})
}

// BootWithOnlyBattery removes AC power via servo, logs into Chrome, and then
// verifies through ectool, power_supply_info, and the servo EC console that
// the DUT is running on battery (discharging, S0). Cleanup re-attaches the
// charger on the unshortened context.
func BootWithOnlyBattery(ctx context.Context, s *testing.State) {
	// Reserve 30s of the deadline for the deferred cleanup below.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 30*time.Second)
	defer cancel()

	dut := s.DUT()
	h := s.FixtValue().(*fixture.Value).Helper

	if err := h.RequireServo(ctx); err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}
	if err := h.RequireConfig(ctx); err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}

	s.Log("Stopping power supply")
	if err := h.SetDUTPower(ctx, false); err != nil {
		s.Fatal("Failed to remove charger: ", err)
	}
	// Poll until the servo confirms the charger is actually detached.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if attached, err := h.Servo.GetChargerAttached(ctx); err != nil {
			return err
		} else if attached {
			return errors.New("charger is still attached - use Servo V4 Type-C or supply RPM vars")
		}
		return nil
	}, &testing.PollOptions{Timeout: 10 * time.Second}); err != nil {
		s.Fatal("Failed to check if charger is disconnected via Servo V4: ", err)
	}

	// Cleanup runs on cleanupCtx so it still has time after ctx expires.
	defer func(ctx context.Context) {
		s.Log("Performing cleanup")
		if err := h.SetDUTPower(ctx, true); err != nil {
			s.Fatal("Failed to attach charger: ", err)
		}
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			if attached, err := h.Servo.GetChargerAttached(ctx); err != nil {
				return err
			} else if !attached {
				return errors.New("charger is not attached at cleanup - use Servo V4 Type-C or supply RPM vars")
			}
			return nil
		}, &testing.PollOptions{Timeout: 10 * time.Second}); err != nil {
			s.Fatal("Failed to check if charger is connected via Servo V4: ", err)
		}
	}(cleanupCtx)

	// Perform a Chrome login.
	s.Log("Login to Chrome")
	if err := powercontrol.ChromeOSLogin(ctx, dut, s.RPCHint()); err != nil {
		s.Fatal("Failed to login to chrome: ", err)
	}

	// If DUT has EC support check battery status with ectool commands.
	if hasECAccess(ctx, dut) {
		if err := verifyEctoolBattery(ctx, dut); err != nil {
			s.Fatal("Failed to verify ectool battery: ", err)
		}
		if err := verifyEctoolChargeState(ctx, dut); err != nil {
			s.Fatal("Failed to verify ectool chargestate show: ", err)
		}
	}

	// Check battery info with power_supply_info command.
	if err := verifyPowerSupplyInfo(ctx, dut); err != nil {
		s.Fatal("Failed to verify power supply info: ", err)
	}

	// Even after unplugging AC power supply, DUT has to be in S0 state.
	if err := verifyECPowerInfo(ctx, h); err != nil {
		s.Fatal("Failed to verify EC power state info via servo: ", err)
	}

	// Check battery info via servo.
	if err := verifyECBattery(ctx, h); err != nil {
		s.Fatal("Failed to verify EC battery info via servo: ", err)
	}
}

// verifyEctoolBattery checks ectool battery flag is discharging or not.
// verifyEctoolBattery runs `ectool battery` on the DUT and verifies the
// flags line reports a present, discharging battery.
func verifyEctoolBattery(ctx context.Context, dut *dut.DUT) error {
	out, err := dut.Conn().CommandContext(ctx, "ectool", "battery").Output()
	if err != nil {
		return errors.Wrap(err, "failed to get ectool battery info")
	}
	dischargeFlagRe := regexp.MustCompile(`Flags.*BATT_PRESENT.*DISCHARGING`)
	if !dischargeFlagRe.MatchString(string(out)) {
		return errors.New("unexpected battery flag: got charging, want discharging")
	}
	return nil
}

// verifyPowerSupplyInfo checks battery power supply status is discharging or not.
func verifyPowerSupplyInfo(ctx context.Context, dut *dut.DUT) error {
	out, err := dut.Conn().CommandContext(ctx, "power_supply_info").Output()
	if err != nil {
		return errors.Wrap(err, "failed to get power supply info")
	}
	dischargeStateRe := regexp.MustCompile(`state.*Discharging`)
	if !dischargeStateRe.MatchString(string(out)) {
		return errors.New("unexpected power_supply_info state: got charging, want discharging")
	}
	return nil
}

// verifyEctoolChargeState checks ectool chargestate AC status is zero or not.
// NOTE(review): the pattern "ac.*0" is loose — it also matches e.g. "ac = 10"
// or any later "0" on the line; confirm against actual `ectool chargestate
// show` output and tighten (e.g. `ac\s*=\s*0\b`) if needed.
func verifyEctoolChargeState(ctx context.Context, dut *dut.DUT) error {
	out, err := dut.Conn().CommandContext(ctx, "ectool", "chargestate", "show").Output()
	if err != nil {
		return errors.Wrap(err, "failed to get charge state info")
	}
	batteryACPowerRe := regexp.MustCompile("ac.*0")
	if !batteryACPowerRe.MatchString(string(out)) {
		return errors.New("unexpected AC flag in chargestate info: got 1, want 0")
	}
	return nil
}

// verifyECPowerInfo checks whether DUT is in S0 EC power state or not via servo.
func verifyECPowerInfo(ctx context.Context, h *firmware.Helper) error {
	got, err := h.Servo.GetECSystemPowerState(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get power state via servo")
	}
	if want := "S0"; got != want {
		return errors.Errorf("unexpected EC power state: got %s, want %s", got, want)
	}
	return nil
}

// verifyECBattery checks whether battery status is discharging or not via servo.
func verifyECBattery(ctx context.Context, h *firmware.Helper) error { out, err := h.Servo.RunECCommandGetOutput(ctx, "battery", []string{`Status:.*DCHG.*`}) if err != nil { return errors.Wrap(err, "failed to run command in EC console") } want := "DCHG" if len(out) == 0 { return errors.Wrap(err, "failed to get EC command output") } got := out[0][0] if !strings.Contains(got, want) { return errors.Errorf("unexpected EC battery info: got %s, want %s", got, want) } return nil } // hasECAccess return true if DUT has EC access to execute ectool commands. func hasECAccess(ctx context.Context, dut *dut.DUT) bool { if err := dut.Conn().CommandContext(ctx, "ectool", "version").Run(); err != nil { return false } return true }
// Package requests declares the JSON request bodies accepted by the auth
// endpoints.
package requests

// AuthLoginOrSignupEmail is the body for the combined login-or-signup
// endpoint that only needs an email address.
type AuthLoginOrSignupEmail struct {
	Email string `json:"email"`
}

// AuthSignupEmail is the body for email/password signup.
type AuthSignupEmail struct {
	Email    string `json:"email"`
	Password string `json:"password"`
}

// AuthLoginEmail is the body for email/password login.
type AuthLoginEmail struct {
	Email    string `json:"email"`
	Password string `json:"password"`
}

// AuthLoginOrSignupSSO is the body for single-sign-on login/signup carrying
// the provider token and the provider's name.
type AuthLoginOrSignupSSO struct {
	Token    string `json:"token"`
	Provider string `json:"provider"`
}
package main

// main demonstrates Go's for-loop forms: the classic three-clause loop,
// the condition-only ("while") loop, and the infinite loop (left commented).
func main() {
	{
		// Three-clause loop; Go has no parenthesized C-style for.
		total := 0
		for i := 0; i < 10; i++ {
			total += i
		}
		println(total)
	}
	{
		// Condition-only for — Go's equivalent of a while loop.
		total := 1
		for total < 1000 {
			total += total
		}
		println(total)
	}
	{
		// Same while-style doubling loop once more.
		total := 1
		for total < 1000 {
			total += total
		}
		println(total)
	}
	{
		// A bare `for {}` loops forever (IDEs flag it), so it stays commented.
		//for {
		//}
	}
}
package maths

import (
	"github.com/gonum/matrix/mat64"
	"github.com/gonum/stat"
)

// Cov returns the covariance matrix of mat, computed by gonum's
// stat.CovarianceMatrix with no destination and no weights.
// NOTE(review): presumably rows are observations and columns are variables —
// confirm against the gonum stat.CovarianceMatrix documentation.
func Cov(mat *mat64.Dense) *mat64.SymDense {
	return stat.CovarianceMatrix(nil, mat, nil)
}
package misc // IfElse 模拟三元操作符 func IfElse(condition bool, positiveVal, negativeVal interface{}) interface{} { if condition { return positiveVal } return negativeVal }
package main

import (
	"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/app"

	// Each blank import below registers one CLI subcommand with the app
	// via its package init side effects.
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/autorun"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/geoip"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/info"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/list"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/onboard"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/reset"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/rm"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/run"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/show"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/upload"
	_ "github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/version"
)

// main delegates to the CLI application after all subcommands have
// registered themselves through the blank imports above.
func main() {
	app.Run()
}
package main

import (
	"fmt"

	"github.com/humin09/demo/example"
	"github.com/humin09/helloworld/hello"
	"rsc.io/quote"
)

// Hello is func of hello: it exercises the hello and quote packages and
// prints the result of example.Add(1, 2).
func Hello() {
	fmt.Println("demo hello begin")
	hello.Hello()
	quote.Hello()
	i := example.Add(1, 2)
	fmt.Printf("%d", i)
}

// World is func of world: it delegates to hello.World.
func World() {
	fmt.Println("demo world begin")
	hello.World()
}

// main runs the two demo functions in order.
func main() {
	Hello()
	World()
}
package main

// LeetCode 990. Satisfiability of Equality Equations.
//
// Each equation is a 4-character string of the form "a==b" or "a!=b", where
// a and b are single lowercase-letter variables (not necessarily distinct).
// Return true iff some assignment of integers to the variables satisfies
// every equation.
//
// Examples:
//	["a==b","b!=a"]                -> false (a=b contradicts a!=b)
//	["b==a","a==b"]                -> true
//	["c==c","f!=a","f==b","b==c"]  -> true
//
// Approach: union-find over the 26 possible variables. First union every
// "==" pair, then verify that no "!=" pair ended up in the same set.
func equationsPossible(equations []string) bool {
	// parents[i] is the parent of variable i ('a' is 0 ... 'z' is 25);
	// every variable starts as its own root.
	var parents [26]byte
	for i := range parents { // idiom fix: was `for i, _ := range` (gofmt -s)
		parents[i] = byte(i)
	}
	// find returns the set representative, halving the path as it walks.
	find := func(index byte) byte {
		for parents[index] != index {
			parents[index] = parents[parents[index]]
			index = parents[index]
		}
		return index
	}
	// union merges the sets containing i1 and i2.
	union := func(i1, i2 byte) {
		parents[find(i1)] = find(i2)
	}
	// Pass 1: merge all equalities.
	for _, eq := range equations {
		if eq[1] == '=' {
			union(eq[0]-'a', eq[3]-'a')
		}
	}
	// Pass 2: any inequality inside a single set is a contradiction.
	for _, eq := range equations {
		if eq[1] == '!' && find(eq[0]-'a') == find(eq[3]-'a') {
			return false
		}
	}
	return true
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package text_test

import (
	"io"
	"testing"

	"github.com/google/gapid/core/fault"
	"github.com/google/gapid/core/text"
)

// TestWriter verifies that text.Writer buffers arbitrary write fragments and
// invokes the callback once per complete line (trailing text with no newline
// is flushed by Close).
func TestWriter(t *testing.T) {
	got := []string{}
	w := text.Writer(func(s string) error {
		got = append(got, s)
		return nil
	})
	// Fragments deliberately split within and across lines.
	input := []string{
		"A short part",
		" of a long line\n",
		"And now",
		" a split line\nAnd another one too\n",
		"And finally",
		" fragments",
		" with no",
		" newlines",
	}
	expect := []string{
		"A short part of a long line",
		"And now a split line",
		"And another one too",
		"And finally fragments with no newlines",
	}
	for _, in := range input {
		io.WriteString(w, in)
	}
	// Close flushes the final, newline-less fragment run.
	w.Close()
	if len(got) != len(expect) {
		t.Errorf("Incorrect number of lines, got %d expected %d", len(got), len(expect))
	}
	for i, expect := range expect {
		if got[i] != expect {
			t.Errorf("Got %q expected %q", got[i], expect)
		}
	}
}

// TestFailWriter verifies that an error returned by the line callback is
// propagated to the Write call that triggered it: the first two lines
// succeed, the third fails.
func TestFailWriter(t *testing.T) {
	limit := 2
	w := text.Writer(func(s string) error {
		limit--
		if limit < 0 {
			return fault.Const("Failed")
		}
		return nil
	})
	_, err := io.WriteString(w, "A two part string\nThat should be fine\n")
	if err != nil {
		t.Errorf("First write failed")
	}
	_, err = io.WriteString(w, "But the next one should fail\n")
	if err == nil {
		t.Errorf("Second write should have failed")
	}
}
package main

import (
	"encoding/json"
	"net/http"
	"strconv"
)

// reqInvalid writes a 400 Bad Request JSON error for a TokenReview body
// that could not be decoded.
func reqInvalid(w http.ResponseWriter, r *http.Request) {
	response := &Json_decode_error{
		Status:  "400",
		Details: "Invalid TokenReview ( Json decode failed )",
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusBadRequest) // 400 Bad Request
	if err := json.NewEncoder(w).Encode(response); err != nil {
		panic(err)
	}
}

// invalidLogin writes a TokenReview response with authenticated=false.
// Per the Kubernetes webhook contract this is an HTTP 200 — the rejection
// is expressed in the body, not the status code.
func invalidLogin(w http.ResponseWriter, r *http.Request) {
	response := &Auth_unsuccessfull{
		APIVersion: APIVERSION,
		Kind:       "TokenReview",
		Status: &Authenticated{
			Authenticated: false,
		},
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(response); err != nil {
		panic(err)
	}
}

// loginSuccess writes a TokenReview response with authenticated=true,
// carrying the user's id (as UID), username and group memberships.
func loginSuccess(w http.ResponseWriter, r *http.Request, r_id int, r_username string, r_groups []string) {
	response := &Auth_response_successfull{
		APIVersion: APIVERSION,
		Kind:       "TokenReview",
		Status: &AStatus{
			Authenticated: true,
			Userinfo: &Userinfo{
				Groups:   r_groups,
				UID:      strconv.Itoa(r_id),
				Username: r_username,
			},
		},
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(response); err != nil {
		panic(err)
	}
}
package schema

import (
	"github.com/facebook/ent"
	"github.com/facebook/ent/schema/field"
	"github.com/google/uuid"
)

// Users holds the schema definition for the Users entity.
type Users struct {
	ent.Schema
}

// Fields of the Users.
func (Users) Fields() []ent.Field {
	return []ent.Field{
		// Primary key: a random UUID generated per row.
		field.UUID("id", uuid.UUID{}).Default(uuid.New),
		// Display name; may be empty.
		field.String("name").Optional(),
		// Login name; required and unique.
		field.String("username").NotEmpty().Unique(),
		// Sensitive() keeps the password out of String()/logs.
		field.String("password").NotEmpty().Sensitive(),
		// Authorization role; defaults to "UserA".
		field.String("role").Default("UserA"),
	}
}

// Edges of the Users. This entity has no relations.
func (Users) Edges() []ent.Edge {
	return nil
}
package utils

import (
	"fmt"
	"log"
	"math/rand"
	"net/http"
	"os"
	"sync"
	"time"
)

// Package-level channels shared by the siege goroutines:
// in carries request URLs to the worker pool, out carries response bodies
// back, and quit signals the sender to stop once the receiver is done.
var in chan string
var out chan string
var quit chan bool

// SiegeMake runs a load test bounded by limNum requests and/or limSec
// seconds (0 disables the respective limit). It starts a fixed worker pool
// plus one sender and one receiver goroutine, then waits for completion.
//
// NOTE(review): the workers are added to the same WaitGroup that is waited
// on BEFORE close(in); the workers only exit when `in` is closed, so
// wg.Wait() appears to never return — suspected deadlock, verify at runtime.
func SiegeMake(limNum int, limSec int) {
	wg := new(sync.WaitGroup)
	wg.Add(2)
	const worker = 12
	in = make(chan string, 2*worker)
	out = make(chan string, 2*worker)
	quit = make(chan bool, 2)
	for i := 0; i < worker; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for url := range in {
				out <- getRespBody(url)
			}
		}()
	}
	rand.Seed(time.Now().UnixNano())
	go SiegeSendAll(limNum, limSec, wg)
	go SiegeReceiveAll(limNum, limSec, wg)
	wg.Wait()
	close(in)
	close(out)
}

// SiegeRand builds a random query URL against the local /names endpoint,
// with random name and yearOfBirth parameters.
func SiegeRand() string {
	// defer Elapsed(time.Now(), "SiegeRand")
	req, err := http.NewRequest("GET", "http://127.0.0.1:8000/names", nil)
	if err != nil {
		log.Print(err)
		os.Exit(1)
	}
	q := req.URL.Query()
	q.Add("name", randName())
	q.Add("yearOfBirth", randYearOfBirth())
	req.URL.RawQuery = q.Encode()
	// resp, err = http.Get(req.URL.String())
	url := req.URL.String()
	return url
}

// SiegeSendAll feeds random URLs into `in` until it receives a quit signal
// or hits the request-count / elapsed-time limit.
func SiegeSendAll(limNum int, limSec int, wg2 *sync.WaitGroup) {
	defer Elapsed(time.Now(), "SiegeSendAll")
	defer wg2.Done()
	timeStart := time.Now()
	var timeEnd time.Duration
	cnt := 0
	for {
		cnt++
		timeEnd = time.Since(timeStart)
		select {
		case <-quit:
			fmt.Println("Send ", cnt, "requests")
			return
		default:
			url := SiegeRand()
			in <- url
		}
		if limNum != 0 && cnt >= limNum {
			fmt.Println("Send ", cnt, "requests")
			return
		}
		if limSec != 0 && timeEnd.Seconds() >= float64(limSec) {
			fmt.Println("Send ", cnt, "requests")
			return
		}
	}
}

// SiegeReceiveAll drains `out`, counting responses, and signals quit when
// it reaches the request-count or elapsed-time limit.
// NOTE(review): the `ok == false` branch of the receive is not handled; if
// `out` were ever closed while this loop runs, it would spin without
// incrementing cnt.
func SiegeReceiveAll(limNum int, limSec int, wg2 *sync.WaitGroup) {
	defer Elapsed(time.Now(), "SiegeReceiveAll")
	defer wg2.Done()
	timeStart := time.Now()
	var timeEnd time.Duration
	cnt := 0
	for {
		timeEnd = time.Since(timeStart)
		if body, ok := <-out; ok {
			cnt += 1
			if false {
				fmt.Println(body)
			}
		}
		if limNum != 0 && cnt >= limNum {
			fmt.Println("Received ", cnt, "requests")
			quit <- true
			return
		}
		if limSec != 0 && timeEnd.Seconds() >= float64(limSec) {
			fmt.Println("Received ", cnt, "requests")
			quit <- true
			return
		}
	}
}
package dcrlibwallet func (mw *MultiWallet) AllWallets() (wallets []*Wallet) { for _, wallet := range mw.wallets { wallets = append(wallets, wallet) } return wallets } func (mw *MultiWallet) WalletsIterator() *WalletsIterator { return &WalletsIterator{ currentIndex: 0, wallets: mw.AllWallets(), } } func (walletsIterator *WalletsIterator) Next() *Wallet { if walletsIterator.currentIndex < len(walletsIterator.wallets) { wallet := walletsIterator.wallets[walletsIterator.currentIndex] walletsIterator.currentIndex++ return wallet } return nil } func (walletsIterator *WalletsIterator) Reset() { walletsIterator.currentIndex = 0 }
package main

import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	_ "github.com/denisenkom/go-mssqldb"
	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/mongo"
	"github.com/mongodb/mongo-go-driver/mongo/options"
	"log"
	"strconv"
	"strings"
	"time"
)

// database abstracts the course-registration storage backend so the app can
// run against either MongoDB or SQL Server.
type database interface {
	init(string) error
	loadCourses(string, string) ([]*courseObj, error)
	registerCourse(string, string, string, string, int64) error
	unRegisterCourse(string, string, string) error
	getRegisterHistory(string, string) ([]byte, error)
	getStudentProfile(string, string) (string, string, error)
}

// MongoDb is the MongoDB-backed implementation of database.
type MongoDb struct {
	dbClient *mongo.Client
}

// SqlDb is the SQL Server-backed implementation of database.
type SqlDb struct {
	dbClient *sql.DB
}

// init connects and pings the MongoDB server at host ds (port 27017).
// NOTE(review): the error from mongo.NewClient is never checked before
// Connect, and the cancel func from context.WithTimeout is discarded.
func (self *MongoDb) init(ds string) (err error) {
	self.dbClient, err = mongo.NewClient(fmt.Sprintf(`mongodb://%s:27017`, ds))
	ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
	err = self.dbClient.Connect(ctx)
	if err != nil {
		log.Println(err)
		return err
	}
	err = self.dbClient.Ping(ctx, nil)
	if err != nil {
		log.Fatal("Can't connect to db server.", err)
		return err
	}
	return nil
}

// loadCourses reads every document in dbName.table and converts each into
// a courseObj.
func (self *MongoDb) loadCourses(dbName, table string) ([]*courseObj, error) {
	ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
	collection := self.dbClient.Database(dbName).Collection(table)
	// NOTE(review): a nil context is passed to Find while ctx is used for
	// cursor iteration — confirm this is intentional.
	cur, err := collection.Find(nil, bson.M{})
	if err != nil {
		log.Println(err)
		return nil, err
	}
	defer cur.Close(ctx)
	courses := make([]*courseObj, 0)
	for cur.Next(ctx) {
		result := course{}
		err := cur.Decode(&result)
		if err != nil {
			log.Println(err)
			return nil, err
		}
		courses = append(courses, NewCourseObj(result.Name, result.Teacher, result.Total, result.Grade))
	}
	return courses, nil
}

// registerCourse appends one registration document to "register-info".
func (self *MongoDb) registerCourse(dbName, student, course, teacher string, timestamp int64) error {
	collection := self.dbClient.Database(dbName).Collection("register-info")
	_, err := collection.InsertOne(nil, bson.M{
		"student":   student,
		"course":    course,
		"teacher":   teacher,
		"timestamp": timestamp,
	})
	return err
}

// unRegisterCourse deletes the most recent registration of `course` by
// `student` (found by sorting on timestamp descending, limit 1).
func (self *MongoDb) unRegisterCourse(dbName, student, course string) error {
	collection := self.dbClient.Database(dbName).Collection("register-info")
	cur, err := collection.Find(nil, bson.M{"student": student, "course": course},
		options.Find().SetSort(bson.M{"timestamp": -1}).SetLimit(1))
	if err != nil {
		log.Println(err)
		return err
	}
	defer cur.Close(nil)
	for cur.Next(nil) {
		result := registerData{}
		cur.Decode(&result)
		collection.DeleteOne(nil, bson.M{"student": result.Student, "timestamp": result.TimeStamp})
		break
	}
	return nil
}

// getRegisterHistory returns the student's registrations, newest first, as
// a JSON document {"data": [...]}.
func (self *MongoDb) getRegisterHistory(dbName, student string) ([]byte, error) {
	registerHistory := struct {
		Data []registerData `json:"data"`
	}{[]registerData{}}
	collection := self.dbClient.Database(dbName).Collection("register-info")
	cur, err := collection.Find(nil, bson.M{"student": student},
		options.Find().SetSort(bson.M{"timestamp": -1}))
	if err != nil {
		return nil, err
	}
	defer cur.Close(nil)
	for cur.Next(nil) {
		result := registerData{}
		cur.Decode(&result)
		registerHistory.Data = append(registerHistory.Data, result)
	}
	return json.Marshal(registerHistory)
}

// getStudentProfile looks up the student's display name and avatar in the
// "profile" collection; returns a "not found" error when absent.
func (self *MongoDb) getStudentProfile(dbName, student string) (string, string, error) {
	profile := struct {
		Name   string `json:"name"`
		Avatar string `json:"avatar"`
	}{}
	collection := self.dbClient.Database(dbName).Collection("profile")
	cur, err := collection.Find(nil, bson.M{"student": student})
	if err != nil {
		return "", "", err
	}
	defer cur.Close(nil)
	if !cur.Next(nil) {
		err = errors.New("not found")
	} else {
		cur.Decode(&profile)
	}
	return profile.Name, profile.Avatar, err
}

// init opens and pings a SQL Server connection.
// NOTE(review): credentials are hard-coded here — move to configuration.
func (self *SqlDb) init(ds string) (err error) {
	var user = "sa"
	var password = "Password"
	var database = "mbxsj"
	connString := fmt.Sprintf("server=%s;database=%s;user id=%s;password=%s", ds, database, user, password)
	db, err := sql.Open("mssql", connString)
	if err != nil {
		log.Fatal(err)
		return err
	}
	err = db.Ping()
	if err != nil {
		log.Fatal(err)
		return err
	}
	self.dbClient = db
	return nil
}

// parseGrade splits a comma-separated grade string ("1, 2, 3") into ints;
// unparseable entries become 0 (the conversion error is discarded).
func parseGrade(grade string) []int {
	s := strings.Split(grade, ",")
	if len(s) == 0 {
		return nil
	}
	g := make([]int, len(s))
	for i, v := range s {
		n, _ := strconv.ParseInt(strings.TrimSpace(v), 10, 32)
		g[i] = int(n)
	}
	return g
}

// loadCourses reads all rows of `table` into courseObj values.
// NOTE(review): the table name is interpolated into the SQL string —
// safe only if `table` is never user-controlled.
func (self *SqlDb) loadCourses(dbName, table string) ([]*courseObj, error) {
	ctx := context.Background()
	sqlString := fmt.Sprintf("SELECT * FROM %s", table)
	rows, err := self.dbClient.QueryContext(ctx, sqlString)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	defer rows.Close()
	courses := make([]*courseObj, 0)
	for rows.Next() {
		result := course{}
		grade := ""
		err := rows.Scan(&result.Name, &result.Teacher, &result.Total, &grade)
		if err != nil {
			log.Println(err)
			return nil, err
		}
		courses = append(courses, NewCourseObj(result.Name, result.Teacher, result.Total, parseGrade(grade)))
	}
	return courses, nil
}

// registerCourse inserts one registration row.
// NOTE(review): values are interpolated with Sprintf — SQL injection risk;
// should use parameterized queries (db.Exec with placeholders).
func (self *SqlDb) registerCourse(dbName, student, course, teacher string, timestamp int64) error {
	sqlString := fmt.Sprintf(`INSERT INTO register_info_test VALUES (N'%s', N'%s', N'%s', %d)`,
		student, course, teacher, timestamp)
	_, err := self.dbClient.Exec(sqlString)
	if err != nil {
		log.Println(err)
	}
	return err
}

// unRegisterCourse deletes the student's most recent registration of course.
// NOTE(review): the SELECT reads from register_info_test but the DELETE
// targets register_info — table-name mismatch, likely a bug left over from
// testing; also the same Sprintf injection risk as registerCourse.
func (self *SqlDb) unRegisterCourse(dbName, student, course string) error {
	ctx := context.Background()
	sqlString := fmt.Sprintf(`SELECT TOP 1 timestamp FROM register_info_test WHERE student='%s' AND course='%s' ORDER BY timestamp DESC`, student, course)
	rows, err := self.dbClient.QueryContext(ctx, sqlString)
	if err != nil {
		log.Println(err)
		return err
	}
	defer rows.Close()
	var timestamp int64
	if !rows.Next() {
		err = errors.New("not found")
		return err
	}
	err = rows.Scan(&timestamp)
	if err != nil {
		log.Println(err)
		return err
	}
	sqlString = fmt.Sprintf(`DELETE FROM register_info WHERE student='%s' AND course='%s' AND timestamp=%d`, student, course, timestamp)
	_, err = self.dbClient.Exec(sqlString)
	if err != nil {
		log.Println(err)
	}
	return err
}

// getRegisterHistory returns the student's registrations, newest first, as
// JSON. Same Sprintf injection caveat as above.
func (self *SqlDb) getRegisterHistory(dbName, student string) ([]byte, error) {
	registerHistory := struct {
		Data []registerData `json:"data"`
	}{[]registerData{}}
	ctx := context.Background()
	sqlString := fmt.Sprintf(`SELECT * FROM register_info WHERE student='%s' ORDER BY timestamp DESC`, student)
	rows, err := self.dbClient.QueryContext(ctx, sqlString)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		result := registerData{}
		err = rows.Scan(&result.Student, &result.Course, &result.Teacher, &result.TimeStamp)
		if err != nil {
			log.Println(err)
		}
		registerHistory.Data = append(registerHistory.Data, result)
	}
	return json.Marshal(registerHistory)
}

// getStudentProfile reads the student's name and avatar from `profile`.
// Same Sprintf injection caveat as above.
func (self *SqlDb) getStudentProfile(dbName, student string) (string, string, error) {
	ctx := context.Background()
	sqlString := fmt.Sprintf("SELECT name, avatar FROM profile WHERE student='%s'", student)
	rows, err := self.dbClient.QueryContext(ctx, sqlString)
	if err != nil {
		log.Println(err)
		return "", "", err
	}
	defer rows.Close()
	name, avatar := "", ""
	if !rows.Next() {
		err = errors.New("not found")
	} else {
		err = rows.Scan(&name, &avatar)
		if err != nil {
			log.Println(err)
		}
		//avatar = "https://xsj.chneic.sh.cn/avatar/" + avatar
	}
	return name, avatar, err
}

// _dbs maps backend names to their implementations; dbClient selects the
// active backend (MongoDB by default).
var _dbs = map[string]database{
	"mongo": &MongoDb{},
	"sql":   &SqlDb{},
}

var dbClient = _dbs["mongo"]

// initDb initializes the selected backend with data source ds.
func initDb(ds string) (err error) {
	return dbClient.init(ds)
}
/*
Copyright 2019 The Skaffold Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"os/exec"
	"sort"
	"strings"
	"text/template"

	"github.com/Masterminds/sprig"

	"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
)

// For testing
var (
	// OSEnviron is indirected so tests can substitute a fake environment.
	OSEnviron = os.Environ
	// funcsMap exposes the custom "cmd" template function (runs a command
	// and substitutes its trimmed stdout).
	funcsMap = template.FuncMap{
		"cmd": runCmdFunc,
	}
)

// ExpandEnvTemplate parses and executes template s with an optional environment map
func ExpandEnvTemplate(s string, envMap map[string]string) (string, error) {
	tmpl, err := ParseEnvTemplate(s)
	if err != nil {
		return "", fmt.Errorf("unable to parse template: %q: %w", s, err)
	}
	return ExecuteEnvTemplate(tmpl, envMap)
}

// ExpandEnvTemplateOrFail parses and executes template s with an optional environment map, and errors if a reference cannot be satisfied.
func ExpandEnvTemplateOrFail(s string, envMap map[string]string) (string, error) { tmpl, err := ParseEnvTemplate(s) if err != nil { return "", fmt.Errorf("unable to parse template: %q: %w", s, err) } tmpl = tmpl.Option("missingkey=error") return ExecuteEnvTemplate(tmpl, envMap) } // ParseEnvTemplate is a simple wrapper to parse an env template func ParseEnvTemplate(t string) (*template.Template, error) { return template.New("envTemplate").Funcs(funcsMap).Funcs(sprig.FuncMap()).Parse(t) } // ExecuteEnvTemplate executes an envTemplate based on OS environment variables and a custom map func ExecuteEnvTemplate(envTemplate *template.Template, customMap map[string]string) (string, error) { envMap := map[string]string{} for _, env := range OSEnviron() { kvp := strings.SplitN(env, "=", 2) envMap[kvp[0]] = kvp[1] } for k, v := range customMap { envMap[k] = v } var buf bytes.Buffer log.Entry(context.TODO()).Debugf("Executing template %v with environment %v", envTemplate, envMap) if err := envTemplate.Execute(&buf, envMap); err != nil { return "", err } return buf.String(), nil } // EvaluateEnvTemplateMap parses and executes all map values as templates based on OS environment variables func EvaluateEnvTemplateMap(args map[string]*string) (map[string]*string, error) { return EvaluateEnvTemplateMapWithEnv(args, nil) } // EvaluateEnvTemplateMapWithEnv parses and executes all map values as templates based on OS and custom environment variables func EvaluateEnvTemplateMapWithEnv(args map[string]*string, env map[string]string) (map[string]*string, error) { if args == nil { return nil, nil } evaluated := map[string]*string{} for k, v := range args { if v == nil { evaluated[k] = nil continue } value, err := ExpandEnvTemplate(*v, env) if err != nil { return nil, fmt.Errorf("unable to get value for key %q: %w", k, err) } evaluated[k] = &value } return evaluated, nil } // MapToFlag parses all map values and returns them as `key=value` with the given flag // Example: --my-flag 
key0=value0 --my-flag key1=value1 --my-flag key2=value2 func MapToFlag(m map[string]*string, flag string) ([]string, error) { kv, err := EvaluateEnvTemplateMap(m) if err != nil { return nil, fmt.Errorf("unable to evaluate build args: %w", err) } var keys []string for k := range kv { keys = append(keys, k) } sort.Strings(keys) var kvFlags []string for _, k := range keys { v := kv[k] if v == nil { kvFlags = append(kvFlags, flag, k) } else { kvFlags = append(kvFlags, flag, fmt.Sprintf("%s=%s", k, *v)) } } return kvFlags, nil } func runCmdFunc(name string, args ...string) (string, error) { cmd := exec.Command(name, args...) out, err := RunCmdOut(context.TODO(), cmd) return strings.TrimSpace(string(out)), err }
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2020 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package avl_test import ( "crypto/rand" "encoding/binary" "fmt" "sort" "strings" "testing" "github.com/bitmark-inc/bitmarkd/avl" ) type stringItem struct { s string } func (s stringItem) String() string { return s.s } func (s stringItem) Compare(x interface{}) int { return strings.Compare(s.s, x.(stringItem).s) } func TestListShort(t *testing.T) { addList := []stringItem{ {"4201"}, {"1254"}, {"8608"}, {"1639"}, {"8950"}, {"6740"}, } doList(t, addList) doTraverse(t, addList) doGet(t, addList) } // to make sure that lots of duplicates do not increment the node // count incorrectly func TestListDuplicates(t *testing.T) { addList := []stringItem{ {"1720"}, {"0506"}, {"8382"}, {"6774"}, {"1247"}, {"1250"}, {"1264"}, {"1258"}, {"1255"}, {"2247"}, {"2004"}, {"2194"}, {"2644"}, {"2169"}, {"8133"}, {"2136"}, {"9651"}, {"4079"}, {"1042"}, {"3579"}, {"3630"}, {"1427"}, {"5843"}, {"9549"}, {"5433"}, {"1274"}, {"9034"}, {"4724"}, {"6179"}, {"5072"}, {"9272"}, {"4030"}, {"4205"}, {"3363"}, {"8582"}, {"1720"}, {"0506"}, {"8382"}, {"6774"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, {"1042"}, } doList(t, addList) doTraverse(t, addList) doGet(t, addList) } func TestListLong(t *testing.T) { addList := []stringItem{ {"8133"}, {"2136"}, {"9651"}, {"4079"}, {"1042"}, {"3579"}, {"3630"}, {"1427"}, {"5843"}, {"9549"}, {"5433"}, {"1274"}, {"9034"}, {"4724"}, {"6179"}, {"5072"}, {"9272"}, {"4030"}, {"4205"}, {"3363"}, {"8582"}, {"1720"}, {"0506"}, {"8382"}, {"6774"}, {"3088"}, {"2329"}, {"9039"}, {"6703"}, {"1027"}, 
{"7297"}, {"6063"}, {"4156"}, {"1005"}, {"0982"}, {"3065"}, {"2553"}, {"0795"}, {"8426"}, {"2377"}, {"0877"}, {"9085"}, {"5918"}, {"2581"}, {"7797"}, {"3028"}, {"5880"}, {"3061"}, {"5212"}, {"6539"}, {"1320"}, {"3581"}, {"3334"}, {"4348"}, {"2934"}, {"8342"}, {"8814"}, {"8736"}, {"1353"}, {"3082"}, {"9620"}, {"0056"}, {"5063"}, {"1245"}, {"7066"}, {"7435"}, {"2999"}, {"7803"}, {"1303"}, {"1697"}, {"0017"}, {"4314"}, {"9926"}, {"7587"}, {"2531"}, {"8123"}, {"5693"}, {"7495"}, {"9975"}, {"5465"}, {"4342"}, {"7958"}, {"7138"}, {"9382"}, {"0672"}, {"5402"}, {"0204"}, {"2397"}, {"2712"}, {"0938"}, {"9610"}, {"3611"}, {"2140"}, {"4289"}, {"9271"}, {"4786"}, {"4145"}, {"1066"}, {"4366"}, {"6716"}, {"8579"}, {"1012"}, {"5935"}, {"8278"}, {"5761"}, {"1871"}, {"6257"}, {"2649"}, {"8643"}, {"1239"}, {"3416"}, {"6146"}, {"7127"}, {"9517"}, {"5788"}, {"9025"}, {"6880"}, {"9064"}, {"4849"}, {"4503"}, {"4898"}, {"6815"}, {"8811"}, {"6745"}, {"6907"}, {"7503"}, {"9869"}, {"5491"}, {"9940"}, {"5955"}, {"3764"}, {"3254"}, {"8048"}, {"5339"}, {"2406"}, {"3137"}, {"0251"}, {"0486"}, {"4202"}, {"1844"}, {"1741"}, {"7154"}, {"4286"}, {"5160"}, {"9472"}, {"2998"}, {"1935"}, {"4758"}, {"6478"}, {"9572"}, {"9254"}, {"6848"}, {"3126"}, {"1848"}, {"7692"}, {"2791"}, {"1504"}, {"3469"}, {"9701"}, {"5077"}, {"7928"}, {"7978"}, {"5383"}, {"4319"}, {"8197"}, {"9227"}, {"1166"}, {"4216"}, {"0866"}, {"1791"}, {"5395"}, {"4310"}, {"4452"}, {"6140"}, {"1494"}, {"8859"}, {"3394"}, {"5507"}, {"7295"}, {"5408"}, {"7789"}, {"8237"}, {"6990"}, {"6882"}, {"8243"}, {"8894"}, {"4352"}, {"6727"}, {"7019"}, {"3126"}, {"3102"}, {"2948"}, {"8242"}, {"5027"}, {"8892"}, {"3492"}, {"1323"}, {"1101"}, {"4526"}, {"5177"}, {"6175"}, {"6664"}, {"2742"}, {"6094"}, {"9877"}, {"2534"}, {"2105"}, {"6588"}, {"9982"}, {"3696"}, {"3480"}, {"2244"}, {"7487"}, {"2844"}, {"3199"}, {"5829"}, {"6952"}, {"6915"}, {"0905"}, {"7615"}, } doList(t, addList) doTraverse(t, addList) doGet(t, addList) } func doList(t *testing.T, addList 
[]stringItem) { for i := 0; i < len(addList)+1; i += 1 { //t.Logf("delete size: %d", i) alreadyDeleted := make(map[stringItem]struct{}) tree := avl.New() for _, key := range addList { //t.Logf("add item: %q", key) tree.Insert(key, "data:"+key.String()) } if !tree.CheckUp() { t.Errorf("add: inconsistent tree") depth := tree.Print(true) t.Logf("depth: %q", depth) t.Fatal("inconsistent tree") } delete_items: for _, key := range addList[:i] { //t.Logf("delete item: %q", key) if _, ok := alreadyDeleted[key]; ok { continue delete_items } alreadyDeleted[key] = struct{}{} dv := tree.Delete(key) ev := "data:" + key.String() if dv != ev { t.Fatalf("delete returned: %q expected: %q", dv, ev) } } if !tree.CheckUp() { t.Errorf("delete: inconsistent tree") depth := tree.Print(true) t.Logf("depth: %q", depth) t.Fatal("inconsistent tree") } delete_remainder: for _, key := range addList[i:] { //t.Logf("delete item: %q", key) if _, ok := alreadyDeleted[key]; ok { continue delete_remainder } alreadyDeleted[key] = struct{}{} dv := tree.Delete(key) ev := "data:" + key.String() if dv != ev { t.Fatalf("delete returned: %q expected: %q", dv, ev) } } if !tree.IsEmpty() { t.Errorf("remainder:remaining nodes") depth := tree.Print(true) t.Logf("depth: %q", depth) t.Fatal("remaining nodes") } } } // traverse the tree forwards and backwards to check iterators func doTraverse(t *testing.T, addList []stringItem) { unique := make(map[string]struct{}) tree := avl.New() for _, key := range addList { unique[key.String()] = struct{}{} tree.Insert(key, "data:"+key.String()) } p := tree.First() if nil == p { t.Fatalf("no first item") } expected := make([]string, 0, len(unique)) for key := range unique { expected = append(expected, key) } sort.Strings(expected) n := 0 for i := 0; nil != p; i += 1 { if 0 != p.Key().Compare(stringItem{expected[i]}) { t.Fatalf("next item: actual: %q expected: %q", p.Key(), expected[i]) } n += 1 p = p.Next() } if n != len(expected) { t.Fatalf("item count: actual: %q 
expected: %q", n, len(addList)) } p = tree.Last() if nil == p { t.Fatalf("no last item") } n = 0 for i := len(expected) - 1; nil != p; i -= 1 { if 0 != p.Key().Compare(stringItem{expected[i]}) { t.Fatalf("prev item: actual: %q expected: %q", p.Key(), expected[i]) } n += 1 p = p.Prev() } if n != len(expected) { t.Fatalf("item count: actual: %d expected: %d", n, len(addList)) } if n != tree.Count() { t.Fatalf("tree count: actual: %d expected: %d", tree.Count(), len(addList)) } // delete remainder for _, key := range expected { //t.Logf("delete item: %q", key) tree.Delete(stringItem{key}) } if !tree.IsEmpty() { t.Errorf("remainder:remaining nodes") depth := tree.Print(true) t.Logf("depth: %d", depth) t.Fatalf("remaining nodes") } if 0 != tree.Count() { t.Fatalf("remaining count not zero: %d", tree.Count()) } } // use indeixng to fetch each item func doGet(t *testing.T, addList []stringItem) { unique := make(map[string]struct{}) tree := avl.New() for _, key := range addList { unique[key.String()] = struct{}{} tree.Insert(key, "data:"+key.String()) } expected := make([]string, 0, len(unique)) for key := range unique { expected = append(expected, key) } sort.Strings(expected) if len(expected) != tree.Count() { t.Fatalf("expected: %d items, but tree count: %d", len(expected), tree.Count()) } // print the full tree if false { depth := tree.Print(true) t.Logf("depth: %d", depth) } for index, key := range expected { node := tree.Get(index) if nil == node { t.Fatalf("[%d] key: %q not it tree (nil result)", index, key) } if 0 != node.Key().Compare(stringItem{key}) { t.Fatalf("[%d]: expected: %q but found: %q", index, key, node.Key()) } //t.Logf("[%d]: expected: %q found: %q", index, key, node.Key()) node1, index1 := tree.Search(stringItem{key}) if nil == node1 { t.Fatalf("[%d]: search: %q returned nil", index, key) } if index != index1 { t.Errorf("[%d]: search: %q index: %d expected: %d", index, key, index1, index) } } if !tree.CheckCounts() { t.Fatal("tree Checkcounts 
failed") } // delete even elements for index, key := range expected { if 0 == index%2 { tree.Delete(stringItem{key}) } } // print tree after some deletions if false { depth := tree.Print(true) t.Logf("after delete depth: %d", depth) } // check odd elements are all present odd_scan: for index, key := range expected { if 0 == index%2 { continue odd_scan } index >>= 1 // 1,3,5, … → 0,1,2, … node := tree.Get(index) if nil == node { t.Fatalf("[%d] key: %q not it tree (nil result)", index, key) } if 0 != node.Key().Compare(stringItem{key}) { t.Fatalf("[%d]: expected: %q but found: %q", index, key, node.Key()) } //t.Logf("[%d]: expected: %q found: %q", index, key, node.Key()) } if !tree.CheckCounts() { t.Fatal("tree Checkcounts failed") } } func makeKey() stringItem { b := make([]byte, 4) _, err := rand.Read(b) if nil != err { panic("rand failed") } n := int(binary.BigEndian.Uint32(b)) return stringItem{fmt.Sprintf("%04d", n%10000)} } func TestRandomTree(t *testing.T) { randomTree(t, 2200, 2000) randomTree(t, 3400, 2760) randomTree(t, 5467, 1234) for i := 0; i < 5; i += 1 { randomTree(t, 2100, 2000) } } func randomTree(t *testing.T, total int, toDelete int) { if toDelete > total { t.Fatalf("failed: total: %d < deletions: %d", total, toDelete) } tree := avl.New() d := make([]stringItem, toDelete) for i := 0; i < total; i += 1 { key := makeKey() if i < len(d) { d[i] = key } //t.Logf("add item: %q", key) tree.Insert(key, "data:"+key.String()) } if !tree.CheckUp() { depth := tree.Print(true) t.Logf("depth: %d", depth) t.Fatalf("inconsistent tree") } for _, key := range d { //t.Logf("delete item: %q", key) tree.Delete(key) if !tree.CheckUp() { depth := tree.Print(true) t.Logf("depth: %d", depth) t.Fatalf("inconsistent tree") } } // add back the test value testKey := stringItem{"500"} const testValue = "just testing data: test 500 value" tree.Insert(testKey, testValue) if !tree.CheckUp() { depth := tree.Print(true) t.Logf("depth: %d", depth) t.Fatalf("inconsistent tree") } if 
!tree.CheckCounts() { depth := tree.Print(true) t.Logf("depth: %d", depth) t.Fatal("tree Checkcounts failed") } doTraverse(t, d) doGet(t, d) // check that test value is searchable tv, _ := tree.Search(testKey) if nil == tv { t.Fatalf("could not find test key: %q", testKey) } if testKey != tv.Key() { t.Fatalf("test key mismatch: actual: %q expected: %q", tv.Key(), testKey) } if testValue != tv.Value() { t.Fatalf("test value mismatch: actual: %q expected: %q", tv.Value(), testValue) } // check iterators n := tv.Next() p := tv.Prev() if nil == n { t.Fatal("could not find next") } if nil == p { t.Fatal("could not find prev") } //t.Logf("test: %q previous: %q next: %q", tv.Value(), p.Value(), n.Value()) // delete the test value, and check it return the correct // value and is no longer in the tree value := tree.Delete(testKey) if value != testValue { t.Fatalf("delete value mismatch: actual: %q expected: %q", value, testValue) } tv, _ = tree.Search(testKey) if nil != tv { t.Fatalf("test key not deleted and contains: %q", tv.Value()) } } // check that inserted nodes can be overwritten // and that nodes keep constant address when tree is re-balanced func TestOverwriteAndNodeStability(t *testing.T) { addList := []stringItem{ {"01"}, {"02"}, {"03"}, {"04"}, {"05"}, {"06"}, {"07"}, {"08"}, {"09"}, {"10"}, } tree := avl.New() for _, key := range addList { //t.Logf("add item: %q", key) tree.Insert(key, "data:"+key.String()) } if !tree.CheckUp() { t.Errorf("add: inconsistent tree") depth := tree.Print(true) t.Logf("depth: %q", depth) t.Fatalf("inconsistent tree") } // overwrite a key oKey := stringItem{"05"} oIndex := 4 // zero based index const newData = "new content for 05" tree.Insert(oKey, newData) if !tree.CheckUp() { t.Errorf("add: inconsistent tree") depth := tree.Print(true) t.Logf("depth: %q", depth) t.Fatalf("inconsistent tree") } // check overwrite node1, index1 := tree.Search(oKey) //t.Logf("v:%p → %+v @[%d]", node1, node1, index1) if oIndex != index1 { 
t.Errorf("index1: %d expected %d", index1, oIndex) } if newData != node1.Value() { t.Fatalf("node data actual: %q expected: %q", node1.Value(), newData) } // delete a node so the oKey node moves dKey := stringItem{"06"} //t.Logf("delete item: %q", dKey) tree.Delete(dKey) // ensure node did not move node2, index2 := tree.Search(oKey) //t.Logf("v:%p → %+v @[%d]", node2, node2, index2) if oIndex != index2 { t.Errorf("index1: %d expected %d", index2, oIndex) } if node1 != node2 { t.Fatalf("node moved from: %p → %p", node1, node2) } if !tree.CheckUp() { t.Errorf("delete: inconsistent tree") depth := tree.Print(true) t.Logf("depth: %d", depth) t.Fatalf("inconsistent tree") } } func TestGetDepthInTree(t *testing.T) { addList := []stringItem{ {"01"}, {"02"}, {"03"}, {"04"}, {"05"}, {"06"}, {"07"}, } tree := avl.New() for _, key := range addList { tree.Insert(key, "data:"+key.String()) } if d := tree.First().Next().Depth(); d != 1 { t.Fatalf("incorrect node depth: %d", d) } if d := tree.First().Next().Next().Depth(); d != 2 { t.Fatalf("incorrect node depth: %d", d) } } func TestGetChildrenByDepth(t *testing.T) { addList := []stringItem{ {"01"}, {"02"}, {"03"}, {"04"}, {"05"}, {"06"}, {"07"}, } tree := avl.New() for _, key := range addList { tree.Insert(key, "data:"+key.String()) } if len(tree.Root().GetChildrenByDepth(1)) != 2 { t.Fatalf("incorrect children numner in depth 1") } if len(tree.Root().GetChildrenByDepth(2)) != 4 { t.Fatalf("incorrect children numner in depth 2") } }
// Copyright 2017 Brian Starkey <stark3y@gmail.com> package rpcconn import ( "net" "net/rpc" "github.com/usedbytes/bot_matrix/datalink" ) type RPCEndpoint struct { transactor datalink.Transactor } type RPCServ struct { endpoint RPCEndpoint srv *rpc.Server } func (r *RPCEndpoint) RPCTransact(tx []datalink.Packet, rx *[]datalink.Packet) error { pkts, err := r.transactor.Transact(tx) *rx = pkts return err } func (r *RPCServ) Serve(l net.Listener) { r.srv.Accept(l) } func NewRPCServ(conn datalink.Transactor) (*RPCServ, error) { srv := &RPCServ{ endpoint: RPCEndpoint{ conn } } srv.srv = rpc.NewServer() srv.srv.Register(&srv.endpoint) return srv, nil } type RPCClient struct { client *rpc.Client } func NewRPCClient(server string) (datalink.Transactor, error) { client, err := rpc.Dial("tcp", server) if err != nil { return nil, err } return &RPCClient{ client }, nil } func (c *RPCClient) Transact(tx []datalink.Packet) ([]datalink.Packet, error) { rx := make([]datalink.Packet, 0, len(tx)) err := c.client.Call("RPCEndpoint.RPCTransact", tx, &rx) return rx, err }
package errors type codeError struct { *baseError code int } func newCodeError(err error, code int) *codeError { return &codeError{ baseError: cause(err), code: code, } } func (e *codeError) Code() int { return e.code } func (e *codeError) Trace() string { return "" } func (e *codeError) Is(err error) bool { return e.code != defaultCode && e.code == Code(err) }
/* MIT License Copyright (c) 2018 IBM Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package jsonlogic import ( "reflect" "strconv" ) //TODO: See who uses this, and consider the error handling func interfaceToFloat(value interface{}) float64 { metaValue := reflect.ValueOf(value) switch metaValue.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(metaValue.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return float64(metaValue.Uint()) case reflect.Float32, reflect.Float64: return metaValue.Float() case reflect.String: floatVal, _ := strconv.ParseFloat(metaValue.String(), 64) return floatVal } return 0 } func isNumeric(value interface{}) bool { switch value.(type) { case float32, float64, int, int8, int16, int32, int64, uint, uint16, uint32, uint64, uint8: return true case string: _, err := strconv.ParseFloat(value.(string), 64) return err == nil default: return false } }
// Package errors provides error wrapping that records the caller's
// location, optional stack traces, and gRPC status helpers.
package errors

import (
	"bytes"
	"errors"
	"fmt"
	"runtime"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// WithCaller sets the position at which the error was formed. If this is
// false, errors will be wrapped with no location information.
var WithCaller = true

// WithCallerVerbose just adds file:line components alongside the caller information.
var WithCallerVerbose = false

// WithStack provides a stack trace at the point the error was yielded, either
// via Error() method or direct function call. This may be set at any time to
// get a stack trace when calling Error() on a WithLocation error.
var WithStack = false

// StackBufferSize can impact performance and should be adjusted if you have
// small or larger stack traces than the default size, this is the bytes
// buffer that will be created for calculating stack traces.
var StackBufferSize = 8192

// WithLocation is a kind of error that stores the location at which it was
// created at. Dereference `Err` to get at the actual error.
type WithLocation struct {
	File string
	Line int
	Loc  uintptr
	Err  error
}

// Error formats the wrapped error, prefixed with "[funcName][file:line]:"
// when location data is present (file:line only if WithCallerVerbose), and
// appends a stack trace captured at call time when WithStack is set.
func (wl WithLocation) Error() string {
	s := wl.Err.Error()
	if wl.Loc != 0 {
		verbose := ""
		if wl.File != "" && wl.Line != 0 && WithCallerVerbose {
			verbose = fmt.Sprintf("[%v:%v]", wl.File, wl.Line)
		}
		// Resolve the program counter back to the creating function's name.
		s = fmt.Sprintf("[%v]%v: %v", runtime.FuncForPC(wl.Loc).Name(), verbose, s)
	}
	sbuf := bytes.NewBufferString(s)
	if WithStack {
		// The trace is captured here (at Error() time), not at creation,
		// truncated to StackBufferSize bytes for the current goroutine only.
		buf := make([]byte, StackBufferSize)
		sbuf.WriteString("\nSTACK TRACE:\n-----------\n")
		sbuf.Write(buf[:runtime.Stack(buf, false)])
	}
	return sbuf.String()
}

// New creates a new error with caller identification.
func New(s string) error {
	return mkError(errors.New(s))
}

// Errorf returns a formatted error with caller identification.
func Errorf(f string, args ...interface{}) error {
	return mkError(fmt.Errorf(f, args...))
}

// WithError combines two errors to be compatible with errors.As() and
// similar functions in the stdlib errors package.
func WithError(err error, f string, args ...interface{}) error {
	return mkError(fmt.Errorf("%v: %w", fmt.Sprintf(f, args...), err))
}

// GRPC formats a GRPC error in our caller ID pattern.
// Depth 2 skips transformWithCaller and GRPC itself, attributing the error
// to GRPC's caller (mkError is not involved here, hence 2 rather than 3).
func GRPC(code codes.Code, f string, args ...interface{}) error {
	return transformWithCaller(2, status.Errorf(code, f, args...))
}

// transformWithCaller wraps e in a WithLocation recording the frame `depth`
// levels up the call stack (per runtime.Caller semantics). When WithCaller
// is false, or the frame cannot be resolved, the location fields are zero.
func transformWithCaller(depth int, e error) error {
	if WithCaller {
		pc, file, line, ok := runtime.Caller(depth)
		if ok {
			return WithLocation{
				File: file,
				Line: line,
				Loc:  pc,
				Err:  e,
			}
		}
	}
	return WithLocation{Err: e}
}

// mkError attributes e to the caller of the exported constructor.
// Depth 3 skips transformWithCaller, mkError, and the constructor
// (New/Errorf/WithError) — do not change the call structure without
// adjusting this constant.
func mkError(e error) error {
	return transformWithCaller(3, e)
}
package middlewares

import (
	"net/http"

	"github.com/go-playground/validator/v10"
	"github.com/labstack/echo/v4"
)

// CustomValidator adapts go-playground/validator to echo's Validator interface.
type CustomValidator struct {
	Validator *validator.Validate
}

// Validate runs struct validation on i and returns the raw validation
// error, if any.
//
// Fix: the original body was commented out and unconditionally returned
// nil, which silently disabled validation even though ValidateRequest
// depends on this method to reject bad payloads. The validator call is
// restored; the raw error is returned so that callers (ValidateRequest)
// decide the HTTP status rather than this layer forcing a 500.
func (cv CustomValidator) Validate(i interface{}) error {
	return cv.Validator.Struct(i)
}

// ValidateRequest returns echo middleware that binds the request payload
// into p, validates it, stores the result under the "body" context key,
// and then invokes next. Bind failures yield 400 Bad Request; validation
// failures yield 400 with the validator's message.
func ValidateRequest(p interface{}) func(next echo.HandlerFunc) echo.HandlerFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			if err := c.Bind(p); err != nil {
				return echo.ErrBadRequest
			}
			if err := c.Validate(p); err != nil {
				return echo.NewHTTPError(http.StatusBadRequest, err.Error())
			}
			c.Set("body", p)
			return next(c)
		}
	}
}
package main

import (
	"fmt"
	"log"
	"net/http"
)

// dumpHeaders writes each request header and the client's remote address
// back to the client as plain text, one "key = value" pair per line.
//
// Fix: the original called w.WriteHeader(http.StatusOK) AFTER the first
// w.Write. The first Write implicitly sends a 200 status, so the explicit
// call was superfluous (net/http logs "superfluous response.WriteHeader")
// and had no effect. It is removed; the implicit 200 is correct. The
// handler is also extracted from main so it can be unit-tested.
func dumpHeaders(w http.ResponseWriter, r *http.Request) {
	for k, v := range r.Header {
		// v is a []string; fmt's %s renders it as "[a b]".
		fmt.Fprintf(w, "%s = %s\n", k, v)
	}
	fmt.Fprintf(w, "RemoteAddr = %s\n", r.RemoteAddr)
}

// main serves dumpHeaders on every path at :8080.
func main() {
	http.HandleFunc("/", dumpHeaders)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
package main

import (
	"bufio"
	"flag"
	"fmt"
	"os"
	"strings"

	"github.com/zippy/internal/parser"
	"github.com/zippy/pkg/store"
)

// main opens the zippy store named by -path and runs a simple REPL,
// handing each whitespace-split input line to the parser.
func main() {
	path := flag.String("path", "./log", "path of the zippy store to open")
	flag.Parse()

	store.Open(*path)

	fmt.Println("Welcome to zippy!")
	scanner := bufio.NewScanner(os.Stdin)
	for {
		fmt.Print(">>> ")
		// Fix: the original ignored Scan's return value, so once stdin hit
		// EOF (Ctrl-D or piped input) the loop spun forever re-printing the
		// prompt and re-parsing the stale buffer. Exit the loop when input
		// is exhausted or a read error occurs.
		if !scanner.Scan() {
			break
		}
		args := strings.Split(scanner.Text(), " ")
		parser.Parse(args)
	}
	// Scan returns false on both EOF and error; report only real errors.
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, "read error:", err)
	}
}
package factogo import ( "errors" "fmt" "reflect" "time" ) /* Design ends the process to design a new Factory instance. When a Factory instance is designed then it is registered and can be produced calling the method Produce(). Example: Factory("staff").Design(&Staff{}) // Design the Factory Instance Factory("staff").Produce() // Now is possible to produce from the designed Factory */ func (fi *factoryInstance) Design(object interface{}) error { if !fi.isAnonymous { if _, ok := factories[fi.name]; ok { return errors.New( fmt.Sprintf("A designed Factory named '%s' already exists", fi.name)) } } err := fi.checkValues(object) if err != nil { return err } if !fi.isAnonymous { factories[fi.name] = fi } return nil } func factoryAnonymous() *factoryInstance { fi := &factoryInstance{isAnonymous: true, isAuto: true} fi.values = make(map[string]*factoryValue) return fi } func (fi *factoryInstance) checkValues(object interface{}) error { objectValue := reflect.ValueOf(object) objectType := reflect.Indirect(objectValue).Type() if fi.isAuto { for i := 0; i < objectType.NumField(); i++ { nameOfField := objectType.Field(i).Name if _, ok := fi.values[nameOfField]; !ok { fi.values[nameOfField] = &factoryValue{name: nameOfField} } } } for _, _factoryValue := range fi.values { fieldName := _factoryValue.name field, ok := objectType.FieldByName(fieldName) if !ok { return errors.New(fmt.Sprintf( "The struct '%v' doesn't have a field '%v'", objectType.Name(), fieldName)) } if _factoryValue.value != nil { value := reflect.TypeOf(_factoryValue.value) var targetKind reflect.Kind if value.Kind() == reflect.Func { targetKind = value.Out(0).Kind() } else { targetKind = value.Kind() } if field.Type.Kind() != targetKind { return errors.New(fmt.Sprintf( "The type of value of '%v' not match the type in the struct %v", fieldName, objectType.Name())) } } else { value, err := getDefaultFunctionFor(fieldName, field.Type) if err != nil { return err } else { _factoryValue.value = value } } } return nil 
} func getDefaultFunctionFor(name string, _type reflect.Type) (interface{}, error) { switch _type.Kind() { case reflect.String: return ProduceString, nil case reflect.Int: return ProduceInt, nil case reflect.Int8: return ProduceInt8, nil case reflect.Int16: return ProduceInt16, nil case reflect.Int32: return ProduceInt32, nil case reflect.Int64: return ProduceInt64, nil case reflect.Uint: return ProduceUint, nil case reflect.Uint8: return ProduceUint8, nil case reflect.Uint16: return ProduceUint16, nil case reflect.Uint32: return ProduceUint32, nil case reflect.Uint64: return ProduceUint64, nil case reflect.Float32: return ProduceFloat32, nil case reflect.Float64: return ProduceFloat64, nil case reflect.Bool: return ProduceBool, nil case reflect.Ptr: switch _type.String() { case "*string": return ProduceStringPointer, nil case "*int": return ProduceIntPointer, nil case "*int8": return ProduceInt8Pointer, nil case "*int16": return ProduceInt16Pointer, nil case "*int32": return ProduceInt32Pointer, nil case "*int64": return ProduceInt64Pointer, nil case "*uint": return ProduceUintPointer, nil case "*uint8": return ProduceUint8Pointer, nil case "*uint16": return ProduceUint16Pointer, nil case "*uint32": return ProduceUint32Pointer, nil case "*uint64": return ProduceUint64Pointer, nil case "*float32": return ProduceFloat32Pointer, nil case "*float64": return ProduceFloat64Pointer, nil case "*bool": return ProduceBoolPointer, nil default: if _type.Elem().AssignableTo(reflect.TypeOf(time.Time{})) { return ProduceTimePointer, nil } return func(object interface{}) { valuePointer := reflect.New(_type.Elem()).Interface() Factory(name).Produce(valuePointer) field := reflect.Indirect(reflect.ValueOf(object)).FieldByName(name) field.Set(reflect.ValueOf(valuePointer)) }, nil } default: if _type.AssignableTo(reflect.TypeOf(time.Time{})) { return ProduceTime, nil } return func(object interface{}) { valuePointer := reflect.New(_type).Interface() Factory(name).Produce(valuePointer) 
field := reflect.Indirect(reflect.ValueOf(object)).FieldByName(name) field.Set(reflect.ValueOf(valuePointer).Elem()) }, nil } return nil, errors.New(fmt.Sprintf( "There is not a default function for type %v", _type.String())) }
package marky_test import ( "github.com/serkansipahi/marky" "io/ioutil" "testing" ) func TestNewMarkdown(t *testing.T) { markdownTemplate, _ := ioutil.ReadFile("./markdown_test.md") expectedHeaders, _ := ioutil.ReadFile("./markdown_test_expected.txt") markdown := marky.NewMarkdown(string(markdownTemplate)) code := markdown.Compile() if code != string(expectedHeaders) { t.Error( "expected", string(expectedHeaders), "got", code, ) } } func TestCreateHeaderTag(t *testing.T) { // without newLine option and limited h tag size according specification expectedTag := "<h1>Hello World</h1>" createdTags := marky.CreateHeaderTag("Hello World", 1, false) if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } expectedTag = "<h6>Hello World</h6>" createdTags = marky.CreateHeaderTag("Hello World", 6, false) if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } expectedTag = "<h6>Hello World</h6>" createdTags = marky.CreateHeaderTag("Hello World", 12, false) if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } // with newLine option expectedTag = "<h3>Hello World</h3>\n" createdTags = marky.CreateHeaderTag("Hello World", 3, true) if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } } func TestCreatePTag(t *testing.T) { // without newLine option expectedTag := "<p>Hello World</p>" createdTags := marky.CreatePTag("Hello World", false) if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } // with newLine option expectedTag = "<p>Hello World</p>\n" createdTags = marky.CreatePTag("Hello World", true) if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } } func TestCreateLinkTag(t *testing.T) { expectedTag := "<a href='http://example.com'>Hello World</a>" createdTags := marky.CreateLinkTag("Hello World", "http://example.com") if createdTags != expectedTag { t.Error( 
"expected", expectedTag, "got", createdTags, ) } expectedTag = "<a href='http://example.com/%3Cscript%3Ealert%28%27xss%27%29%3C/script%3E'>Hello World</a>" createdTags = marky.CreateLinkTag("Hello World", "http://example.com/<script>alert('xss')</script>") if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } } func TestCreateEmTag(t *testing.T) { expectedTag := "<em>Hello World</em>" createdTags := marky.CreateEmTag("Hello World") if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } } func TestCreateStrongTag(t *testing.T) { expectedTag := "<strong>Hello World</strong>" createdTags := marky.CreateStrongTag("Hello World") if createdTags != expectedTag { t.Error( "expected", expectedTag, "got", createdTags, ) } }