text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestPathKV(t *testing.T) { tests := []struct { desc string in string wantK string wantV string wantErr error }{ { desc: "valid", in: "[1:2]", wantK: "1", wantV: "2", wantErr: nil, }, { desc: "invalid", in: "[1:", wantErr: errors.New(""), }, { desc: "empty", in: "", wantErr: errors.New(""), }, } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { if k, v, err := PathKV(tt.in); k != tt.wantK || v != tt.wantV || errNilCheck(err, tt.wantErr) { t.Errorf("%s: expect %v %v %v got %v %v %v", tt.desc, tt.wantK, tt.wantV, tt.wantErr, k, v, err) } }) } }
explode_data.jsonl/70573
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 386 }
[ 2830, 3393, 1820, 82707, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 41653, 262, 914, 198, 197, 17430, 414, 914, 198, 197, 50780, 42, 256, 914, 198, 197, 50780, 53, 256, 914, 198, 197, 50780, 7747, 1465, 198, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestReconcileServiceInstanceFailureOnFinalRetry checks that when the broker
// keeps failing provisioning and the operation start time is already past the
// retry window (backdated one week), the reconciler records a terminal failure
// without orphan mitigation instead of scheduling another retry.
func TestReconcileServiceInstanceFailureOnFinalRetry(t *testing.T) {
	// Broker client is stubbed to fail every provision call.
	fakeKubeClient, fakeCatalogClient, fakeClusterServiceBrokerClient, testController, sharedInformers := newTestController(t, fakeosb.FakeClientConfiguration{
		ProvisionReaction: &fakeosb.ProvisionReaction{
			Error: errors.New("fake creation failure"),
		},
	})
	sharedInformers.ClusterServiceBrokers().Informer().GetStore().Add(getTestClusterServiceBroker())
	sharedInformers.ClusterServiceClasses().Informer().GetStore().Add(getTestClusterServiceClass())
	sharedInformers.ClusterServicePlans().Informer().GetStore().Add(getTestClusterServicePlan())
	instance := getTestServiceInstanceWithClusterRefs()
	// Simulate an in-flight provision operation.
	instance.Status.CurrentOperation = v1beta1.ServiceInstanceOperationProvision
	instance.Status.InProgressProperties = &v1beta1.ServiceInstancePropertiesState{
		ClusterServicePlanExternalID:   testClusterServicePlanGUID,
		ClusterServicePlanExternalName: testClusterServicePlanName,
	}
	// Backdate the operation start so the retry duration has already elapsed.
	startTime := metav1.NewTime(time.Now().Add(-7 * 24 * time.Hour))
	instance.Status.OperationStartTime = &startTime
	instance.Status.ObservedGeneration = instance.Generation
	if err := reconcileServiceInstance(t, testController, instance); err != nil {
		t.Fatalf("Should have returned no error because the retry duration has elapsed: %v", err)
	}
	// Exactly one (failing) provision call should have reached the broker.
	brokerActions := fakeClusterServiceBrokerClient.Actions()
	assertNumberOfBrokerActions(t, brokerActions, 1)
	assertProvision(t, brokerActions[0], &osb.ProvisionRequest{
		AcceptsIncomplete: true,
		InstanceID:        testServiceInstanceGUID,
		ServiceID:         testClusterServiceClassGUID,
		PlanID:            testClusterServicePlanGUID,
		OrganizationGUID:  testClusterID,
		SpaceGUID:         testNamespaceGUID,
		Context:           testContext})
	// verify no kube resources created
	// One single action comes from getting namespace uid
	kubeActions := fakeKubeClient.Actions()
	if err := checkKubeClientActions(kubeActions, []kubeClientAction{
		{verb: "get", resourceName: "namespaces", checkType: checkGetActionType},
	}); err != nil {
		t.Fatal(err)
	}
	// The only catalog action is the status update marking the terminal
	// failure; no orphan mitigation is expected.
	actions := fakeCatalogClient.Actions()
	assertNumberOfActions(t, actions, 1)
	updatedServiceInstance := assertUpdateStatus(t, actions[0], instance)
	assertServiceInstanceProvisionRequestFailingErrorNoOrphanMitigation(
		t, updatedServiceInstance, v1beta1.ServiceInstanceOperationProvision, errorErrorCallingProvisionReason, errorReconciliationRetryTimeoutReason, instance,
	)
	// Both the provision-call failure and the retry-timeout should have
	// been emitted as warning events.
	events := getRecordedEvents(testController)
	expectedEventPrefixes := []string{
		corev1.EventTypeWarning + " " + errorErrorCallingProvisionReason,
		corev1.EventTypeWarning + " " + errorReconciliationRetryTimeoutReason,
	}
	if err := checkEventPrefixes(events, expectedEventPrefixes); err != nil {
		t.Fatal(err)
	}
}
explode_data.jsonl/58171
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 901 }
[ 2830, 3393, 693, 40446, 457, 1860, 2523, 17507, 1925, 19357, 51560, 1155, 353, 8840, 836, 8, 341, 1166, 726, 42, 3760, 2959, 11, 12418, 41606, 2959, 11, 12418, 28678, 1860, 65545, 2959, 11, 1273, 2051, 11, 6094, 37891, 388, 1669, 501, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestEquals(t *testing.T) { r1 := NewResourceBuilder(). AddResource(constants.Memory, 1). AddResource(constants.CPU, 1). Build() r2 := NewResourceBuilder(). AddResource(constants.Memory, 1). AddResource(constants.CPU, 1). Build() assert.Equal(t, Equals(r1, r2), true) r1 = NewResourceBuilder(). AddResource(constants.Memory, 1). AddResource(constants.CPU, 1). Build() r2 = NewResourceBuilder(). AddResource(constants.Memory, 2). AddResource(constants.CPU, 1). Build() assert.Equal(t, Equals(r1, r2), false) r1 = NewResourceBuilder(). AddResource(constants.Memory, 1). Build() r2 = NewResourceBuilder(). AddResource(constants.Memory, 1). AddResource(constants.CPU, 1). Build() assert.Equal(t, Equals(r1, r2), false) r1 = nil r2 = nil assert.Equal(t, Equals(r1, r2), true) r1 = nil r2 = NewResourceBuilder(). AddResource(constants.Memory, 1). AddResource(constants.CPU, 1). Build() assert.Equal(t, Equals(r1, r2), false) r1 = NewResourceBuilder(). AddResource(constants.Memory, 1). AddResource(constants.CPU, 1). Build() r2 = nil assert.Equal(t, Equals(r1, r2), false) }
explode_data.jsonl/63793
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 465 }
[ 2830, 3393, 4315, 1155, 353, 8840, 836, 8, 341, 7000, 16, 1669, 1532, 4783, 3297, 25829, 197, 37972, 4783, 80368, 71162, 11, 220, 16, 4292, 197, 37972, 4783, 80368, 727, 6325, 11, 220, 16, 4292, 197, 197, 11066, 741, 7000, 17, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestPendingConnsAfterErr verifies that when every connection attempt fails,
// all queued connection requests receive the error (no request is orphaned
// and nothing deadlocks), and that the pool subsequently drains back to zero
// open connections.
func TestPendingConnsAfterErr(t *testing.T) {
	const (
		maxOpen = 2
		// Queue more requests than the pool will ever open.
		tryOpen = maxOpen*2 + 2
	)

	// No queries will be run.
	db, err := Open("test", fakeDBName)
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer closeDB(t, db)
	defer func() {
		for k, v := range db.lastPut {
			t.Logf("%p: %v", k, v)
		}
	}()

	db.SetMaxOpenConns(maxOpen)
	db.SetMaxIdleConns(0)

	errOffline := errors.New("db offline")

	defer func() { setHookOpenErr(nil) }()

	errs := make(chan error, tryOpen)

	var opening sync.WaitGroup
	opening.Add(tryOpen)

	// Every open attempt blocks until all workers are launched, then fails.
	setHookOpenErr(func() error {
		// Wait for all connections to enqueue.
		opening.Wait()
		return errOffline
	})

	for i := 0; i < tryOpen; i++ {
		go func() {
			opening.Done() // signal one connection is in flight
			_, err := db.Exec("will never run")
			errs <- err
		}()
	}

	opening.Wait() // wait for all workers to begin running

	const timeout = 5 * time.Second
	to := time.NewTimer(timeout)
	defer to.Stop()

	// check that all connections fail without deadlock
	for i := 0; i < tryOpen; i++ {
		select {
		case err := <-errs:
			if got, want := err, errOffline; got != want {
				t.Errorf("unexpected err: got %v, want %v", got, want)
			}
		case <-to.C:
			t.Fatalf("orphaned connection request(s), still waiting after %v", timeout)
		}
	}

	// Wait a reasonable time for the database to close all connections.
	tick := time.NewTicker(3 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			// Poll the pool's open-connection count under its lock.
			db.mu.Lock()
			if db.numOpen == 0 {
				db.mu.Unlock()
				return
			}
			db.mu.Unlock()
		case <-to.C:
			// Closing the database will check for numOpen and fail the test.
			return
		}
	}
}
explode_data.jsonl/16005
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 675 }
[ 2830, 3393, 32027, 1109, 4412, 6025, 7747, 1155, 353, 8840, 836, 8, 341, 4777, 2399, 197, 22543, 5002, 284, 220, 17, 198, 197, 6799, 5002, 284, 1932, 5002, 9, 17, 488, 220, 17, 198, 197, 692, 197, 322, 2308, 19556, 686, 387, 1598, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestReconcileWithWhenExpressionsWithResultRefs verifies that a pipeline
// task whose when expressions reference another task's result is skipped when
// they evaluate to false, that no TaskRun is created for the skipped task,
// and that a task ordered after the skipped task still runs (when expressions
// are scoped to the guarded task only).
//
// NOTE(review): the embedded YAML literals were reconstructed from a
// whitespace-collapsed dump; confirm their indentation against the original.
func TestReconcileWithWhenExpressionsWithResultRefs(t *testing.T) {
	names.TestingSeed()
	ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, `
metadata:
  name: test-pipeline
  namespace: foo
spec:
  tasks:
  # a-task is executed and produces a result aResult with value aResultValue
  - name: a-task
    taskRef:
      name: a-task
  # b-task is skipped because it has when expressions, with result reference to a-task, that evaluate to false
  - name: b-task
    taskRef:
      name: b-task
    when:
    - input: $(tasks.a-task.results.aResult)
      operator: in
      values:
      - notResultValue
  # c-task is executed regardless of running after skipped b-task because when expressions are scoped to task
  - name: c-task
    runAfter:
    - b-task
    taskRef:
      name: c-task
`)}
	prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, `
metadata:
  name: test-pipeline-run-different-service-accs
  namespace: foo
spec:
  pipelineRef:
    name: test-pipeline
  serviceAccountName: test-sa-0
`)}
	ts := []*v1beta1.Task{
		parse.MustParseTask(t, `
metadata:
  name: a-task
  namespace: foo
spec:
  results:
  - description: a result
    name: aResult
`),
		{ObjectMeta: baseObjectMeta("b-task", "foo")},
		{ObjectMeta: baseObjectMeta("c-task", "foo")},
	}
	// a-task has already completed successfully and produced aResultValue,
	// which makes b-task's when expression evaluate to false.
	trs := []*v1beta1.TaskRun{mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-different-service-accs-a-task-xxyyy", "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "a-task", true), `
spec:
  resources: {}
  serviceAccountName: test-sa
  taskRef:
    name: hello-world
  timeout: 1h0m0s
status:
  conditions:
  - status: "True"
    type: Succeeded
  taskResults:
  - name: aResult
    value: aResultValue
`)}
	d := test.Data{
		PipelineRuns: prs,
		Pipelines:    ps,
		Tasks:        ts,
		TaskRuns:     trs,
	}
	prt := newPipelineRunTest(d, t)
	defer prt.Cancel()

	wantEvents := []string{
		"Normal Started",
		"Normal Running Tasks Completed: 1 \\(Failed: 0, Cancelled 0\\), Incomplete: 1, Skipped: 1",
	}
	pipelineRun, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", wantEvents, false)

	// c-task must have been scheduled even though b-task was skipped.
	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
		LabelSelector: "tekton.dev/pipelineTask=c-task,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs",
		Limit:         1,
	})
	if err != nil {
		t.Fatalf("Failure to list TaskRuns %s", err)
	}
	if len(actual.Items) != 1 {
		t.Fatalf("Expected 1 TaskRun got %d", len(actual.Items))
	}

	// The run status must record b-task as skipped with the evaluated
	// when expression.
	actualSkippedTasks := pipelineRun.Status.SkippedTasks
	expectedSkippedTasks := []v1beta1.SkippedTask{{
		// its when expressions evaluate to false
		Name: "b-task",
		WhenExpressions: v1beta1.WhenExpressions{{
			Input:    "aResultValue",
			Operator: "in",
			Values:   []string{"notResultValue"},
		}},
	}}
	if d := cmp.Diff(expectedSkippedTasks, actualSkippedTasks); d != "" {
		t.Errorf("expected to find Skipped Tasks %v. Diff %s", expectedSkippedTasks, diff.PrintWantGot(d))
	}

	// confirm that there are no taskruns created for the skipped tasks
	skippedTasks := []string{"b-task"}
	for _, skippedTask := range skippedTasks {
		labelSelector := fmt.Sprintf("tekton.dev/pipelineTask=%s,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs", skippedTask)
		actualSkippedTask, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
			LabelSelector: labelSelector,
			Limit:         1,
		})
		if err != nil {
			t.Fatalf("Failure to list TaskRun's %s", err)
		}
		if len(actualSkippedTask.Items) != 0 {
			t.Fatalf("Expected 0 TaskRuns got %d", len(actualSkippedTask.Items))
		}
	}
}
explode_data.jsonl/27315
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1486 }
[ 2830, 3393, 693, 40446, 457, 2354, 4498, 40315, 2354, 2077, 82807, 1155, 353, 8840, 836, 8, 341, 93940, 8787, 287, 41471, 741, 35009, 1669, 29838, 85, 16, 19127, 16, 1069, 8790, 90, 6400, 50463, 14463, 34656, 1155, 11, 22074, 17637, 510...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestIssue29434(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t1;") tk.MustExec("create table t1(c1 datetime);") tk.MustExec("insert into t1 values('2021-12-12 10:10:10.000');") tk.MustExec("set tidb_enable_vectorized_expression = on;") tk.MustQuery("select greatest(c1, '99999999999999') from t1;").Check(testkit.Rows("99999999999999")) tk.MustExec("set tidb_enable_vectorized_expression = off;") tk.MustQuery("select greatest(c1, '99999999999999') from t1;").Check(testkit.Rows("99999999999999")) tk.MustExec("set tidb_enable_vectorized_expression = on;") tk.MustQuery("select least(c1, '99999999999999') from t1;").Check(testkit.Rows("2021-12-12 10:10:10")) tk.MustExec("set tidb_enable_vectorized_expression = off;") tk.MustQuery("select least(c1, '99999999999999') from t1;").Check(testkit.Rows("2021-12-12 10:10:10")) }
explode_data.jsonl/65614
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 369 }
[ 2830, 3393, 42006, 17, 24, 19, 18, 19, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1273, 8226, 7251, 11571, 6093, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3553, 340, 3244, 74, 50...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContextDefault(t *testing.T) { c := NewParallelE2eCLI(t, binDir) t.Run("show", func(t *testing.T) { res := c.RunDockerCmd("context", "show") res.Assert(t, icmd.Expected{Out: "default"}) }) t.Run("ls", func(t *testing.T) { res := c.RunDockerCmd("context", "ls") golden.Assert(t, res.Stdout(), GoldenFile("ls-out-default")) }) t.Run("inspect", func(t *testing.T) { res := c.RunDockerCmd("context", "inspect", "default") res.Assert(t, icmd.Expected{Out: `"Name": "default"`}) }) t.Run("inspect current", func(t *testing.T) { res := c.RunDockerCmd("context", "inspect") res.Assert(t, icmd.Expected{Out: `"Name": "default"`}) }) }
explode_data.jsonl/5425
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 290 }
[ 2830, 3393, 1972, 3675, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 1532, 16547, 36, 17, 68, 63959, 1155, 11, 9544, 6184, 692, 3244, 16708, 445, 3445, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 10202, 1669, 272, 16708, 35, 13659, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestAddMoreTxThanPoolSize checks mempool capacity enforcement: after the
// pool (initialized with size 4) is filled, submitting a fifth transaction
// must leave the size at 4 and must not leave tx5 in the cache.
func TestAddMoreTxThanPoolSize(t *testing.T) {
	// Pool is initialized with capacity 4; fill it completely first.
	q, mem := initEnv(4)
	defer q.Close()
	defer mem.Close()
	err := add4Tx(mem.client)
	require.Nil(t, err)
	// Submit one transaction beyond capacity and wait for it to be processed.
	msg5 := mem.client.NewMessage("mempool", types.EventTx, tx5)
	mem.client.Send(msg5, true)
	mem.client.Wait(msg5)
	// The pool must stay at 4 entries and the overflowing tx5 must not be cached.
	if mem.Size() != 4 || mem.cache.Exist(string(tx5.Hash())) {
		t.Error("TestAddMoreTxThanPoolSize failed", mem.Size(), mem.cache.Exist(string(tx5.Hash())))
	}
}
explode_data.jsonl/16821
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 182 }
[ 2830, 3393, 2212, 7661, 31584, 26067, 10551, 1695, 1155, 353, 8840, 836, 8, 341, 18534, 11, 1833, 1669, 2930, 14359, 7, 19, 340, 16867, 2804, 10421, 741, 16867, 1833, 10421, 2822, 9859, 1669, 912, 19, 31584, 39908, 6581, 340, 17957, 596...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestOneMigration verifies that re-opening a localstore whose stored schema
// is one step behind the current one runs exactly the pending migration: the
// migration registered for the new schema name runs, the one for the
// already-applied schema does not, and the stored schema name is updated.
func TestOneMigration(t *testing.T) {
	// Restore the global migration table and current schema name when done.
	defer func(v []migration, s string) {
		schemaMigrations = v
		DbSchemaCurrent = s
	}(schemaMigrations, DbSchemaCurrent)

	DbSchemaCurrent = DbSchemaCode
	dbSchemaNext := "dbSchemaNext"

	// Flags recording which of the two registered migrations executed.
	ran := false
	shouldNotRun := false
	schemaMigrations = []migration{
		{name: DbSchemaCode, fn: func(db *DB) error {
			shouldNotRun = true // this should not be executed
			return nil
		}},
		{name: dbSchemaNext, fn: func(db *DB) error {
			ran = true
			return nil
		}},
	}

	dir, err := ioutil.TempDir("", "localstore-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	baseKey := make([]byte, 32)
	if _, err := rand.Read(baseKey); err != nil {
		t.Fatal(err)
	}

	logger := logging.New(ioutil.Discard, 0)

	// start the fresh localstore with the sanctuary schema name
	db, err := New(dir, baseKey, nil, logger)
	if err != nil {
		t.Fatal(err)
	}

	err = db.Close()
	if err != nil {
		t.Fatal(err)
	}

	// Advance the current schema so the reopen below sees a pending step.
	DbSchemaCurrent = dbSchemaNext

	// start the existing localstore and expect the migration to run
	db, err = New(dir, baseKey, nil, logger)
	if err != nil {
		t.Fatal(err)
	}

	schemaName, err := db.schemaName.Get()
	if err != nil {
		t.Fatal(err)
	}

	if schemaName != dbSchemaNext {
		t.Errorf("schema name mismatch. got '%s', want '%s'", schemaName, dbSchemaNext)
	}

	if !ran {
		t.Errorf("expected migration did not run")
	}

	if shouldNotRun {
		t.Errorf("migration ran but shouldnt have")
	}

	err = db.Close()
	if err != nil {
		t.Error(err)
	}
}
explode_data.jsonl/79611
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 614 }
[ 2830, 3393, 3966, 20168, 1155, 353, 8840, 836, 8, 341, 16867, 2915, 3747, 3056, 80227, 11, 274, 914, 8, 341, 197, 1903, 3416, 44, 17824, 284, 348, 198, 197, 197, 7994, 8632, 5405, 284, 274, 198, 197, 25547, 17349, 44, 17824, 11, 119...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPodMetadataDeDot(t *testing.T) { tests := []struct { pod *Pod meta common.MapStr }{ { pod: &Pod{ Metadata: ObjectMeta{ Labels: map[string]string{"a.key": "a.value"}, }, }, meta: common.MapStr{"labels": common.MapStr{"a_key": "a.value"}}, }, } for _, test := range tests { assert.Equal(t, NewMetaGenerator(nil, nil, nil).PodMetadata(test.pod)["labels"], test.meta["labels"]) } }
explode_data.jsonl/59543
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 192 }
[ 2830, 3393, 23527, 14610, 1912, 34207, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 3223, 347, 220, 353, 23527, 198, 197, 84004, 4185, 10104, 2580, 198, 197, 59403, 197, 197, 515, 298, 3223, 347, 25, 609, 23527, 51...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSignCommitmentFailNotLockedIn(t *testing.T) { t.Parallel() // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. aliceChannel, _, cleanUp, err := CreateTestChannels() if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() // Next, we'll modify Alice's internal state to omit knowledge of Bob's // next revocation point. aliceChannel.channelState.RemoteNextRevocation = nil // If we now try to initiate a state update, then it should fail as // Alice is unable to actually create a new state. _, _, err = aliceChannel.SignNextCommitment() if err != ErrNoWindow { t.Fatalf("expected ErrNoWindow, instead have: %v", err) } }
explode_data.jsonl/46443
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 249 }
[ 2830, 3393, 7264, 33441, 478, 19524, 2623, 49010, 641, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 322, 4230, 264, 1273, 5496, 892, 686, 387, 1483, 369, 279, 8090, 315, 419, 198, 197, 322, 19905, 13, 576, 5496, 686, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestServerBoundHandshake_UpgradeToOldRealIP(t *testing.T) { tt := []struct { addr string clientAddr net.TCPAddr timestamp time.Time }{ { addr: "example.com", clientAddr: net.TCPAddr{ IP: net.IPv4(127, 0, 0, 1), Port: 12345, }, timestamp: time.Now(), }, { addr: "sub.example.com:25565", clientAddr: net.TCPAddr{ IP: net.IPv4(127, 0, 1, 1), Port: 25565, }, timestamp: time.Now(), }, { addr: "example.com:25565", clientAddr: net.TCPAddr{ IP: net.IPv4(127, 0, 2, 1), Port: 6543, }, timestamp: time.Now(), }, { addr: "example.com", clientAddr: net.TCPAddr{ IP: net.IPv4(127, 0, 3, 1), Port: 7467, }, timestamp: time.Now(), }, } for _, tc := range tt { hs := mc.ServerBoundHandshake{ServerAddress: tc.addr} hs.UpgradeToOldRealIP(tc.clientAddr.String()) if hs.ParseServerAddress() != tc.addr { t.Errorf("got: %v; want: %v", hs.ParseServerAddress(), tc.addr) } realIpSegments := strings.Split(string(hs.ServerAddress), mc.RealIPSeparator) if realIpSegments[1] != tc.clientAddr.String() { t.Errorf("got: %v; want: %v", realIpSegments[1], tc.addr) } unixTimestamp, err := strconv.ParseInt(realIpSegments[2], 10, 64) if err != nil { t.Error(err) } if unixTimestamp != tc.timestamp.Unix() { t.Errorf("timestamp is invalid: got: %d; want: %d", unixTimestamp, tc.timestamp.Unix()) } } }
explode_data.jsonl/45006
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 704 }
[ 2830, 3393, 5475, 19568, 2314, 29661, 62, 43861, 1249, 18284, 12768, 3298, 1155, 353, 8840, 836, 8, 341, 3244, 83, 1669, 3056, 1235, 341, 197, 53183, 981, 914, 198, 197, 25291, 13986, 4179, 836, 7123, 13986, 198, 197, 3244, 4702, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestCLI_StartStopCPUProfile(t *testing.T) { f, err := startCPUProfile() require.Nil(t, err) stopCPUProfile() err = f.Close() require.Nil(t, err) err = os.Remove(f.Name()) require.Nil(t, err) }
explode_data.jsonl/34035
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 88 }
[ 2830, 3393, 63959, 38056, 10674, 31615, 8526, 1155, 353, 8840, 836, 8, 341, 1166, 11, 1848, 1669, 1191, 31615, 8526, 741, 17957, 59678, 1155, 11, 1848, 692, 62644, 31615, 8526, 741, 9859, 284, 282, 10421, 741, 17957, 59678, 1155, 11, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestOrderBy exercises QueryTable ordering: descending and ascending order
// on a plain column, ordering across a relation (profile__age), the
// OrderClauses API with an explicit sort direction, and a raw clause
// (rand()) on MySQL only. Each query is checked only via its filtered count.
func TestOrderBy(t *testing.T) {
	qs := dORM.QueryTable("user")
	// Descending order on status, filtered to a single user.
	num, err := qs.OrderBy("-status").Filter("user_name", "nobody").Count()
	throwFail(t, err)
	throwFail(t, AssertIs(num, 1))
	// Ascending order on the same column.
	num, err = qs.OrderBy("status").Filter("user_name", "slene").Count()
	throwFail(t, err)
	throwFail(t, AssertIs(num, 1))
	// Ordering through a related model's field.
	num, err = qs.OrderBy("-profile__age").Filter("user_name", "astaxie").Count()
	throwFail(t, err)
	throwFail(t, AssertIs(num, 1))
	// Same ordering expressed through the structured OrderClauses API.
	num, err = qs.OrderClauses(
		order_clause.Clause(
			order_clause.Column(`profile__age`),
			order_clause.SortDescending(),
		),
	).Filter("user_name", "astaxie").Count()
	throwFail(t, err)
	throwFail(t, AssertIs(num, 1))
	if IsMysql {
		// Raw clause: rand() is passed through verbatim (MySQL-specific).
		num, err = qs.OrderClauses(
			order_clause.Clause(
				order_clause.Column(`rand()`),
				order_clause.Raw(),
			),
		).Filter("user_name", "astaxie").Count()
		throwFail(t, err)
		throwFail(t, AssertIs(num, 1))
	}
}
explode_data.jsonl/18133
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 394 }
[ 2830, 3393, 34605, 1155, 353, 8840, 836, 8, 341, 18534, 82, 1669, 294, 4365, 15685, 2556, 445, 872, 1138, 22431, 11, 1848, 1669, 32421, 43040, 13645, 2829, 1827, 5632, 445, 872, 1269, 497, 330, 77, 42340, 1827, 2507, 741, 9581, 19524, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestChannelArbitratorAnchors tests anchor-output handling by the channel
// arbitrator across a local force close: anchor inputs are swept while the
// commitment is unconfirmed, a single anchor resolver remains active once the
// close confirms, a resolver report is recorded for the claimed anchor, and
// the sweep deadlines equal the preimage-available incoming HTLC's refund
// timeout minus the current block height.
func TestChannelArbitratorAnchors(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	// Replace our mocked put report function with one which will push
	// reports into a channel for us to consume. We update this function
	// because our resolver will be created from the existing chanArb cfg.
	reports := make(chan *channeldb.ResolverReport)
	chanArbCtx.chanArb.cfg.PutResolverReport = putResolverReportInChannel(
		reports,
	)

	// Add a dummy payment hash to the preimage lookup.
	rHash := [lntypes.PreimageSize]byte{1, 2, 3}
	mockPreimageDB := newMockWitnessBeacon()
	mockPreimageDB.lookupPreimage[rHash] = rHash

	// Attach a mock PreimageDB and Registry to channel arbitrator.
	chanArb := chanArbCtx.chanArb
	chanArb.cfg.PreimageDB = mockPreimageDB
	chanArb.cfg.Registry = &mockRegistry{}

	// Setup two pre-confirmation anchor resolutions on the mock channel.
	chanArb.cfg.Channel.(*mockChannel).anchorResolutions = &lnwallet.AnchorResolutions{
		Local: &lnwallet.AnchorResolution{
			AnchorSignDescriptor: input.SignDescriptor{
				Output: &wire.TxOut{Value: 1},
			},
		},
		Remote: &lnwallet.AnchorResolution{
			AnchorSignDescriptor: input.SignDescriptor{
				Output: &wire.TxOut{Value: 1},
			},
		},
	}

	if err := chanArb.Start(nil); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer func() {
		if err := chanArb.Stop(); err != nil {
			t.Fatal(err)
		}
	}()

	signals := &ContractSignals{
		ShortChanID: lnwire.ShortChannelID{},
	}
	chanArb.UpdateContractSignals(signals)

	// Set current block height.
	heightHint := uint32(1000)
	chanArbCtx.chanArb.blocks <- int32(heightHint)

	// Create testing HTLCs. htlcWithPreimage is incoming with a known
	// preimage and the lower CLTV of the two.
	htlcExpiryBase := heightHint + uint32(10)
	htlcWithPreimage := channeldb.HTLC{
		HtlcIndex:     99,
		RefundTimeout: htlcExpiryBase + 2,
		RHash:         rHash,
		Incoming:      true,
	}
	htlc := channeldb.HTLC{
		HtlcIndex:     100,
		RefundTimeout: htlcExpiryBase + 3,
	}

	// We now send two HTLC updates, one for local HTLC set and the other
	// for remote HTLC set.
	newUpdate := &ContractUpdate{
		HtlcKey: LocalHtlcSet,
		// This will make the deadline of the local anchor resolution
		// to be htlcWithPreimage's CLTV minus heightHint since the
		// incoming HTLC (toLocalHTLCs) has a lower CLTV value and is
		// preimage available.
		Htlcs: []channeldb.HTLC{htlc, htlcWithPreimage},
	}
	err = chanArb.notifyContractUpdate(newUpdate)
	require.NoError(t, err)

	newUpdate = &ContractUpdate{
		HtlcKey: RemoteHtlcSet,
		// This will make the deadline of the remote anchor resolution
		// to be htlcWithPreimage's CLTV minus heightHint because the
		// incoming HTLC (toRemoteHTLCs) has a lower CLTV.
		Htlcs: []channeldb.HTLC{htlc, htlcWithPreimage},
	}
	err = chanArb.notifyContractUpdate(newUpdate)
	require.NoError(t, err)

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// The force close request should trigger broadcast of the commitment
	// transaction.
	chanArbCtx.AssertStateTransitions(
		StateBroadcastCommit,
		StateCommitmentBroadcasted,
	)

	// With the commitment tx still unconfirmed, we expect sweep attempts
	// for all three versions of the commitment transaction.
	<-chanArbCtx.sweeper.sweptInputs
	<-chanArbCtx.sweeper.sweptInputs

	// The close transaction should be produced without error.
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// Now notify about the local force close getting confirmed.
	closeTx := &wire.MsgTx{
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: wire.OutPoint{},
				Witness: [][]byte{
					{0x1},
					{0x2},
				},
			},
		},
	}

	anchorResolution := &lnwallet.AnchorResolution{
		AnchorSignDescriptor: input.SignDescriptor{
			Output: &wire.TxOut{
				Value: 1,
			},
		},
	}

	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx:          closeTx,
			HtlcResolutions:  &lnwallet.HtlcResolutions{},
			AnchorResolution: anchorResolution,
		},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
		CommitSet: CommitSet{
			ConfCommitKey: &LocalHtlcSet,
			HtlcSets:      map[HtlcSetKey][]channeldb.HTLC{},
		},
	}

	chanArbCtx.AssertStateTransitions(
		StateContractClosed,
		StateWaitingFullResolution,
	)

	// We expect to only have the anchor resolver active.
	if len(chanArb.activeResolvers) != 1 {
		t.Fatalf("expected single resolver, instead got: %v",
			len(chanArb.activeResolvers))
	}

	resolver := chanArb.activeResolvers[0]
	_, ok := resolver.(*anchorResolver)
	if !ok {
		t.Fatalf("expected anchor resolver, got %T", resolver)
	}

	// The anchor resolver is expected to re-offer the anchor input to the
	// sweeper.
	<-chanArbCtx.sweeper.sweptInputs

	// The mock sweeper immediately signals success for that input. This
	// should transition the channel to the resolved state.
	chanArbCtx.AssertStateTransitions(StateFullyResolved)
	select {
	case <-chanArbCtx.resolvedChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}

	// The resolver must have reported the claimed anchor output.
	anchorAmt := btcutil.Amount(
		anchorResolution.AnchorSignDescriptor.Output.Value,
	)
	spendTx := chanArbCtx.sweeper.sweepTx.TxHash()
	expectedReport := &channeldb.ResolverReport{
		OutPoint:        anchorResolution.CommitAnchor,
		Amount:          anchorAmt,
		ResolverType:    channeldb.ResolverTypeAnchor,
		ResolverOutcome: channeldb.ResolverOutcomeClaimed,
		SpendTxID:       &spendTx,
	}
	assertResolverReport(t, reports, expectedReport)

	// We expect two anchor inputs, the local and the remote to be swept.
	// Thus we should expect there are two deadlines used, both are equal
	// to htlcWithPreimage's CLTV minus current block height.
	require.Equal(t, 2, len(chanArbCtx.sweeper.deadlines))
	require.EqualValues(t,
		htlcWithPreimage.RefundTimeout-heightHint,
		chanArbCtx.sweeper.deadlines[0],
	)
	require.EqualValues(t,
		htlcWithPreimage.RefundTimeout-heightHint,
		chanArbCtx.sweeper.deadlines[1],
	)
}
explode_data.jsonl/74589
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2539 }
[ 2830, 3393, 9629, 6953, 4489, 81, 850, 2082, 331, 1087, 1155, 353, 8840, 836, 8, 341, 6725, 1669, 609, 16712, 6953, 4489, 81, 850, 2201, 515, 197, 24291, 25, 257, 3234, 3675, 345, 197, 8638, 23256, 25, 1281, 35190, 58795, 81, 850, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// Test_calculateEWMA checks CalculateKLinesEMA against reference EMA values
// observed in exchange UIs, using recorded ETHUSDT 5m candles (the ethusdt5m
// fixture). Results are truncated to two decimals before comparison.
func Test_calculateEWMA(t *testing.T) {
	type args struct {
		allKLines []types.KLine
		priceF    KLinePriceMapper
		window    int
	}
	// Decode the recorded fixture; a broken fixture is a programmer error,
	// hence panic rather than t.Fatal.
	var input []fixedpoint.Value
	if err := json.Unmarshal(ethusdt5m, &input); err != nil {
		panic(err)
	}
	tests := []struct {
		name string
		args args
		want float64 // expected EMA, truncated to 2 decimal places
	}{
		{
			name: "ETHUSDT EMA 7",
			args: args{
				allKLines: buildKLines(input),
				priceF:    KLineClosePriceMapper,
				window:    7,
			},
			want: 571.72, // with open price, binance desktop returns 571.45, trading view returns 570.8957, for close price, binance mobile returns 571.72
		},
		{
			name: "ETHUSDT EMA 25",
			args: args{
				allKLines: buildKLines(input),
				priceF:    KLineClosePriceMapper,
				window:    25,
			},
			want: 571.30,
		},
		{
			name: "ETHUSDT EMA 99",
			args: args{
				allKLines: buildKLines(input),
				priceF:    KLineClosePriceMapper,
				window:    99,
			},
			want: 577.62, // binance mobile uses 577.58
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := CalculateKLinesEMA(tt.args.allKLines, tt.args.priceF, tt.args.window)
			// Truncate (not round) to two decimals to match the reference values.
			got = math.Trunc(got*100.0) / 100.0
			if got != tt.want {
				t.Errorf("CalculateKLinesEMA() = %v, want %v", got, tt.want)
			}
		})
	}
}
explode_data.jsonl/18359
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 601 }
[ 2830, 3393, 24005, 11207, 37779, 4835, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 50960, 42, 16794, 3056, 9242, 11352, 2460, 198, 197, 87119, 37, 262, 730, 2460, 6972, 10989, 198, 197, 23545, 262, 526, 198, 197, 532, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestServiceErrors drives the out-of-band service through its main error
// paths: a failing protocol-state store, an unrecognized message type, and
// invalid state names during handling.
func TestServiceErrors(t *testing.T) {
	requestBytes, err := json.Marshal(
		&Request{
			Type:  ResponseMsgType,
			ID:    randomString(),
			Label: "test",
		},
	)
	require.NoError(t, err)

	msg, err := service.ParseDIDCommMsgMap(requestBytes)
	require.NoError(t, err)

	svc, err := New(&protocol.MockProvider{
		ServiceMap: map[string]interface{}{
			mediator.Coordination: &mockroute.MockMediatorSvc{},
		},
	})
	require.NoError(t, err)

	actionCh := make(chan service.DIDCommAction, 10)
	err = svc.RegisterActionEvent(actionCh)
	require.NoError(t, err)

	// fetch current state error: the store's get always fails, so handling
	// an inbound message cannot load the connection state.
	mockStore := &mockStore{get: func(s string) (bytes []byte, e error) {
		return nil, errors.New("error")
	}}
	prov := &protocol.MockProvider{
		ProtocolStateStoreProvider: mockstorage.NewCustomMockStoreProvider(
			mockStore,
		),
		ServiceMap: map[string]interface{}{
			mediator.Coordination: &mockroute.MockMediatorSvc{},
		},
	}
	svc, err = New(prov)
	require.NoError(t, err)
	payload := generateRequestMsgPayload(t, prov, randomString(), "")
	_, err = svc.HandleInbound(payload, service.EmptyDIDCommContext())
	require.Error(t, err)
	require.Contains(t, err.Error(), "cannot fetch state from store")

	// Fresh service with a working store for the remaining cases.
	svc, err = New(&protocol.MockProvider{
		ServiceMap: map[string]interface{}{
			mediator.Coordination: &mockroute.MockMediatorSvc{},
		},
	})
	require.NoError(t, err)

	// invalid message type
	msg["@type"] = "invalid"
	svc.connectionRecorder, err = connection.NewRecorder(&protocol.MockProvider{})
	require.NoError(t, err)

	_, err = svc.HandleInbound(msg, service.EmptyDIDCommContext())
	require.Error(t, err)
	require.Contains(t, err.Error(), "unrecognized msgType: invalid")

	// test handle - invalid state name (message carries no next state)
	msg["@type"] = ResponseMsgType
	m := &message{Msg: msg, ThreadID: randomString()}
	err = svc.handleWithoutAction(m)
	require.Error(t, err)
	require.Contains(t, err.Error(), "invalid state name:")

	// invalid state name: a known state that cannot be executed from here.
	m.NextStateName = StateIDInvited
	m.ConnRecord = &connection.Record{ConnectionID: "abc"}
	err = svc.handleWithoutAction(m)
	require.Error(t, err)
	require.Contains(t, err.Error(), "failed to execute state 'invited':")
}
explode_data.jsonl/30528
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 802 }
[ 2830, 3393, 1860, 13877, 1155, 353, 8840, 836, 8, 341, 23555, 7078, 11, 1848, 1669, 2951, 37271, 1006, 197, 197, 5, 1900, 515, 298, 27725, 25, 220, 5949, 6611, 929, 345, 298, 29580, 25, 262, 4194, 703, 3148, 298, 82126, 25, 330, 194...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRegisterNewCluster_NoError(t *testing.T) { testClusterName, color := "name", uint32(4) manager := GetManagerInstance() clearManager() manager.SetDBConnection(&database.MockDBConnection{}) cluster, err := manager.RegisterNewCluster(testClusterName, color) if err != nil { t.Fatalf("Unexpected error: %s", err.Error()) } if *cluster.Name != testClusterName { t.Errorf("Expected cluster with name %s got %s", testClusterName, *cluster.Name) } else if *cluster.Color != color { t.Errorf("Expected cluster with color %d got %d", color, *cluster.Color) } if cacheCluster, ok := manager.clusterCache[*cluster.Name]; ok { if cacheCluster != cluster { t.Error("Manager stored wrong cluster in cache for Name") } } else { t.Error("No cluster found in cache for ID") } }
explode_data.jsonl/24674
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 285 }
[ 2830, 3393, 8690, 3564, 28678, 36989, 1454, 1155, 353, 8840, 836, 8, 341, 18185, 28678, 675, 11, 1894, 1669, 330, 606, 497, 2622, 18, 17, 7, 19, 340, 92272, 1669, 2126, 2043, 2523, 741, 40408, 2043, 2822, 92272, 4202, 3506, 4526, 2099...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestValidTimezone(t *testing.T) { var tests = []validationTest{ { value: "Europe/Berli", shouldErr: true, }, { value: "APAC", shouldErr: true, }, { value: "Europe/Berlin", shouldErr: false, }, { value: "UTC", shouldErr: false, }, } runValidations(t, tests, "timezone", IsValidTimezone) }
explode_data.jsonl/77936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 178 }
[ 2830, 3393, 4088, 1462, 8684, 1155, 353, 8840, 836, 8, 1476, 2405, 7032, 284, 3056, 12284, 2271, 515, 197, 197, 515, 298, 16309, 25, 257, 330, 30780, 16276, 261, 742, 756, 298, 197, 5445, 7747, 25, 830, 345, 197, 197, 1583, 197, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_namespaceInformerWatchFunc(t *testing.T) { c, err := newFakeAPIClientset(k8sconfig.APIConfig{}) assert.NoError(t, err) watchFunc := namespaceInformerWatchFunc(c) opts := metav1.ListOptions{} obj, err := watchFunc(opts) assert.NoError(t, err) assert.NotNil(t, obj) }
explode_data.jsonl/41508
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 41571, 641, 34527, 14247, 9626, 1155, 353, 8840, 836, 8, 341, 1444, 11, 1848, 1669, 501, 52317, 2537, 98900, 746, 5969, 23, 82, 1676, 24922, 2648, 37790, 6948, 35699, 1155, 11, 1848, 340, 6692, 754, 9626, 1669, 4473, 641, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestConvertPosts verifies the v0.4.0 -> v0.10.0 post migration: every field
// is carried over unchanged and each Media entry becomes an Attachment with a
// nil Tags slice.
func TestConvertPosts(t *testing.T) {
	// Parent and child share the same bech32 creator address in this fixture.
	parentPostCreator, err := sdk.AccAddressFromBech32("desmos1mmeu5t0j5284p7jkergq9hyejlhdwkzp25y84l")
	require.NoError(t, err)
	postCreator, err := sdk.AccAddressFromBech32("desmos1mmeu5t0j5284p7jkergq9hyejlhdwkzp25y84l")
	require.NoError(t, err)

	// The child post is created one hour after its parent.
	parentCreationTime := time.Now().UTC()
	postCreationTime := parentCreationTime.Add(time.Hour)

	subspace := "4e188d9c17150037d5199bbdb91ae1eb2a78a15aca04cb35530cccb81494b36e"

	// IDs are derived from (creation time, creator, subspace).
	parentID := v040.ComputeID(parentCreationTime, parentPostCreator, subspace)
	postID := v040.ComputeID(postCreationTime, postCreator, subspace)

	posts := []v040.Post{
		{
			PostID:         parentID,
			ParentID:       "",
			Message:        "Message",
			AllowsComments: true,
			Subspace:       "4e188d9c17150037d5199bbdb91ae1eb2a78a15aca04cb35530cccb81494b36e",
			OptionalData:   map[string]string{},
			Created:        parentCreationTime,
			LastEdited:     time.Time{},
			Creator:        parentPostCreator,
			Medias:         []v040.PostMedia{{URI: "https://uri.com", MimeType: "text/plain"}},
		},
		{
			PostID:         postID,
			ParentID:       parentID,
			Message:        "Message",
			AllowsComments: true,
			Subspace:       "4e188d9c17150037d5199bbdb91ae1eb2a78a15aca04cb35530cccb81494b36e",
			OptionalData:   map[string]string{},
			Created:        postCreationTime,
			LastEdited:     time.Time{},
			Creator:        postCreator,
			Medias:         []v040.PostMedia{{URI: "https://uri.com", MimeType: "text/plain"}},
		},
	}

	// Same posts in the new schema: Medias become Attachments (Tags: nil).
	expectedPosts := []v0100posts.Post{
		{
			PostID:         parentID,
			ParentID:       "",
			Message:        "Message",
			AllowsComments: true,
			Subspace:       "4e188d9c17150037d5199bbdb91ae1eb2a78a15aca04cb35530cccb81494b36e",
			OptionalData:   map[string]string{},
			Created:        parentCreationTime,
			LastEdited:     time.Time{},
			Creator:        parentPostCreator,
			Attachments:    []v0100posts.Attachment{{URI: "https://uri.com", MimeType: "text/plain", Tags: nil}},
		},
		{
			PostID:         postID,
			ParentID:       parentID,
			Message:        "Message",
			AllowsComments: true,
			Subspace:       "4e188d9c17150037d5199bbdb91ae1eb2a78a15aca04cb35530cccb81494b36e",
			OptionalData:   map[string]string{},
			Created:        postCreationTime,
			LastEdited:     time.Time{},
			Creator:        postCreator,
			Attachments:    []v0100posts.Attachment{{URI: "https://uri.com", MimeType: "text/plain", Tags: nil}},
		},
	}

	actualPosts := v0100posts.ConvertPosts(posts)
	require.Equal(t, expectedPosts, actualPosts)
}
explode_data.jsonl/81209
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1225 }
[ 2830, 3393, 12012, 19631, 1155, 353, 8840, 836, 8, 341, 24804, 4133, 31865, 11, 1848, 1669, 45402, 77538, 4286, 3830, 3430, 331, 18, 17, 445, 5799, 8631, 16, 76, 2660, 84, 20, 83, 15, 73, 20, 17, 23, 19, 79, 22, 41808, 2375, 80, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestTriDenseZero checks that TriDense.Zero clears only the elements inside
// the stored triangle and never touches the padding or the opposite triangle.
func TestTriDenseZero(t *testing.T) {
	t.Parallel()
	// Elements that equal 1 should be set to zero, elements that equal -1
	// should remain unchanged. The fixtures use Stride 5 with N 4, so the
	// fifth column of each row is padding and must stay -1.
	for _, test := range []*TriDense{
		{
			mat: blas64.Triangular{
				Uplo:   blas.Upper,
				N:      4,
				Stride: 5,
				Data: []float64{
					1, 1, 1, 1, -1,
					-1, 1, 1, 1, -1,
					-1, -1, 1, 1, -1,
					-1, -1, -1, 1, -1,
				},
			},
		},
		{
			mat: blas64.Triangular{
				Uplo:   blas.Lower,
				N:      4,
				Stride: 5,
				Data: []float64{
					1, -1, -1, -1, -1,
					1, 1, -1, -1, -1,
					1, 1, 1, -1, -1,
					1, 1, 1, 1, -1,
				},
			},
		},
	} {
		// Snapshot the raw data so we can tell which entries changed.
		dataCopy := make([]float64, len(test.mat.Data))
		copy(dataCopy, test.mat.Data)
		test.Zero()
		for i, v := range test.mat.Data {
			if dataCopy[i] != -1 && v != 0 {
				t.Errorf("Matrix not zeroed in bounds")
			}
			if dataCopy[i] == -1 && v != -1 {
				t.Errorf("Matrix zeroed out of bounds")
			}
		}
	}
}
explode_data.jsonl/25857
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 525 }
[ 2830, 3393, 21884, 35, 1117, 17999, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 322, 34157, 429, 6144, 220, 16, 1265, 387, 738, 311, 7168, 11, 5424, 429, 6144, 481, 16, 198, 197, 322, 1265, 7146, 34857, 624, 2023, 835...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
// Test_BallotList covers gt.BallotList against a mocked RPC endpoint:
// an RPC-level error, an unmarshalable body, and a successful response
// compared to the golden fixture.
func Test_BallotList(t *testing.T) {
	goldenBallotList := getResponse(ballotList).(*BallotList)

	type want struct {
		wantErr     bool
		containsErr string
		ballotList  BallotList
	}

	cases := []struct {
		name        string
		inputHanler http.Handler
		want
	}{
		{
			"handles RPC error",
			gtGoldenHTTPMock(ballotListHandlerMock(readResponse(rpcerrors), blankHandler)),
			want{
				true,
				"failed to get ballot list",
				BallotList{},
			},
		},
		{
			"failed to unmarshal",
			gtGoldenHTTPMock(ballotListHandlerMock([]byte(`junk`), blankHandler)),
			want{
				true,
				"failed to unmarshal ballot list",
				BallotList{},
			},
		},
		{
			"is successful",
			gtGoldenHTTPMock(ballotListHandlerMock(readResponse(ballotList), blankHandler)),
			want{
				false,
				"",
				*goldenBallotList,
			},
		},
	}

	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			// Serve the canned handler and point a fresh client at it.
			server := httptest.NewServer(tt.inputHanler)
			defer server.Close()

			gt, err := New(server.URL)
			assert.Nil(t, err)

			// The argument is a block hash; the mock ignores it.
			ballotList, err := gt.BallotList("BLzGD63HA4RP8Fh5xEtvdQSMKa2WzJMZjQPNVUc4Rqy8Lh5BEY1")
			checkErr(t, tt.wantErr, tt.containsErr, err)
			assert.Equal(t, tt.want.ballotList, ballotList)
		})
	}
}
explode_data.jsonl/48371
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 572 }
[ 2830, 3393, 1668, 541, 354, 852, 1155, 353, 8840, 836, 8, 341, 3174, 813, 268, 35907, 354, 852, 1669, 633, 2582, 79319, 354, 852, 568, 4071, 35907, 354, 852, 692, 13158, 1366, 2036, 341, 197, 50780, 7747, 257, 1807, 198, 197, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSalesforceAPI_Describe exercises Resources.Describe error paths: a nil
// describe resource and a session pointing at a non-API URL. Both are
// expected to fail and return the zero DescribeValue.
func TestSalesforceAPI_Describe(t *testing.T) {
	type fields struct {
		metadata *metadata
		describe *describe
		dml      *dml
		query    *query
	}
	type args struct {
		sobject string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    DescribeValue
		wantErr bool
	}{
		{
			// Zero-valued Resources: describe is nil.
			name:    "No Describe field",
			want:    DescribeValue{},
			wantErr: true,
		},
		{
			// Session URL is not a Salesforce endpoint, so the call fails.
			name: "Invalid Args",
			fields: fields{
				describe: &describe{
					session: &session.Mock{
						URL: "http://wwww.google.com",
					},
				},
			},
			want:    DescribeValue{},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := &Resources{
				metadata: tt.fields.metadata,
				describe: tt.fields.describe,
				dml:      tt.fields.dml,
				query:    tt.fields.query,
			}
			got, err := a.Describe(tt.args.sobject)
			if (err != nil) != tt.wantErr {
				t.Errorf("SalesforceAPI.Describe() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("SalesforceAPI.Describe() = %v, want %v", got, tt.want)
			}
		})
	}
}
explode_data.jsonl/45139
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 556 }
[ 2830, 3393, 35418, 8833, 7082, 98054, 3114, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 2109, 7603, 353, 17637, 198, 197, 82860, 353, 12332, 198, 197, 2698, 1014, 414, 353, 67, 1014, 198, 197, 27274, 262, 353, 1631, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestThemeFiles(t *testing.T) { themePath, err := ioutil.TempDir("", "alice-lg-tmp-theme") if err != nil { t.Error(err) } defer os.RemoveAll(themePath) // Create some "stylesheets" and a "script" touchFile(themePath, "style.css") touchFile(themePath, "extra.css") touchFile(themePath, "script.js") // Load theme theme := NewTheme(ThemeConfig{ BasePath: "/theme", Path: themePath, }) if err != nil { t.Error(err) } // Check file presence scripts := theme.Scripts() if len(scripts) != 1 { t.Error("Expected one script file: script.js") } stylesheets := theme.Stylesheets() if len(stylesheets) != 2 { t.Error("Expected two stylesheets: {style, extra}.css") } // Check uri / path mapping script := scripts[0] if script != "script.js" { t.Error("Expected script.js to be included in scripts") } }
explode_data.jsonl/54316
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 12594, 10809, 1155, 353, 8840, 836, 8, 341, 197, 9047, 1820, 11, 1848, 1669, 43144, 65009, 6184, 19814, 330, 63195, 7510, 2385, 1307, 33185, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, 197, 532, 16867, 2643,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestWrapRateLimit checks that the rate-limit-aware client waits until the
// advertised reset time after a 429 response and then retries successfully.
func TestWrapRateLimit(t *testing.T) {
	start := time.Now()
	first := true
	// First request: answer 429 with rate-limit headers whose reset is one
	// second ahead. Every later request answers 200.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if first {
			t.Log(start.Unix())
			w.Header().Set("X-RateLimit-Limit", "1")
			w.Header().Set("X-RateLimit-Remaining", "0")
			w.Header().Set("X-RateLimit-Reset", fmt.Sprint(start.Add(time.Second).Unix()))
			w.WriteHeader(http.StatusTooManyRequests)
			first = !first // flip so the retry hits the 200 branch
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	s := httptest.NewServer(h)
	defer s.Close()
	c := Wrap(s.Client(), StaticToken(""), WithRateLimit(), WithDebug(true))
	r, err := c.Get(s.URL)
	if err != nil {
		t.Error(err)
	}
	if r.StatusCode != http.StatusOK {
		t.Errorf("Expected status code to be %d but got %d", http.StatusOK, r.StatusCode)
	}
	// The transparent retry must have slept through the reset window.
	elapsed := time.Since(start)
	if elapsed < time.Second {
		t.Errorf("Time since start is sooner than expected. Expected >= 1s but got %s", elapsed)
	}
}
explode_data.jsonl/66305
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 372 }
[ 2830, 3393, 26787, 11564, 16527, 1155, 353, 8840, 836, 8, 341, 21375, 1669, 882, 13244, 741, 42190, 1669, 830, 271, 9598, 1669, 1758, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 743, 1156, 341, 298, 3244, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestResponseError_Error_WithNoMessages(t *testing.T) { err := &ResponseError{ Status: 400, } expectedMsg := "KairosDB response error: status=400, messages=[]" assert.Equal(t, expectedMsg, err.Error()) }
explode_data.jsonl/56654
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 75 }
[ 2830, 3393, 2582, 1454, 28651, 62, 2354, 2753, 15820, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 609, 2582, 1454, 515, 197, 58321, 25, 220, 19, 15, 15, 345, 197, 630, 42400, 6611, 1669, 330, 42, 2143, 3630, 3506, 2033, 1465, 25, 2639...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestTimeBeginPeriod(t *testing.T) { const TIMERR_NOERROR = 0 if *runtime.TimeBeginPeriodRetValue != TIMERR_NOERROR { t.Fatalf("timeBeginPeriod failed: it returned %d", *runtime.TimeBeginPeriodRetValue) } }
explode_data.jsonl/82617
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 79 }
[ 2830, 3393, 1462, 11135, 23750, 1155, 353, 8840, 836, 8, 341, 4777, 17742, 2650, 9100, 3682, 284, 220, 15, 198, 743, 353, 22255, 16299, 11135, 23750, 12020, 1130, 961, 17742, 2650, 9100, 3682, 341, 197, 3244, 30762, 445, 1678, 11135, 23...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestCommandHandler_WithValidationNoError(t *testing.T) { inner := &mocks.CommandHandler{} m := NewMiddleware() h := eh.UseCommandHandlerMiddleware(inner, m) cmd := &mocks.Command{ ID: uuid.New(), Content: "content", } c := CommandWithValidation(cmd, func() error { return nil }) if err := h.HandleCommand(context.Background(), c); err != nil { t.Error("there should be no error:", err) } if !reflect.DeepEqual(inner.Commands, []eh.Command{c}) { t.Error("the command should have been handled:", inner.Commands) } }
explode_data.jsonl/24292
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 195 }
[ 2830, 3393, 4062, 3050, 62, 2354, 13799, 2753, 1454, 1155, 353, 8840, 836, 8, 341, 197, 4382, 1669, 609, 16712, 82, 12714, 3050, 16094, 2109, 1669, 1532, 24684, 741, 9598, 1669, 35246, 9046, 4062, 3050, 24684, 68603, 11, 296, 340, 25920...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWrap(t *testing.T) { err := errors.New("value") Handlef("foo: %w", &err) g := err.Error() w := "foo: value" if g != w { t.Errorf("g == %q, want %q", g, w) } }
explode_data.jsonl/15636
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 26787, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 5975, 7121, 445, 957, 1138, 197, 6999, 69, 445, 7975, 25, 1018, 86, 497, 609, 615, 340, 3174, 1669, 1848, 6141, 741, 6692, 1669, 330, 7975, 25, 897, 698, 743, 342, 961, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestAndMatcherMatch checks the composite matcher: a request matches only if
// both the path matcher and the header matcher accept it. It also covers the
// synthesized content-type header and the exclusion of binary (-bin) headers.
func TestAndMatcherMatch(t *testing.T) {
	tests := []struct {
		name string
		pm   pathMatcher
		hm   matcher.HeaderMatcher
		info iresolver.RPCInfo
		want bool
	}{
		{
			name: "both match",
			pm:   newPathExactMatcher("/a/b", false),
			hm:   matcher.NewHeaderExactMatcher("th", "tv"),
			info: iresolver.RPCInfo{
				Method:  "/a/b",
				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: true,
		},
		{
			name: "both match with path case insensitive",
			pm:   newPathExactMatcher("/A/B", true),
			hm:   matcher.NewHeaderExactMatcher("th", "tv"),
			info: iresolver.RPCInfo{
				Method:  "/a/b",
				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: true,
		},
		{
			name: "only one match",
			pm:   newPathExactMatcher("/a/b", false),
			hm:   matcher.NewHeaderExactMatcher("th", "tv"),
			info: iresolver.RPCInfo{
				Method:  "/z/y",
				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: false,
		},
		{
			name: "both not match",
			pm:   newPathExactMatcher("/z/y", false),
			hm:   matcher.NewHeaderExactMatcher("th", "abc"),
			info: iresolver.RPCInfo{
				Method:  "/a/b",
				Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
			},
			want: false,
		},
		{
			name: "fake header",
			pm:   newPathPrefixMatcher("/", false),
			hm:   matcher.NewHeaderExactMatcher("content-type", "fake"),
			info: iresolver.RPCInfo{
				Method: "/a/b",
				Context: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs(
					"content-type", "fake",
				)),
			},
			want: true,
		},
		{
			name: "binary header",
			pm:   newPathPrefixMatcher("/", false),
			hm:   matcher.NewHeaderPresentMatcher("t-bin", true),
			info: iresolver.RPCInfo{
				Method: "/a/b",
				Context: grpcutil.WithExtraMetadata(
					metadata.NewOutgoingContext(context.Background(), metadata.Pairs("t-bin", "123")),
					metadata.Pairs(
						"content-type", "fake",
					)),
			},
			// Shouldn't match binary header, even though it's in metadata.
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcher{tt.hm}, nil)
			if got := a.Match(tt.info); got != tt.want {
				t.Errorf("match() = %v, want %v", got, tt.want)
			}
		})
	}
}
explode_data.jsonl/10981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1067 }
[ 2830, 3393, 3036, 37554, 8331, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 86511, 256, 1815, 37554, 198, 197, 9598, 76, 256, 36052, 15753, 37554, 198, 197, 27043, 600, 48943, 2013, 4872, 1731, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSearchArchivedTeamsByName(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() id := model.NewId() name := "name" + id displayName := "Name " + id th.CheckCommand(t, "team", "create", "--name", name, "--display_name", displayName) th.CheckCommand(t, "team", "archive", name) output := th.CheckCommand(t, "team", "search", name) assert.Contains(t, output, "(archived)", "should have archived team") }
explode_data.jsonl/59046
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 155 }
[ 2830, 3393, 5890, 18727, 2221, 60669, 16898, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 2822, 15710, 1669, 1614, 7121, 764, 741, 11609, 1669, 330, 606, 1, 488, 877, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTicker_pingPong(t *testing.T) { r := require.New(t) tm := getTime() lc := LayerConv{5 * time.Second, tm} ttl := lc.TimeToLayer(tm.Add(9 * time.Second)) r.Equal(types.LayerID(2), lc.TimeToLayer(lc.LayerToTime(ttl))) }
explode_data.jsonl/40199
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 104 }
[ 2830, 3393, 87278, 71661, 47, 644, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1373, 7121, 1155, 340, 3244, 76, 1669, 60213, 741, 8810, 66, 1669, 22735, 34892, 90, 20, 353, 882, 32435, 11, 17333, 532, 3244, 11544, 1669, 36213, 16299, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestURL(t *testing.T) { r := URL("https://example.com/foo/bah") if err := r.setupAction("GET"); err != nil { t.Error(err) } assertURI(t, "https://example.com/foo/bah", r.Request.URL.String()) }
explode_data.jsonl/24754
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 83 }
[ 2830, 3393, 3144, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 5548, 445, 2428, 1110, 8687, 905, 60555, 3470, 1466, 5130, 743, 1848, 1669, 435, 25338, 2512, 445, 3806, 5038, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, 197, 630, 694...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestCompression(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockMessageProducer := NewMockSQSProducer(ctrl) compressionProducer := CompressionSQSProducer{ Wrapped: mockMessageProducer, } originalMessage := "hello world!" encodedCompressedMessage := "H4sIAAAAAAAA/8pIzcnJVyjPL8pJUQQEAAD//23CtAMMAAAA" mockMessageProducer.EXPECT().ProduceMessage([]byte(encodedCompressedMessage)).Return(nil) e := compressionProducer.ProduceMessage([]byte(originalMessage)) assert.Nil(t, e) }
explode_data.jsonl/76880
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 191 }
[ 2830, 3393, 81411, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 77333, 2052, 45008, 1669, 1532, 11571, 64308, 50, 45008, 62100, 692, 32810, 4011, 45008, 1669, 66161, 643...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPolymorphicEvent(t *testing.T) { t.Parallel() raw := &RawKVEntry{ StartTs: 99, CRTs: 100, OpType: OpTypePut, RegionID: 2, } resolved := &RawKVEntry{ OpType: OpTypeResolved, CRTs: 101, } polyEvent := NewPolymorphicEvent(raw) require.Equal(t, raw, polyEvent.RawKV) require.Equal(t, raw.CRTs, polyEvent.CRTs) require.Equal(t, raw.StartTs, polyEvent.StartTs) require.Equal(t, raw.RegionID, polyEvent.RegionID()) rawResolved := &RawKVEntry{CRTs: resolved.CRTs, OpType: OpTypeResolved} polyEvent = NewPolymorphicEvent(resolved) require.Equal(t, rawResolved, polyEvent.RawKV) require.Equal(t, resolved.CRTs, polyEvent.CRTs) require.Equal(t, uint64(0), polyEvent.StartTs) }
explode_data.jsonl/20799
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 308 }
[ 2830, 3393, 14658, 1600, 40869, 1556, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 76559, 1669, 609, 20015, 82707, 5874, 515, 197, 65999, 52793, 25, 220, 220, 24, 24, 345, 197, 6258, 5350, 82, 25, 257, 220, 16, 15, 15, 345,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMovingAverage evaluates a transform.moving_average expression over a
// fake backend and compares the resulting series against hand-computed means
// within a small epsilon.
func TestMovingAverage(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{MetricKey: "series", TagSet: api.NewTagSet()})
	fakeBackend := movingAverageBackend{}
	timerange, err := api.NewTimerange(1200, 1500, 100)
	if err != nil {
		t.Fatalf(err.Error())
	}
	// moving_average over "series" with a 300ms window.
	expression := function.Memoize(&expression.FunctionExpression{
		FunctionName: "transform.moving_average",
		GroupBy:      []string{},
		Arguments: []function.Expression{
			function.Memoize(&expression.MetricFetchExpression{MetricName: "series", Predicate: predicate.TruePredicate{}}),
			function.Memoize(expression.Duration{Source: "300ms", Duration: 300 * time.Millisecond}),
		},
	})
	backend := fakeBackend
	result, err := function.EvaluateToSeriesList(
		expression,
		function.EvaluationContextBuilder{
			MetricMetadataAPI:    fakeAPI,
			TimeseriesStorageAPI: backend,
			Timerange:            timerange,
			SampleMethod:         timeseries.SampleMean,
			FetchLimit:           function.NewFetchCounter(1000),
			Registry:             registry.Default(),
			Ctx:                  context.Background(),
		}.Build(),
	)
	if err != nil {
		t.Errorf(err.Error())
	}
	// Expected window means for the fake backend's data.
	expected := []float64{4, 3, 11.0 / 3, 5}
	if len(result.Series) != 1 {
		t.Fatalf("expected exactly 1 returned series")
	}
	if len(result.Series[0].Values) != len(expected) {
		t.Fatalf("expected exactly %d values in returned series, but got %d", len(expected), len(result.Series[0].Values))
	}
	// Compare with a tolerance to absorb floating-point error.
	const eps = 1e-7
	for i := range expected {
		if math.Abs(result.Series[0].Values[i]-expected[i]) > eps {
			t.Fatalf("expected %+v but got %+v", expected, result.Series[0].Values)
		}
	}
}
explode_data.jsonl/57470
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 638 }
[ 2830, 3393, 39732, 26292, 1155, 353, 8840, 836, 8, 341, 1166, 726, 7082, 1669, 68909, 7121, 52317, 54310, 14610, 7082, 741, 1166, 726, 7082, 1904, 12443, 26040, 11212, 632, 24827, 23676, 3556, 54310, 90, 54310, 1592, 25, 330, 19880, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestLoadConfig(t *testing.T) { factories, err := componenttest.ExampleComponents() assert.Nil(t, err) factory := NewFactory() factories.Exporters[configmodels.Type(typeStr)] = factory cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) require.NoError(t, err) require.NotNil(t, cfg) e0 := cfg.Exporters["signalfx"] // Realm doesn't have a default value so set it directly. defaultCfg := factory.CreateDefaultConfig().(*Config) defaultCfg.Realm = "ap0" assert.Equal(t, defaultCfg, e0) expectedName := "signalfx/allsettings" e1 := cfg.Exporters[expectedName] expectedCfg := Config{ ExporterSettings: configmodels.ExporterSettings{ TypeVal: configmodels.Type(typeStr), NameVal: expectedName, }, AccessToken: "testToken", Realm: "us1", Headers: map[string]string{ "added-entry": "added value", "dot.test": "test", }, Timeout: 2 * time.Second, AccessTokenPassthroughConfig: splunk.AccessTokenPassthroughConfig{ AccessTokenPassthrough: false, }, SendCompatibleMetrics: true, TranslationRules: []translation.Rule{ { Action: translation.ActionRenameDimensionKeys, Mapping: map[string]string{ "k8s.cluster.name": "kubernetes_cluster", }, }, }, DeltaTranslationTTL: 3600, } assert.Equal(t, &expectedCfg, e1) te, err := factory.CreateMetricsExporter(context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, e1) require.NoError(t, err) require.NotNil(t, te) }
explode_data.jsonl/60395
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 587 }
[ 2830, 3393, 5879, 2648, 1155, 353, 8840, 836, 8, 341, 1166, 52893, 11, 1848, 1669, 3692, 1944, 5121, 1516, 10443, 741, 6948, 59678, 1155, 11, 1848, 692, 1166, 2919, 1669, 1532, 4153, 741, 1166, 52893, 81077, 388, 58, 1676, 6507, 10184, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTypeToPtrPtrPtrPtrType(t *testing.T) { type Type2 struct { A ****float64 } t2 := Type2{} t2.A = new(***float64) *t2.A = new(**float64) **t2.A = new(*float64) ***t2.A = new(float64) ****t2.A = 27.4 t2pppp := new(***Type2) if err := encAndDec(t2, t2pppp); err != nil { t.Fatal(err) } if ****(****t2pppp).A != ****t2.A { t.Errorf("wrong value after decode: %g not %g", ****(****t2pppp).A, ****t2.A) } }
explode_data.jsonl/43385
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 213 }
[ 2830, 3393, 929, 1249, 5348, 5348, 5348, 5348, 929, 1155, 353, 8840, 836, 8, 341, 13158, 3990, 17, 2036, 341, 197, 22985, 30704, 3649, 21, 19, 198, 197, 532, 3244, 17, 1669, 3990, 17, 16094, 3244, 17, 875, 284, 501, 7, 12210, 3649, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNewPRSignedByKeyData(t *testing.T) { testData := []byte("abc") _pr, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, testData, NewPRMMatchRepoDigestOrExact()) require.NoError(t, err) pr, ok := _pr.(*prSignedBy) require.True(t, ok) assert.Equal(t, testData, pr.KeyData) // Failure cases tested in TestNewPRSignedBy. }
explode_data.jsonl/36504
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 133 }
[ 2830, 3393, 3564, 6480, 49312, 67749, 1043, 1155, 353, 8840, 836, 8, 341, 18185, 1043, 1669, 3056, 3782, 445, 13683, 1138, 197, 5294, 11, 1848, 1669, 1532, 6480, 49312, 67749, 1043, 3759, 33, 97964, 38, 11383, 8850, 11, 67348, 11, 1532,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCache_Get(t *testing.T) { type fields struct { m *collect.SafeMap } type args struct { id string } tests := []struct { name string fields fields args args want *IO }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Cache{ m: tt.fields.m, } if got := c.Get(tt.args.id); !reflect.DeepEqual(got, tt.want) { t.Errorf("Cache.Get() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/59941
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 229 }
[ 2830, 3393, 8233, 13614, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 2109, 353, 17384, 89828, 2227, 198, 197, 532, 13158, 2827, 2036, 341, 197, 15710, 914, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 256, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMultiLookup(t *testing.T) { etc := newEtcdPlugin() etc.Zones = []string{"skydns.test.", "miek.nl."} etc.Next = test.ErrorHandler() for _, serv := range servicesMulti { set(t, etc, serv.Key, 0, serv) defer delete(t, etc, serv.Key) } for _, tc := range dnsTestCasesMulti { m := tc.Msg() rec := dnstest.NewRecorder(&test.ResponseWriter{}) _, err := etc.ServeDNS(ctxt, rec, m) if err != nil { t.Errorf("Expected no error, got %v", err) return } resp := rec.Msg if err := test.SortAndCheck(resp, tc); err != nil { t.Error(err) } } }
explode_data.jsonl/57927
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 252 }
[ 2830, 3393, 20358, 34247, 1155, 353, 8840, 836, 8, 341, 197, 12107, 1669, 501, 31860, 4385, 11546, 741, 197, 12107, 13476, 3154, 284, 3056, 917, 4913, 26684, 45226, 5958, 10465, 330, 76, 35007, 30507, 1189, 532, 197, 12107, 18501, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestUnitRenewRestfulSession(t *testing.T) { accessor := getSimpleTokenAccessor() oldToken, oldMasterToken, oldSessionID := "oldtoken", "oldmaster", int64(100) newToken, newMasterToken, newSessionID := "newtoken", "newmaster", int64(200) postTestSuccessWithNewTokens := func(_ context.Context, _ *snowflakeRestful, _ *url.URL, headers map[string]string, _ []byte, _ time.Duration, _ bool) (*http.Response, error) { if headers[headerAuthorizationKey] != fmt.Sprintf(headerSnowflakeToken, oldMasterToken) { t.Fatalf("authorization key doesn't match, %v vs %v", headers[headerAuthorizationKey], fmt.Sprintf(headerSnowflakeToken, oldMasterToken)) } tr := &renewSessionResponse{ Data: renewSessionResponseMain{ SessionToken: newToken, MasterToken: newMasterToken, SessionID: newSessionID, }, Message: "", Success: true, } ba, err := json.Marshal(tr) if err != nil { t.Fatalf("failed to serialize token response %v", err) } return &http.Response{ StatusCode: http.StatusOK, Body: &fakeResponseBody{body: ba}, }, nil } sr := &snowflakeRestful{ FuncPost: postTestAfterRenew, TokenAccessor: accessor, } err := renewRestfulSession(context.Background(), sr, time.Second) if err != nil { t.Fatalf("err: %v", err) } sr.FuncPost = postTestError err = renewRestfulSession(context.Background(), sr, time.Second) if err == nil { t.Fatal("should have failed to run post request after the renewal") } sr.FuncPost = postTestAppBadGatewayError err = renewRestfulSession(context.Background(), sr, time.Second) if err == nil { t.Fatal("should have failed to run post request after the renewal") } sr.FuncPost = postTestSuccessButInvalidJSON err = renewRestfulSession(context.Background(), sr, time.Second) if err == nil { t.Fatal("should have failed to run post request after the renewal") } accessor.SetTokens(oldToken, oldMasterToken, oldSessionID) sr.FuncPost = postTestSuccessWithNewTokens err = renewRestfulSession(context.Background(), sr, time.Second) if err != nil { t.Fatal("should not have failed to run post 
request after the renewal") } token, masterToken, sessionID := accessor.GetTokens() if token != newToken { t.Fatalf("unexpected new token %v", token) } if masterToken != newMasterToken { t.Fatalf("unexpected new master token %v", masterToken) } if sessionID != newSessionID { t.Fatalf("unexpected new session id %v", sessionID) } }
explode_data.jsonl/44746
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 853 }
[ 2830, 3393, 4562, 34625, 365, 12416, 1262, 5283, 1155, 353, 8840, 836, 8, 341, 197, 5211, 269, 1669, 633, 16374, 3323, 29889, 741, 61828, 3323, 11, 2310, 18041, 3323, 11, 2310, 5283, 915, 1669, 330, 813, 5839, 497, 330, 813, 13629, 49...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPerChannelLimitsSetToUnlimitedPrintedCorrectly(t *testing.T) { opts := GetDefaultOptions() opts.MaxSubscriptions = 10 opts.MaxMsgs = 10 opts.MaxBytes = 64 * 1024 opts.MaxAge = time.Hour clfoo := stores.ChannelLimits{} clfoo.MaxSubscriptions = -1 clfoo.MaxMsgs = -1 clfoo.MaxBytes = -1 clfoo.MaxAge = -1 sl := &opts.StoreLimits sl.AddPerChannel("foo", &clfoo) l := &captureNoticesLogger{} opts.EnableLogging = true opts.CustomLogger = l s := runServerWithOpts(t, opts, nil) defer s.Shutdown() var notices []string l.Lock() for _, line := range l.notices { if strings.Contains(line, "-1") { notices = l.notices break } } l.Unlock() if notices != nil { t.Fatalf("There should not be -1 values, got %v", notices) } }
explode_data.jsonl/23098
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 317 }
[ 2830, 3393, 3889, 9629, 94588, 1649, 1249, 1806, 18235, 8994, 291, 33092, 398, 1155, 353, 8840, 836, 8, 341, 64734, 1669, 2126, 3675, 3798, 741, 64734, 14535, 3136, 29966, 284, 220, 16, 15, 198, 64734, 14535, 6611, 82, 284, 220, 16, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestBasicModelWithRoot(t *testing.T) { e, _ := NewEnforcer("examples/basic_with_root_model.conf", "examples/basic_policy.csv") testEnforce(t, e, "alice", "data1", "read", true) testEnforce(t, e, "alice", "data1", "write", false) testEnforce(t, e, "alice", "data2", "read", false) testEnforce(t, e, "alice", "data2", "write", false) testEnforce(t, e, "bob", "data1", "read", false) testEnforce(t, e, "bob", "data1", "write", false) testEnforce(t, e, "bob", "data2", "read", false) testEnforce(t, e, "bob", "data2", "write", true) testEnforce(t, e, "root", "data1", "read", true) testEnforce(t, e, "root", "data1", "write", true) testEnforce(t, e, "root", "data2", "read", true) testEnforce(t, e, "root", "data2", "write", true) }
explode_data.jsonl/57117
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 321 }
[ 2830, 3393, 15944, 1712, 2354, 8439, 1155, 353, 8840, 836, 8, 341, 7727, 11, 716, 1669, 1532, 1702, 82010, 445, 51668, 77909, 6615, 12993, 5047, 13937, 497, 330, 51668, 77909, 22773, 11219, 5130, 18185, 1702, 8833, 1155, 11, 384, 11, 33...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRequest_ClientID(t *testing.T) { tests := []struct { name string request *alice.Request want string }{ { name: "", request: getReq(0), want: "ru.yandex.searchplugin/7.16 (none none; android 4.4.2)", }, { name: "", request: getReq(1), want: "ru.yandex.searchplugin/7.16 (none none; android 4.4.2)", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req := tt.request if got := req.ClientID(); got != tt.want { t.Errorf("Request.ClientID() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/18220
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 281 }
[ 2830, 3393, 1900, 46102, 915, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 262, 914, 198, 197, 23555, 353, 63195, 9659, 198, 197, 50780, 262, 914, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, 262, 8324, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestThumb_Filename(t *testing.T) { conf := config.TestConfig() thumbsPath := conf.CachePath() + "/_tmp" defer os.RemoveAll(thumbsPath) if err := conf.CreateDirectories(); err != nil { t.Error(err) } t.Run("", func(t *testing.T) { filename, err := thumb.Filename("99988", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor) if err != nil { t.Fatal(err) } assert.True(t, strings.HasSuffix(filename, "/storage/testdata/cache/_tmp/9/9/9/99988_150x150_fit.jpg")) }) t.Run("hash too short", func(t *testing.T) { _, err := thumb.Filename("999", thumbsPath, 150, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor) if err == nil { t.FailNow() } assert.Equal(t, "resample: file hash is empty or too short (999)", err.Error()) }) t.Run("invalid width", func(t *testing.T) { _, err := thumb.Filename("99988", thumbsPath, -4, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor) if err == nil { t.FailNow() } assert.Equal(t, "resample: width exceeds limit (-4)", err.Error()) }) t.Run("invalid height", func(t *testing.T) { _, err := thumb.Filename("99988", thumbsPath, 200, -1, thumb.ResampleFit, thumb.ResampleNearestNeighbor) if err == nil { t.FailNow() } assert.Equal(t, "resample: height exceeds limit (-1)", err.Error()) }) t.Run("empty thumbpath", func(t *testing.T) { path := "" _, err := thumb.Filename("99988", path, 200, 150, thumb.ResampleFit, thumb.ResampleNearestNeighbor) if err == nil { t.FailNow() } assert.Equal(t, "resample: folder is empty", err.Error()) }) }
explode_data.jsonl/1811
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 624 }
[ 2830, 3393, 62699, 1400, 4033, 1155, 353, 8840, 836, 8, 341, 67850, 1669, 2193, 8787, 2648, 2822, 70479, 15775, 1820, 1669, 2335, 46130, 1820, 368, 488, 3521, 62, 5173, 1837, 16867, 2643, 84427, 24365, 15775, 1820, 692, 743, 1848, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_SelectManyIdx_int_int(t *testing.T) { type args struct { source Enumerator[int] selector func(int, int) Enumerator[int] } tests := []struct { name string args args want Enumerator[int] }{ {name: "1", args: args{ source: NewOnSlice(1, 2, 3, 4), selector: func(i, idx int) Enumerator[int] { if idx%2 == 0 { return Empty[int]() } return NewOnSlice(i, i*i) }, }, want: NewOnSlice(2, 4, 4, 16), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got, _ := SelectManyIdx(tt.args.source, tt.args.selector); !SequenceEqualMust(got, tt.want) { got.Reset() tt.want.Reset() t.Errorf("SelectManyIdx() = '%v', want '%v'", String(got), String(tt.want)) } }) } }
explode_data.jsonl/63899
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 382 }
[ 2830, 3393, 58073, 8441, 11420, 4042, 4042, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 47418, 256, 76511, 18640, 921, 197, 197, 8925, 2915, 1548, 11, 526, 8, 76511, 18640, 921, 197, 532, 78216, 1669, 3056, 1235, 341, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUnit(t *testing.T) { expectHash := func(tree *IAVLTree, hashCount int) { // ensure number of new hash calculations is as expected. hash, count := tree.HashWithCount() if count != hashCount { t.Fatalf("Expected %v new hashes, got %v", hashCount, count) } // nuke hashes and reconstruct hash, ensure it's the same. tree.root.traverse(tree, true, func(node *IAVLNode) bool { node.hash = nil return false }) // ensure that the new hash after nuking is the same as the old. newHash, _ := tree.HashWithCount() if bytes.Compare(hash, newHash) != 0 { t.Fatalf("Expected hash %v but got %v after nuking", hash, newHash) } } expectSet := func(tree *IAVLTree, i int, repr string, hashCount int) { origNode := tree.root updated := tree.Set(i2b(i), nil) // ensure node was added & structure is as expected. if updated == true || P(tree.root) != repr { t.Fatalf("Adding %v to %v:\nExpected %v\nUnexpectedly got %v updated:%v", i, P(origNode), repr, P(tree.root), updated) } // ensure hash calculation requirements expectHash(tree, hashCount) tree.root = origNode } expectRemove := func(tree *IAVLTree, i int, repr string, hashCount int) { origNode := tree.root value, removed := tree.Remove(i2b(i)) // ensure node was added & structure is as expected. 
if len(value) != 0 || !removed || P(tree.root) != repr { t.Fatalf("Removing %v from %v:\nExpected %v\nUnexpectedly got %v value:%v removed:%v", i, P(origNode), repr, P(tree.root), value, removed) } // ensure hash calculation requirements expectHash(tree, hashCount) tree.root = origNode } //////// Test Set cases: // Case 1: t1 := T(N(4, 20)) expectSet(t1, 8, "((4 8) 20)", 3) expectSet(t1, 25, "(4 (20 25))", 3) t2 := T(N(4, N(20, 25))) expectSet(t2, 8, "((4 8) (20 25))", 3) expectSet(t2, 30, "((4 20) (25 30))", 4) t3 := T(N(N(1, 2), 6)) expectSet(t3, 4, "((1 2) (4 6))", 4) expectSet(t3, 8, "((1 2) (6 8))", 3) t4 := T(N(N(1, 2), N(N(5, 6), N(7, 9)))) expectSet(t4, 8, "(((1 2) (5 6)) ((7 8) 9))", 5) expectSet(t4, 10, "(((1 2) (5 6)) (7 (9 10)))", 5) //////// Test Remove cases: t10 := T(N(N(1, 2), 3)) expectRemove(t10, 2, "(1 3)", 1) expectRemove(t10, 3, "(1 2)", 0) t11 := T(N(N(N(1, 2), 3), N(4, 5))) expectRemove(t11, 4, "((1 2) (3 5))", 2) expectRemove(t11, 3, "((1 2) (4 5))", 1) }
explode_data.jsonl/5012
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1031 }
[ 2830, 3393, 4562, 1155, 353, 8840, 836, 8, 1476, 24952, 6370, 1669, 2915, 21298, 353, 5863, 30698, 6533, 11, 5175, 2507, 526, 8, 341, 197, 197, 322, 5978, 1372, 315, 501, 5175, 28117, 374, 438, 3601, 624, 197, 50333, 11, 1760, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMinSyncInterval(t *testing.T) { const minSyncInterval = 100 * time.Millisecond f := &syncFile{} w := NewLogWriter(f, 0) w.SetMinSyncInterval(func() time.Duration { return minSyncInterval }) var timer fakeTimer w.afterFunc = func(d time.Duration, f func()) syncTimer { if d != minSyncInterval { t.Fatalf("expected minSyncInterval %s, but found %s", minSyncInterval, d) } timer.f = f timer.Reset(d) return &timer } syncRecord := func(n int) *sync.WaitGroup { wg := &sync.WaitGroup{} wg.Add(1) _, err := w.SyncRecord(bytes.Repeat([]byte{'a'}, n), wg, new(error)) if err != nil { t.Fatal(err) } return wg } // Sync one record which will cause the sync timer to kick in. syncRecord(1).Wait() startWritePos := atomic.LoadInt64(&f.writePos) startSyncPos := atomic.LoadInt64(&f.syncPos) // Write a bunch of large records. The sync position should not change // because we haven't triggered the timer. But note that the writes should // not block either even though syncing isn't being done. var wg *sync.WaitGroup for i := 0; i < 100; i++ { wg = syncRecord(10000) if v := atomic.LoadInt64(&f.syncPos); startSyncPos != v { t.Fatalf("expected syncPos %d, but found %d", startSyncPos, v) } // NB: we can't use syncQueue.load() here as that will return 0,0 while the // syncQueue is blocked. head, tail := w.flusher.syncQ.unpack(atomic.LoadUint64(&w.flusher.syncQ.headTail)) waiters := head - tail if waiters != uint32(i+1) { t.Fatalf("expected %d waiters, but found %d", i+1, waiters) } } err := try(time.Millisecond, 5*time.Second, func() error { v := atomic.LoadInt64(&f.writePos) if v > startWritePos { return nil } return fmt.Errorf("expected writePos > %d, but found %d", startWritePos, v) }) if err != nil { t.Fatal(err) } // Fire the timer, and then wait for the last record to sync. timer.f() wg.Wait() if w, s := atomic.LoadInt64(&f.writePos), atomic.LoadInt64(&f.syncPos); w != s { t.Fatalf("expected syncPos %d, but found %d", s, w) } }
explode_data.jsonl/58757
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 801 }
[ 2830, 3393, 6217, 12154, 10256, 1155, 353, 8840, 836, 8, 341, 4777, 1308, 12154, 10256, 284, 220, 16, 15, 15, 353, 882, 71482, 271, 1166, 1669, 609, 12996, 1703, 16094, 6692, 1669, 1532, 2201, 6492, 955, 11, 220, 15, 340, 6692, 4202, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStoreInitAndBootstrap(t *testing.T) { defer leaktest.AfterTest(t)() // We need a fixed clock to avoid LastUpdateNanos drifting on us. cfg := TestStoreConfig(hlc.NewClock(func() int64 { return 123 }, time.Nanosecond)) stopper := stop.NewStopper() ctx := context.Background() defer stopper.Stop(ctx) eng := storage.NewDefaultInMem() stopper.AddCloser(eng) cfg.Transport = NewDummyRaftTransport(cfg.Settings) factory := &testSenderFactory{} cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock) { store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) // Can't start as haven't bootstrapped. if err := store.Start(ctx, stopper); err == nil { t.Error("expected failure starting un-bootstrapped store") } require.NoError(t, WriteClusterVersion(context.Background(), eng, clusterversion.TestingClusterVersion)) // Bootstrap with a fake ident. if err := InitEngine(ctx, eng, testIdent); err != nil { t.Fatalf("error bootstrapping store: %+v", err) } // Verify we can read the store ident after a flush. if err := eng.Flush(); err != nil { t.Fatal(err) } if _, err := ReadStoreIdent(ctx, eng); err != nil { t.Fatalf("unable to read store ident: %+v", err) } // Bootstrap the system ranges. var splits []roachpb.RKey kvs, tableSplits := sqlbase.MakeMetadataSchema( keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig, ).GetInitialValues() splits = config.StaticSplits() splits = append(splits, tableSplits...) sort.Slice(splits, func(i, j int) bool { return splits[i].Less(splits[j]) }) if err := WriteInitialClusterData( ctx, eng, kvs /* initialValues */, clusterversion.TestingBinaryVersion, 1 /* numStores */, splits, cfg.Clock.PhysicalNow(), ); err != nil { t.Errorf("failure to create first range: %+v", err) } } // Now, attempt to initialize a store with a now-bootstrapped range. 
store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) if err := store.Start(ctx, stopper); err != nil { t.Fatalf("failure initializing bootstrapped store: %+v", err) } for i := 1; i <= store.ReplicaCount(); i++ { r, err := store.GetReplica(roachpb.RangeID(i)) if err != nil { t.Fatalf("failure fetching range %d: %+v", i, err) } rs := r.GetMVCCStats() // Stats should agree with a recomputation. now := r.store.Clock().Now() if ms, err := rditer.ComputeStatsForRange(r.Desc(), eng, now.WallTime); err != nil { t.Errorf("failure computing range's stats: %+v", err) } else if ms != rs { t.Errorf("expected range's stats to agree with recomputation: %s", pretty.Diff(ms, rs)) } } }
explode_data.jsonl/79
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 999 }
[ 2830, 3393, 6093, 3803, 3036, 45511, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 197, 322, 1205, 1184, 264, 8356, 8866, 311, 5648, 7996, 4289, 45, 43605, 84253, 389, 601, 624, 50286, 1669, 3393, 6093, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestVoteReviser(t *testing.T) { r := require.New(t) ctrl := gomock.NewController(t) sm := testdb.NewMockStateManager(ctrl) _, err := sm.PutState( &totalBucketCount{count: 0}, protocol.NamespaceOption(StakingNameSpace), protocol.KeyOption(TotalBucketKey), ) r.NoError(err) tests := []struct { cand address.Address owner address.Address amount *big.Int duration uint32 index uint64 }{ { identityset.Address(6), identityset.Address(6), unit.ConvertIotxToRau(1100000), 21, 0, }, { identityset.Address(1), identityset.Address(1), unit.ConvertIotxToRau(1200000), 21, 1, }, { identityset.Address(2), identityset.Address(2), unit.ConvertIotxToRau(1200000), 14, 2, }, { identityset.Address(3), identityset.Address(3), unit.ConvertIotxToRau(1200000), 25, 3, }, { identityset.Address(4), identityset.Address(4), unit.ConvertIotxToRau(1200000), 31, 4, }, { identityset.Address(5), identityset.Address(5), unit.ConvertIotxToRau(1199999), 31, 5, }, { identityset.Address(1), identityset.Address(2), big.NewInt(2100000000), 21, 6, }, { identityset.Address(2), identityset.Address(3), big.NewInt(1400000000), 14, 7, }, { identityset.Address(3), identityset.Address(4), big.NewInt(2500000000), 25, 8, }, { identityset.Address(4), identityset.Address(1), big.NewInt(3100000000), 31, 9, }, } // test loading with no candidate in stateDB stk, err := NewProtocol( nil, genesis.Default.Staking, nil, genesis.Default.GreenlandBlockHeight, genesis.Default.HawaiiBlockHeight, ) r.NotNil(stk) r.NoError(err) // write a number of buckets into stateDB for _, e := range tests { vb := NewVoteBucket(e.cand, e.owner, e.amount, e.duration, time.Now(), true) index, err := putBucketAndIndex(sm, vb) r.NoError(err) r.Equal(index, vb.Index) } // load candidates from stateDB and verify ctx := genesis.WithGenesisContext(context.Background(), genesis.Default) v, err := stk.Start(ctx, sm) sm.WriteView(protocolID, v) r.NoError(err) _, ok := v.(*ViewData) r.True(ok) csm, err := NewCandidateStateManager(sm, false) r.NoError(err) 
// load a number of candidates for _, e := range testCandidates { r.NoError(csm.Upsert(e.d)) } r.NoError(csm.Commit()) // test revise r.False(stk.voteReviser.isCacheExist(genesis.Default.GreenlandBlockHeight)) r.False(stk.voteReviser.isCacheExist(genesis.Default.HawaiiBlockHeight)) r.NoError(stk.voteReviser.Revise(csm, genesis.Default.HawaiiBlockHeight)) r.NoError(csm.Commit()) r.False(stk.voteReviser.isCacheExist(genesis.Default.GreenlandBlockHeight)) // verify self-stake and total votes match result, ok := stk.voteReviser.result(genesis.Default.HawaiiBlockHeight) r.True(ok) r.Equal(len(testCandidates), len(result)) cv := genesis.Default.Staking.VoteWeightCalConsts for _, c := range result { cand := csm.GetByOwner(c.Owner) r.True(c.Equal(cand)) for _, cand := range testCandidates { if address.Equal(cand.d.Owner, c.Owner) { r.Equal(0, cand.d.SelfStake.Cmp(c.SelfStake)) } } for _, v := range tests { if address.Equal(v.cand, c.Owner) && v.index != c.SelfStakeBucketIdx { bucket, err := getBucket(csm, v.index) r.NoError(err) total := calculateVoteWeight(cv, bucket, false) bucket, err = getBucket(csm, c.SelfStakeBucketIdx) r.NoError(err) total.Add(total, calculateVoteWeight(cv, bucket, true)) r.Equal(0, total.Cmp(c.Votes)) break } } } }
explode_data.jsonl/2014
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1647 }
[ 2830, 3393, 41412, 693, 2682, 261, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1373, 7121, 1155, 692, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 72023, 1669, 1273, 1999, 7121, 11571, 83132, 62100, 340, 197, 6878, 1848, 1669, 1525...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestMatchingConstraints(t *testing.T) { cases := []struct { constraints types.Constraints tags []string expected bool }{ // simple test: must match { constraints: types.Constraints{ { Key: "tag", MustMatch: true, Regex: "us-east-1", }, }, tags: []string{ "us-east-1", }, expected: true, }, // simple test: must match but does not match { constraints: types.Constraints{ { Key: "tag", MustMatch: true, Regex: "us-east-1", }, }, tags: []string{ "us-east-2", }, expected: false, }, // simple test: must not match { constraints: types.Constraints{ { Key: "tag", MustMatch: false, Regex: "us-east-1", }, }, tags: []string{ "us-east-1", }, expected: false, }, // complex test: globbing { constraints: types.Constraints{ { Key: "tag", MustMatch: true, Regex: "us-east-*", }, }, tags: []string{ "us-east-1", }, expected: true, }, // complex test: multiple constraints { constraints: types.Constraints{ { Key: "tag", MustMatch: true, Regex: "us-east-*", }, { Key: "tag", MustMatch: false, Regex: "api", }, }, tags: []string{ "api", "us-east-1", }, expected: false, }, } for i, c := range cases { provider := myProvider{ BaseProvider{ Constraints: c.constraints, }, nil, } actual, _ := provider.MatchConstraints(c.tags) if actual != c.expected { t.Fatalf("test #%v: expected %t, got %t, for %#v", i, c.expected, actual, c.constraints) } } }
explode_data.jsonl/64892
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 913 }
[ 2830, 3393, 64430, 12925, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 37203, 7458, 4494, 4801, 7458, 198, 197, 3244, 2032, 286, 3056, 917, 198, 197, 42400, 262, 1807, 198, 197, 59403, 197, 197, 322, 4285, 127...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestShadowedFlag(t *testing.T) { if ver, _ := goversion.Parse(runtime.Version()); ver.Major >= 0 && !ver.AfterOrEqual(goversion.GoVersion{Major: 1, Minor: 9, Rev: -1}) { return } withTestProcess("testshadow", t, func(p *proc.Target, fixture protest.Fixture) { assertNoError(p.Continue(), t, "Continue") scope, err := proc.GoroutineScope(p, p.CurrentThread()) assertNoError(err, t, "GoroutineScope") locals, err := scope.LocalVariables(normalLoadConfig) assertNoError(err, t, "LocalVariables") foundShadowed := false foundNonShadowed := false for _, v := range locals { if v.Flags&proc.VariableShadowed != 0 { if v.Name != "a" { t.Errorf("wrong shadowed variable %s", v.Name) } foundShadowed = true if n, _ := constant.Int64Val(v.Value); n != 0 { t.Errorf("wrong value for shadowed variable a: %d", n) } } else { if v.Name != "a" { t.Errorf("wrong non-shadowed variable %s", v.Name) } foundNonShadowed = true if n, _ := constant.Int64Val(v.Value); n != 1 { t.Errorf("wrong value for non-shadowed variable a: %d", n) } } } if !foundShadowed { t.Error("could not find any shadowed variable") } if !foundNonShadowed { t.Error("could not find any non-shadowed variable") } }) }
explode_data.jsonl/56292
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 529 }
[ 2830, 3393, 23667, 291, 12135, 1155, 353, 8840, 836, 8, 341, 743, 2739, 11, 716, 1669, 728, 4366, 8937, 89467, 35842, 13426, 2739, 1321, 3035, 2604, 220, 15, 1009, 753, 423, 36892, 2195, 2993, 3268, 859, 1325, 67131, 5637, 90, 34475, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func Test(t *testing.T) { segs := strings.Split(*testPort, ",") for _, seg := range segs { Suite(&clientTestSuite{port: seg}) } TestingT(t) }
explode_data.jsonl/74665
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 65 }
[ 2830, 3393, 1155, 353, 8840, 836, 8, 341, 84686, 5857, 1669, 9069, 19823, 4071, 1944, 7084, 11, 3670, 1138, 2023, 8358, 4810, 1669, 2088, 4810, 82, 341, 197, 7568, 9302, 2099, 2972, 2271, 28000, 90, 403, 25, 4810, 3518, 197, 532, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func Test_Hoverfly_GetFilteredSimulation_ReturnBlankSimulation_IfThereIsNoMatch(t *testing.T) { RegisterTestingT(t) unit := NewHoverflyWithConfiguration(&Configuration{}) unit.Simulation.AddPair(&models.RequestMatcherResponsePair{ RequestMatcher: models.RequestMatcher{ Destination: []models.RequestFieldMatchers{ { Matcher: matchers.Exact, Value: "foo.com", }, }, }, }) simulation, err := unit.GetFilteredSimulation("test-(.+).com") Expect(err).To(BeNil()) Expect(simulation.RequestResponsePairs).To(HaveLen(0)) Expect(simulation.GlobalActions.Delays).To(HaveLen(0)) Expect(simulation.GlobalActions.DelaysLogNormal).To(HaveLen(0)) Expect(simulation.MetaView.SchemaVersion).To(Equal("v5")) Expect(simulation.MetaView.HoverflyVersion).To(MatchRegexp(`v\d+.\d+.\d+(-rc.\d)*`)) Expect(simulation.MetaView.TimeExported).ToNot(BeNil()) }
explode_data.jsonl/45373
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 346 }
[ 2830, 3393, 2039, 1975, 21642, 13614, 67310, 64554, 53316, 22770, 64554, 62, 2679, 3862, 3872, 2753, 8331, 1155, 353, 8840, 836, 8, 341, 79096, 16451, 51, 1155, 692, 81189, 1669, 1532, 34379, 21642, 2354, 7688, 2099, 7688, 6257, 692, 8118...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNumberConversionsFailForNonNumbers(t *testing.T) { t.Parallel() ctx := NewIsolate().NewContext() res, err := ctx.Eval(`undefined`, "test.js") if err != nil { t.Fatal(err) } if res.IsKind(KindNumber) { t.Errorf("Expected %q to NOT be a number kind, but it is: %q", res, res.kindMask) } if f64 := res.Float64(); !math.IsNaN(f64) { t.Errorf("Expected %q to be NaN, but got %f", res, f64) } if i64 := res.Int64(); i64 != 0 { t.Errorf("Expected %q to eq 0, but got %d", res, i64) } }
explode_data.jsonl/81541
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 223 }
[ 2830, 3393, 2833, 1109, 28290, 19524, 2461, 8121, 27237, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 20985, 1669, 1532, 3872, 33066, 1005, 3564, 1972, 2822, 10202, 11, 1848, 1669, 5635, 5142, 831, 5809, 9614, 7808, 330, 1944, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func Test_HTTPClientFactory_Suite(t *testing.T) { timeoutS := uint(2) setupForTest := func(listenerCert, listenerKey string, trustSystemCerts bool) (*HorizonConfig, string, string) { dir, listener := setupTesting(listenerCert, listenerKey, trustSystemCerts, t) t.Logf("listening on %s", listener.Addr().String()) cfg, err := Read(filepath.Join(dir, "config.json")) if err != nil { t.Error(nil) } return cfg, strings.Split(listener.Addr().String(), ":")[1], dir } cfg, port, dir := setupForTest(collaboratorsTestCert, collaboratorsTestKey, false) t.Run("HTTP client rejects trusted cert for wrong domain", func(t *testing.T) { client := cfg.Collaborators.HTTPClientFactory.NewHTTPClient(&timeoutS) // this'll fail b/c we're making a request to 127.0.1.1 but that isn't the CN or subjectAltName IP in the cert _, err := client.Get(fmt.Sprintf("https://%s:%s/boosh", "127.0.1.1", port)) if err == nil { t.Error("Expected TLS error for sending request to wrong domain") } }) t.Run("HTTP client accepts trusted cert for right domain", func(t *testing.T) { client := cfg.Collaborators.HTTPClientFactory.NewHTTPClient(&timeoutS) // all of these should pass b/c they are the subjectAltNames of the cert (either names or IPs) note that Golang doesn't verify the CA of the cert if it's localhost or an IP for _, dom := range []string{listenOn, "localhost"} { resp, err := client.Get(fmt.Sprintf("https://%s:%s/boosh", dom, port)) if err != nil { t.Error("Unxpected error sending request to trusted domain", err) } if resp != nil { if resp.StatusCode != 200 { t.Errorf("Unexpected error from HTTP request (wanted 200). 
HTTP response status code: %v", resp.StatusCode) } content, err := ioutil.ReadAll(resp.Body) if err != nil { t.Error("Unexpected error reading response from HTTP server", err) } if string(content) != "yup" { t.Error("Unexpected returned content from test") } } } cleanup(dir, t) }) t.Run("HTTP client rejects untrusted cert", func(t *testing.T) { // need a new config and setup cfg, port, dir := setupForTest(collaboratorsOtherTestCert, collaboratorsOtherTestKey, false) client := cfg.Collaborators.HTTPClientFactory.NewHTTPClient(&timeoutS) // this should fail b/c even though we're sending a request to a trusted domain, the CA trust doesn't contain the cert _, err := client.Get(fmt.Sprintf("https://%s:%s/boosh", listenOn, port)) if err == nil { t.Error("Expected TLS error for sending request to untrusted domain") } cleanup(dir, t) }) t.Run("HTTP client trusts system certs", func(t *testing.T) { // important that the cert and key match for setup to succeed even though that's not what we're testing setupForTest(collaboratorsTestCert, collaboratorsTestKey, true) cleanup(dir, t) // if we got this far we're ok (an error gets raised during setup if the system ca certs couldn't be loaded) }) }
explode_data.jsonl/70614
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1020 }
[ 2830, 3393, 34954, 2959, 4153, 1098, 9302, 1155, 353, 8840, 836, 8, 341, 78395, 50, 1669, 2622, 7, 17, 692, 84571, 2461, 2271, 1669, 2915, 40610, 36934, 11, 11446, 1592, 914, 11, 6950, 2320, 34, 15546, 1807, 8, 4609, 39601, 16973, 264...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_sequencer_getNACKSeqNo(t *testing.T) { type args struct { seqNo []uint16 } type fields struct { input []uint16 offset uint16 } tests := []struct { name string fields fields args args want []uint16 }{ { name: "Should get correct seq numbers", fields: fields{ input: []uint16{2, 3, 4, 7, 8}, offset: 5, }, args: args{ seqNo: []uint16{4 + 5, 5 + 5, 8 + 5}, }, want: []uint16{4, 8}, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { n := newSequencer() for _, i := range tt.fields.input { n.push(i, i+tt.fields.offset, 123, 3, true) } g := n.getSeqNoPairs(tt.args.seqNo) var got []uint16 for _, sn := range g { got = append(got, sn.getSourceSeqNo()) } if !reflect.DeepEqual(got, tt.want) { t.Errorf("getSeqNoPairs() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/15571
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 445 }
[ 2830, 3393, 3453, 446, 19529, 3062, 45, 4032, 20183, 2753, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 78561, 2753, 3056, 2496, 16, 21, 198, 197, 532, 13158, 5043, 2036, 341, 197, 22427, 220, 3056, 2496, 16, 21, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestConfiguration(t *testing.T) { for _, envCmd := range []cli.Command{ *getConfigurationCommand(), *getVersionCommand(), } { assert.Equal(t, envCmd.Category, "Configuration") assert.NotEmpty(t, envCmd.Name) assert.NotEmpty(t, envCmd.Usage) } }
explode_data.jsonl/61891
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 98 }
[ 2830, 3393, 7688, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 6105, 15613, 1669, 2088, 3056, 19521, 12714, 515, 197, 197, 9, 455, 7688, 4062, 3148, 197, 197, 9, 455, 5637, 4062, 3148, 197, 92, 341, 197, 6948, 12808, 1155, 11, 6105, 15...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestStudentTProbs(t *testing.T) { src := rand.New(rand.NewSource(1)) for _, test := range []struct { nu float64 mu []float64 sigma *mat.SymDense x [][]float64 probs []float64 }{ { nu: 3, mu: []float64{0, 0}, sigma: mat.NewSymDense(2, []float64{1, 0, 0, 1}), x: [][]float64{ {0, 0}, {1, -1}, {3, 4}, {-1, -2}, }, // Outputs compared with WolframAlpha. probs: []float64{ 0.159154943091895335768883, 0.0443811199724279860006777747927, 0.0005980371870904696541052658, 0.01370560783418571283428283, }, }, { nu: 4, mu: []float64{2, -3}, sigma: mat.NewSymDense(2, []float64{8, -1, -1, 5}), x: [][]float64{ {0, 0}, {1, -1}, {3, 4}, {-1, -2}, {2, -3}, }, // Outputs compared with WolframAlpha. probs: []float64{ 0.007360810111491788657953608191001, 0.0143309905845607117740440592999, 0.0005307774290578041397794096037035009801668903, 0.0115657422475668739943625904793879, 0.0254851872062589062995305736215, }, }, } { s, ok := NewStudentsT(test.mu, test.sigma, test.nu, src) if !ok { t.Fatal("bad test") } for i, x := range test.x { xcpy := make([]float64, len(x)) copy(xcpy, x) p := s.Prob(x) if !floats.Same(x, xcpy) { t.Errorf("X modified during call to prob, %v, %v", x, xcpy) } if !floats.EqualWithinAbsOrRel(p, test.probs[i], 1e-10, 1e-10) { t.Errorf("Probability mismatch. X = %v. Got %v, want %v.", x, p, test.probs[i]) } } } }
explode_data.jsonl/3126
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 858 }
[ 2830, 3393, 14096, 51, 1336, 1279, 1155, 353, 8840, 836, 8, 341, 41144, 1669, 10382, 7121, 37595, 7121, 3608, 7, 16, 1171, 2023, 8358, 1273, 1669, 2088, 3056, 1235, 341, 197, 197, 8933, 262, 2224, 21, 19, 198, 197, 2109, 84, 262, 30...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestPrescalingWaitsForBackends(t *testing.T) { // Create 3 stacks with traffic 0.0, 50.0 & 50.0 // Switch traffic to be 0.0, 30, 70.0 // Verify that actual traffic changes correctly t.Parallel() stacksetName := "stackset-prescale-backends-wait" specFactory := NewTestStacksetSpecFactory(stacksetName).Ingress().StackGC(3, 15).Replicas(3) // create stack with 3 replicas firstStack := "v1" spec := specFactory.Create(firstStack) err := createStackSet(stacksetName, 1, spec) require.NoError(t, err) _, err = waitForStack(t, stacksetName, firstStack) require.NoError(t, err) // create second stack with 3 replicas secondStack := "v2" spec = specFactory.Create(secondStack) err = updateStackset(stacksetName, spec) require.NoError(t, err) _, err = waitForStack(t, stacksetName, secondStack) require.NoError(t, err) // create third stack with 3 replicas thirdStack := "v3" spec = specFactory.Create(thirdStack) err = updateStackset(stacksetName, spec) require.NoError(t, err) _, err = waitForStack(t, stacksetName, thirdStack) require.NoError(t, err) _, err = waitForIngress(t, stacksetName) require.NoError(t, err) // switch traffic so that all three stacks are receiving 0%, 50% & 50% traffic and verify traffic has actually switched fullFirstStack := fmt.Sprintf("%s-%s", stacksetName, firstStack) fullSecondStack := fmt.Sprintf("%s-%s", stacksetName, secondStack) fullThirdStack := fmt.Sprintf("%s-%s", stacksetName, thirdStack) desiredTraffic := map[string]float64{ fullFirstStack: 0, fullSecondStack: 50, fullThirdStack: 50, } err = setDesiredTrafficWeightsIngress(stacksetName, desiredTraffic) require.NoError(t, err) err = trafficWeightsUpdatedIngress(t, stacksetName, weightKindActual, desiredTraffic, nil).withTimeout(time.Minute * 4).await() require.NoError(t, err) // switch traffic so that all three stacks are receiving 0%, 30% & 70% traffic respectively desiredTraffic = map[string]float64{ fullFirstStack: 0, fullSecondStack: 30, fullThirdStack: 70, } err = 
setDesiredTrafficWeightsIngress(stacksetName, desiredTraffic) require.NoError(t, err) err = trafficWeightsUpdatedIngress(t, stacksetName, weightKindActual, desiredTraffic, func(actualTraffic map[string]float64) error { // err out if the traffic for any of the stacks is outside of the expected range if actualTraffic[fullFirstStack] > 0 { return fmt.Errorf("%v traffic not exactly %v", actualTraffic[fullFirstStack], 0) } if actualTraffic[fullSecondStack] > 50 || actualTraffic[fullSecondStack] < 30 { return fmt.Errorf("%v traffic not between %v and %v", actualTraffic[fullSecondStack], 30, 50) } if actualTraffic[fullThirdStack] > 70 || actualTraffic[fullThirdStack] < 50 { return fmt.Errorf("%v traffic not between %v and %v", actualTraffic[fullThirdStack], 50, 70) } return nil }).withTimeout(time.Minute * 4).await() require.NoError(t, err) }
explode_data.jsonl/4349
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1002 }
[ 2830, 3393, 14367, 81552, 54, 56479, 2461, 3707, 1412, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 220, 18, 39950, 448, 9442, 220, 15, 13, 15, 11, 220, 20, 15, 13, 15, 609, 220, 20, 15, 13, 15, 198, 197, 322, 15586, 9442, 311,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestDescribeLoadBalancerOnDelete(t *testing.T) { awsServices := NewFakeAWSServices() c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices) awsServices.elb.expectDescribeLoadBalancers("aid") c.EnsureLoadBalancerDeleted(TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}}) }
explode_data.jsonl/12857
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 74785, 5879, 93825, 1925, 6435, 1155, 353, 8840, 836, 8, 341, 197, 8635, 11025, 1669, 1532, 52317, 14419, 1220, 2161, 741, 1444, 11, 716, 1669, 501, 14419, 3540, 52178, 51442, 68587, 10937, 9752, 60, 3975, 31521, 11025, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPaymentFailsIfNoAssetSpecified(t *testing.T) { kp0 := newKeypair0() sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) payment := Payment{ Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", Amount: "10", } _, err := NewTransaction( TransactionParams{ SourceAccount: &sourceAccount, IncrementSequenceNum: true, Operations: []Operation{&payment}, BaseFee: MinBaseFee, Timebounds: NewInfiniteTimeout(), }, ) expectedErrMsg := "validation failed for *txnbuild.Payment operation: Field: Asset, Error: asset is undefined" require.EqualError(t, err, expectedErrMsg, "An asset is required") }
explode_data.jsonl/20659
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 304 }
[ 2830, 3393, 20188, 37, 6209, 2679, 2753, 16604, 8327, 1870, 1155, 353, 8840, 836, 8, 341, 16463, 79, 15, 1669, 501, 6608, 1082, 1310, 15, 741, 47418, 7365, 1669, 1532, 16374, 7365, 5969, 79, 15, 26979, 1507, 526, 21, 19, 7, 24, 21, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGoogleCAClient(t *testing.T) { os.Setenv("GKE_CLUSTER_URL", "https://container.googleapis.com/v1/projects/testproj/locations/us-central1-c/clusters/cluster1") defer func() { os.Unsetenv("GKE_CLUSTER_URL") }() testCases := map[string]struct { service mock.CAService expectedCert []string expectedErr string }{ "Valid certs": { service: mock.CAService{Certs: fakeCert, Err: nil}, expectedCert: fakeCert, expectedErr: "", }, "Error in response": { service: mock.CAService{Certs: nil, Err: fmt.Errorf("test failure")}, expectedCert: nil, expectedErr: "rpc error: code = Unknown desc = test failure", }, "Empty response": { service: mock.CAService{Certs: []string{}, Err: nil}, expectedCert: nil, expectedErr: "invalid response cert chain", }, } for id, tc := range testCases { // create a local grpc server s, err := mock.CreateServer(mockServerAddress, &tc.service) if err != nil { t.Fatalf("Test case [%s]: failed to create server: %v", id, err) } defer s.Stop() cli, err := NewGoogleCAClient(s.Address, false) if err != nil { t.Errorf("Test case [%s]: failed to create ca client: %v", id, err) } resp, err := cli.CSRSign(context.Background(), []byte{01}, fakeToken, 1) if err != nil { if err.Error() != tc.expectedErr { t.Errorf("Test case [%s]: error (%s) does not match expected error (%s)", id, err.Error(), tc.expectedErr) } } else { if tc.expectedErr != "" { t.Errorf("Test case [%s]: expect error: %s but got no error", id, tc.expectedErr) } else if !reflect.DeepEqual(resp, tc.expectedCert) { t.Errorf("Test case [%s]: resp: got %+v, expected %v", id, resp, tc.expectedCert) } } } }
explode_data.jsonl/32003
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 724 }
[ 2830, 3393, 14444, 92832, 1451, 1155, 353, 8840, 836, 8, 341, 25078, 4202, 3160, 445, 38, 3390, 77871, 8000, 497, 330, 2428, 1110, 3586, 19758, 905, 5457, 16, 39606, 12697, 30386, 14, 31309, 62431, 84081, 16, 1786, 55931, 14605, 14, 188...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestJoinsWithSelect(t *testing.T) { type result struct { ID uint PetID uint Name string } user := *GetUser("joins_with_select", Config{Pets: 2}) DB.Save(&user) var results []result DB.Table("users").Select("users.id, pets.id as pet_id, pets.name").Joins("left join pets on pets.user_id = users.id").Where("users.name = ?", "joins_with_select").Scan(&results) sort.Slice(results, func(i, j int) bool { return results[i].PetID > results[j].PetID }) sort.Slice(results, func(i, j int) bool { return user.Pets[i].ID > user.Pets[j].ID }) if len(results) != 2 || results[0].Name != user.Pets[0].Name || results[1].Name != user.Pets[1].Name { t.Errorf("Should find all two pets with Join select, got %+v", results) } }
explode_data.jsonl/20076
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 291 }
[ 2830, 3393, 22493, 1330, 2354, 3379, 1155, 353, 8840, 836, 8, 341, 13158, 1102, 2036, 341, 197, 29580, 262, 2622, 198, 197, 10025, 295, 915, 2622, 198, 197, 21297, 220, 914, 198, 197, 630, 19060, 1669, 353, 1949, 1474, 445, 7305, 1330...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_runUpdateCmd(t *testing.T) { fakeKeyName1 := "runUpdateCmd_Key1" fakeKeyName2 := "runUpdateCmd_Key2" cmd := updateKeyCommand() // fails because it requests a password assert.EqualError(t, runUpdateCmd(cmd, []string{fakeKeyName1}), "EOF") // try again mockIn, _, _ := tests.ApplyMockIO(cmd) mockIn.Reset("pass1234\n") assert.EqualError(t, runUpdateCmd(cmd, []string{fakeKeyName1}), "Key runUpdateCmd_Key1 not found") // Prepare a key base // Now add a temporary keybase kbHome, cleanUp1 := tests.NewTestCaseDir(t) defer cleanUp1() viper.Set(flags.FlagHome, kbHome) kb, err := NewKeyBaseFromHomeFlag() assert.NoError(t, err) _, err = kb.CreateAccount(fakeKeyName1, tests.TestMnemonic, "", "", 0, 0) assert.NoError(t, err) _, err = kb.CreateAccount(fakeKeyName2, tests.TestMnemonic, "", "", 0, 1) assert.NoError(t, err) // Try again now that we have keys // Incorrect key type mockIn.Reset("pass1234\nNew1234\nNew1234") err = runUpdateCmd(cmd, []string{fakeKeyName1}) assert.EqualError(t, err, "locally stored key required. Received: keys.offlineInfo") // TODO: Check for other type types? }
explode_data.jsonl/17108
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 429 }
[ 2830, 3393, 14007, 4289, 15613, 1155, 353, 8840, 836, 8, 341, 1166, 726, 58660, 16, 1669, 330, 6108, 4289, 15613, 35253, 16, 698, 1166, 726, 58660, 17, 1669, 330, 6108, 4289, 15613, 35253, 17, 1837, 25920, 1669, 2647, 1592, 4062, 2822, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestProviderOnMiddlewares(t *testing.T) { entryPoints := []string{"web"} staticCfg := static.Configuration{ EntryPoints: map[string]*static.EntryPoint{ "web": { Address: ":80", }, }, } rtConf := runtime.NewConfig(dynamic.Configuration{ HTTP: &dynamic.HTTPConfiguration{ Services: map[string]*dynamic.Service{ "test@file": { LoadBalancer: &dynamic.ServersLoadBalancer{ Servers: []dynamic.Server{}, }, }, }, Routers: map[string]*dynamic.Router{ "router@file": { EntryPoints: []string{"web"}, Rule: "Host(`test`)", Service: "test@file", Middlewares: []string{"chain@file", "m1"}, }, "router@docker": { EntryPoints: []string{"web"}, Rule: "Host(`test`)", Service: "test@file", Middlewares: []string{"chain", "m1@file"}, }, }, Middlewares: map[string]*dynamic.Middleware{ "chain@file": { Chain: &dynamic.Chain{Middlewares: []string{"m1", "m2", "m1@file"}}, }, "chain@docker": { Chain: &dynamic.Chain{Middlewares: []string{"m1", "m2", "m1@file"}}, }, "m1@file": {AddPrefix: &dynamic.AddPrefix{Prefix: "/m1"}}, "m2@file": {AddPrefix: &dynamic.AddPrefix{Prefix: "/m2"}}, "m1@docker": {AddPrefix: &dynamic.AddPrefix{Prefix: "/m1"}}, "m2@docker": {AddPrefix: &dynamic.AddPrefix{Prefix: "/m2"}}, }, }, }) serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport, nil, nil) middlewaresBuilder := middleware.NewBuilder(rtConf.Middlewares, serviceManager) responseModifierFactory := responsemodifiers.NewBuilder(map[string]*runtime.MiddlewareInfo{}) chainBuilder := middleware.NewChainBuilder(staticCfg, nil, nil) routerManager := NewManager(rtConf, serviceManager, middlewaresBuilder, responseModifierFactory, chainBuilder) _ = routerManager.BuildHandlers(context.Background(), entryPoints, false) assert.Equal(t, []string{"chain@file", "m1@file"}, rtConf.Routers["router@file"].Middlewares) assert.Equal(t, []string{"m1@file", "m2@file", "m1@file"}, rtConf.Middlewares["chain@file"].Chain.Middlewares) assert.Equal(t, []string{"chain@docker", "m1@file"}, 
rtConf.Routers["router@docker"].Middlewares) assert.Equal(t, []string{"m1@docker", "m2@docker", "m1@file"}, rtConf.Middlewares["chain@docker"].Chain.Middlewares) }
explode_data.jsonl/25183
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 986 }
[ 2830, 3393, 5179, 1925, 43935, 37903, 1155, 353, 8840, 836, 8, 341, 48344, 11411, 1669, 3056, 917, 4913, 2911, 63159, 6977, 42467, 1669, 1099, 17334, 515, 197, 197, 5874, 11411, 25, 2415, 14032, 8465, 1978, 22330, 2609, 515, 298, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_unescapePathParams(t *testing.T) { type Params struct { ProjectName string RepositoryName string } str := "params" type args struct { params interface{} fieldNames []string } tests := []struct { name string args args wantErr bool }{ {"non ptr", args{str, []string{"RepositoryName"}}, true}, {"non struct", args{&str, []string{"RepositoryName"}}, true}, {"ptr of struct", args{&Params{}, []string{"RepositoryName"}}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if err := unescapePathParams(tt.args.params, tt.args.fieldNames...); (err != nil) != tt.wantErr { t.Errorf("unescapePathParams() error = %v, wantErr %v", err, tt.wantErr) } }) } t.Run("ok", func(t *testing.T) { params := Params{ProjectName: "library", RepositoryName: "hello%2Fworld"} unescapePathParams(&params, "RepositoryName") if params.RepositoryName != "hello/world" { t.Errorf("unescapePathParams() not unescape RepositoryName field") } }) }
explode_data.jsonl/7936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 406 }
[ 2830, 3393, 4907, 12998, 1820, 4870, 1155, 353, 8840, 836, 8, 341, 13158, 34352, 2036, 341, 197, 197, 7849, 675, 262, 914, 198, 197, 197, 4624, 675, 914, 198, 197, 630, 11355, 1669, 330, 3519, 1837, 13158, 2827, 2036, 341, 197, 25856,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUint8(t *testing.T) { val := uint8(1) m := map[string]interface{}{"value": val, "nothing": nil} assert.Equal(t, val, New(m).Get("value").Uint8()) assert.Equal(t, val, New(m).Get("value").MustUint8()) assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) assert.Panics(t, func() { New(m).Get("age").MustUint8() }) }
explode_data.jsonl/23464
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 175 }
[ 2830, 3393, 21570, 23, 1155, 353, 8840, 836, 8, 1476, 19302, 1669, 2622, 23, 7, 16, 340, 2109, 1669, 2415, 14032, 31344, 6257, 4913, 957, 788, 1044, 11, 330, 41212, 788, 2092, 532, 6948, 12808, 1155, 11, 1044, 11, 1532, 1255, 568, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInputService6ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1(t *testing.T) { sess := session.New() svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService6TestShapeInputService6TestCaseOperation1Input{ Ascending: aws.String("true"), Config: &InputService6TestShapeStructType{ A: aws.String("one"), B: aws.String("two"), }, PageToken: aws.String("bar"), PipelineId: aws.String("foo"), } req, _ := svc.InputService6TestCaseOperation1Request(input) r := req.HTTPRequest // build request restjson.Build(req) assert.NoError(t, req.Error) // assert body assert.NotNil(t, r.Body) body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) // assert headers }
explode_data.jsonl/8008
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 368 }
[ 2830, 3393, 2505, 1860, 21, 20689, 2271, 10301, 4971, 2859, 917, 4870, 3036, 5370, 5444, 4207, 16, 1155, 353, 8840, 836, 8, 341, 1903, 433, 1669, 3797, 7121, 741, 1903, 7362, 1669, 1532, 2505, 1860, 21, 20689, 2271, 57223, 11, 609, 86...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSplit(t *testing.T) { type splitDesc struct { script string word string sep string result string } tests := map[string]splitDesc{ "space": { script: "./testdata/split.sh", word: "a b c", sep: " ", result: "a\nb\nc\n", }, "pipes": { script: "./testdata/split.sh", word: "1|2|3", sep: "|", result: "1\n2\n3\n", }, "nosplit": { script: "./testdata/split.sh", word: "nospaces", sep: " ", result: "nospaces\n", }, "splitfunc": { script: "./testdata/splitfunc.sh", word: "hah", sep: "a", result: "h\nh\n", }, } for name, desc := range tests { t.Run(name, func(t *testing.T) { var output bytes.Buffer shell, cleanup := fixture.SetupShell(t) defer cleanup() shell.SetStdout(&output) err := shell.ExecFile(desc.script, "mock cmd name", desc.word, desc.sep) if err != nil { t.Fatalf("unexpected err: %s", err) } if output.String() != desc.result { t.Fatalf("got %q expected %q", output.String(), desc.result) } }) } }
explode_data.jsonl/78937
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 525 }
[ 2830, 3393, 20193, 1155, 353, 8840, 836, 8, 341, 13158, 6718, 11065, 2036, 341, 197, 86956, 914, 198, 197, 66298, 256, 914, 198, 197, 197, 28036, 262, 914, 198, 197, 9559, 914, 198, 197, 630, 78216, 1669, 2415, 14032, 60, 6960, 11065,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNodeStack_Push(t *testing.T) { stack := new(gost.NodeStack) elem := newVector(0) stack.Push(elem) stack.Push(elem) if stack.Size() != 2 { t.Error("Enqueue() on stack failed, change undetected") } }
explode_data.jsonl/40063
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 1955, 4336, 1088, 1116, 1155, 353, 8840, 836, 8, 341, 48227, 1669, 501, 3268, 535, 21714, 4336, 340, 68779, 1669, 501, 3781, 7, 15, 340, 48227, 34981, 28880, 340, 48227, 34981, 28880, 340, 743, 5611, 2465, 368, 961, 220, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
// TestUpdateUser exercises the update-user API: which fields may be changed,
// sanitization of the response, and the authorization checks applied to
// regular users, admin/local clients, OAuth sessions, and logged-out clients.
func TestUpdateUser(t *testing.T) {
	th := Setup(t)
	defer th.TearDown()

	user := th.CreateUser()
	th.Client.Login(user.Email, user.Password)

	// Change an updatable field (Nickname) alongside fields the endpoint
	// must ignore (Roles, LastPasswordUpdate).
	user.Nickname = "Joram Wilander"
	user.Roles = model.SystemUserRoleId
	user.LastPasswordUpdate = 123

	ruser, _, err := th.Client.UpdateUser(user)
	require.NoError(t, err)
	CheckUserSanitization(t, ruser)

	require.Equal(t, "Joram Wilander", ruser.Nickname, "Nickname should update properly")
	require.Equal(t, model.SystemUserRoleId, ruser.Roles, "Roles should not update")
	require.NotEqual(t, 123, ruser.LastPasswordUpdate, "LastPasswordUpdate should not update")

	// Changing one's own email without supplying the current password is
	// rejected with 400 (it succeeds further down once Password is set).
	ruser.Email = th.GenerateTestEmail()
	_, resp, err := th.Client.UpdateUser(ruser)
	require.Error(t, err)
	CheckBadRequestStatus(t, resp)

	// Admin and local clients may update the user without the password.
	th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
		ruser.Email = th.GenerateTestEmail()
		_, _, err = client.UpdateUser(user)
		require.NoError(t, err)
	})

	// With the password supplied, the same email change succeeds.
	ruser.Password = user.Password
	ruser, _, err = th.Client.UpdateUser(ruser)
	require.NoError(t, err)
	CheckUserSanitization(t, ruser)

	// Malformed user ID -> 400.
	ruser.Id = "junk"
	_, resp, err = th.Client.UpdateUser(ruser)
	require.Error(t, err)
	CheckBadRequestStatus(t, resp)

	// Well-formed but foreign user ID -> 403.
	ruser.Id = model.NewId()
	_, resp, err = th.Client.UpdateUser(ruser)
	require.Error(t, err)
	CheckForbiddenStatus(t, resp)

	// Non-JSON request body -> 400.
	r, err := th.Client.DoAPIPut("/users/"+ruser.Id, "garbage")
	require.Error(t, err)
	require.Equal(t, http.StatusBadRequest, r.StatusCode)

	// Mark the current session as an OAuth session; direct user updates are
	// then forbidden for this client.
	session, _ := th.App.GetSession(th.Client.AuthToken)
	session.IsOAuth = true
	th.App.AddSessionToCache(session)

	ruser.Id = user.Id
	ruser.Email = th.GenerateTestEmail()
	_, resp, err = th.Client.UpdateUser(ruser)
	require.Error(t, err)
	CheckForbiddenStatus(t, resp)

	// Logged-out client -> 401.
	th.Client.Logout()
	_, resp, err = th.Client.UpdateUser(user)
	require.Error(t, err)
	CheckUnauthorizedStatus(t, resp)

	// A different basic user may not update someone else's account -> 403.
	th.LoginBasic()
	_, resp, err = th.Client.UpdateUser(user)
	require.Error(t, err)
	CheckForbiddenStatus(t, resp)

	// Admin and local clients still succeed.
	th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
		_, _, err = client.UpdateUser(user)
		require.NoError(t, err)
	})
}
explode_data.jsonl/47505
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 819 }
[ 2830, 3393, 4289, 1474, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 340, 16867, 270, 836, 682, 4454, 2822, 19060, 1669, 270, 7251, 1474, 741, 70479, 11716, 32499, 4277, 24066, 11, 1196, 25690, 692, 19060, 2067, 41052, 284, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCompleteTodo(t *testing.T) { r := new(MemoryRepository) s := NewService(r) var userID int = 1 addedTodo, err := r.addTodo("todo1", nil, userID, defaultPriority()) if err != nil { t.Fatalf("failed to add todo") } err = s.CompleteTodo(userID, addedTodo.id) if err != nil { t.Fatalf(err.Error()) } completedTodo, err := r.getTodo(userID, addedTodo.id) if err != nil { t.Fatalf(err.Error()) } if completedTodo.completed == nil { t.Fatalf("expected completed todo, got incomplete todo") } }
explode_data.jsonl/21373
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 213 }
[ 2830, 3393, 12548, 24176, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 501, 3189, 4731, 4624, 340, 1903, 1669, 1532, 1860, 2601, 692, 2405, 35204, 526, 284, 220, 16, 198, 12718, 291, 24176, 11, 1848, 1669, 435, 1364, 24176, 445, 17370, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestParseOid(t *testing.T) { tests := []ParseOidTest{ ParseOidTest{"1.3.6.1.4.1.2636.3.2.3.1.20", ".1.3.6.1.4.1.2636.3.2.3.1.20", false}, ParseOidTest{".1.3.127.128", ".1.3.127.128", false}, ParseOidTest{"1.3", ".1.3", false}, ParseOidTest{".1.3.127.128.129", ".1.3.127.128.129", false}, ParseOidTest{"", ".", false}, ParseOidTest{".", ".", false}, ParseOidTest{"Donald Duck", "", true}, } for _, test := range tests { oid, err := ParseOid(test.ToParse) if (err != nil) != test.ExpectFail { t.Errorf("ParseOid '%s' got error '%s', expected '%s'", test.ToParse, err, test.ExpectFail) } if !test.ExpectFail { if fmt.Sprintf("%s", oid) != test.ExpectedCanonForm { t.Errorf("ParseOid '%s' got '%s', expected '%s'", test.ToParse, oid, test.ExpectedCanonForm) } } } }
explode_data.jsonl/71787
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 403 }
[ 2830, 3393, 14463, 46, 307, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 14463, 46, 307, 2271, 515, 197, 10025, 2583, 46, 307, 2271, 4913, 16, 13, 18, 13, 21, 13, 16, 13, 19, 13, 16, 13, 17, 21, 18, 21, 13, 18, 13, 17, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestVariance(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 ans float64 }{ { x: []float64{8, -3, 7, 8, -4}, weights: nil, ans: 37.7, }, { x: []float64{8, -3, 7, 8, -4}, weights: []float64{1, 1, 1, 1, 1}, ans: 37.7, }, { x: []float64{8, 3, 7, 8, 4}, weights: []float64{2, 1, 2, 1, 1}, ans: 4.2857142857142865, }, { x: []float64{1, 4, 9}, weights: []float64{1, 1.5, 1}, ans: 13.142857142857146, }, { x: []float64{1, 2, 3}, weights: []float64{1, 1.5, 1}, ans: .8, }, } { variance := Variance(test.x, test.weights) if math.Abs(variance-test.ans) > 1e-14 { t.Errorf("Variance mismatch case %d. Expected %v, Found %v", i, test.ans, variance) } } if !panics(func() { Variance(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Variance did not panic with x, weights length mismatch") } }
explode_data.jsonl/1787
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 526 }
[ 2830, 3393, 53, 36905, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 1273, 1669, 2088, 3056, 1235, 341, 197, 10225, 981, 3056, 3649, 21, 19, 198, 197, 197, 13327, 3056, 3649, 21, 19, 198, 197, 43579, 257, 2224, 21, 19, 198, 197, 59...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestInterpretPath interprets a path literal for every registered path
// domain and asserts the resulting PathValue carries that domain and the
// identifier "random".
func TestInterpretPath(t *testing.T) {
	t.Parallel()

	for _, domain := range common.AllPathDomainsByIdentifier {
		t.Run(fmt.Sprintf("valid: %s", domain.Name()), func(t *testing.T) {
			// Declare x as a path in this domain, e.g. /<domain>/random.
			inter := parseCheckAndInterpret(t, fmt.Sprintf(
				` let x = /%s/random `,
				domain.Identifier(),
			),
			)
			assert.Equal(t,
				interpreter.PathValue{
					Domain:     domain,
					Identifier: "random",
				},
				inter.Globals["x"].Value,
			)
		})
	}
}
explode_data.jsonl/51790
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 261 }
[ 2830, 3393, 3306, 8043, 1820, 1155, 353, 8840, 836, 8, 1476, 3244, 41288, 7957, 2822, 2023, 8358, 7947, 1669, 2088, 4185, 16764, 1820, 74713, 2720, 7189, 1476, 197, 3244, 16708, 28197, 17305, 445, 1891, 25, 1018, 82, 497, 7947, 2967, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMutex(t *testing.T) { m := new(Mutex); c := make(chan bool); for i := 0; i < 10; i++ { go HammerMutex(m, c); } for i := 0; i < 10; i++ { <-c; } }
explode_data.jsonl/82482
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 38099, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 501, 3189, 9371, 317, 1444, 1669, 1281, 35190, 1807, 317, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 16, 15, 26, 600, 1027, 341, 197, 30680, 36894, 38099, 1255, 11, 272,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// Test_ConvertResourceToDeployableResource creates a resource group and a
// dummy resource in the fake client, converts the resource into a deployable
// resource, and checks the resulting ResourceGroupResource's group, name,
// API version, and type.
func Test_ConvertResourceToDeployableResource(t *testing.T) {
	g := NewGomegaWithT(t)
	ctx := context.Background()

	test, err := makeTestResolver()
	g.Expect(err).ToNot(HaveOccurred())

	// Both the owning resource group and the resource itself must exist in
	// the client for resolution to succeed.
	rg := createResourceGroup()
	g.Expect(test.client.Create(ctx, rg)).To(Succeed())
	account := createDummyResource()
	g.Expect(test.client.Create(ctx, account)).To(Succeed())

	resource, err := reflecthelpers.ConvertResourceToDeployableResource(ctx, test.resolver, account)
	g.Expect(err).ToNot(HaveOccurred())

	// The account deploys into a resource group, so the concrete type must
	// be ResourceGroupResource.
	rgResource, ok := resource.(*genruntime.ResourceGroupResource)
	g.Expect(ok).To(BeTrue())
	g.Expect("myrg").To(Equal(rgResource.ResourceGroup()))
	g.Expect("azureName").To(Equal(rgResource.Spec().GetName()))
	g.Expect("2021-01-01").To(Equal(rgResource.Spec().GetAPIVersion()))
	g.Expect(string(batch.BatchAccountsSpecTypeMicrosoftBatchBatchAccounts)).To(Equal(rgResource.Spec().GetType()))
}
explode_data.jsonl/45825
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 321 }
[ 2830, 3393, 15100, 1621, 4783, 1249, 69464, 480, 4783, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 1532, 38, 32696, 2354, 51, 1155, 340, 20985, 1669, 2266, 19047, 2822, 18185, 11, 1848, 1669, 1281, 2271, 18190, 741, 3174, 81893, 3964, 568...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestImportTestdata compiles testdata/exports.go with the gc compiler and
// re-imports the resulting export data, checking that the package's Imports
// list contains the expected packages.
func TestImportTestdata(t *testing.T) {
	// This package only handles gc export data.
	if runtime.Compiler != "gc" {
		t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
	}

	tmpdir := mktmpdir(t)
	defer os.RemoveAll(tmpdir)

	compile(t, "testdata", "exports.go", filepath.Join(tmpdir, "testdata"))

	if pkg := testPath(t, "./testdata/exports", tmpdir); pkg != nil {
		// The package's Imports list must include all packages
		// explicitly imported by exports.go, plus all packages
		// referenced indirectly via exported objects in exports.go.
		// With the textual export format, the list may also include
		// additional packages that are not strictly required for
		// import processing alone (they are exported to err "on
		// the safe side").
		// TODO(gri) update the want list to be precise, now that
		// the textual export data is gone.
		got := fmt.Sprint(pkg.Imports())
		for _, want := range []string{"go/ast", "go/token"} {
			if !strings.Contains(got, want) {
				t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
			}
		}
	}
}
explode_data.jsonl/30560
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 369 }
[ 2830, 3393, 11511, 2271, 691, 1155, 353, 8840, 836, 8, 341, 197, 322, 1096, 6328, 1172, 13469, 22122, 7485, 821, 624, 743, 15592, 35952, 961, 330, 20669, 1, 341, 197, 3244, 57776, 69, 445, 20669, 51614, 14185, 537, 2500, 320, 33620, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestTxSimulatorUnsupportedTx checks that a single transaction simulator
// cannot mix writes with private-data range scans or with paginated state
// range scans: whichever of the two kinds of operation comes second must
// fail with ErrUnsupportedTransaction, regardless of order.
func TestTxSimulatorUnsupportedTx(t *testing.T) {
	testEnv := testEnvsMap[levelDBtestEnvName]
	testEnv.init(t, "testtxsimulatorunsupportedtx", nil)
	defer testEnv.cleanup()
	txMgr := testEnv.getTxMgr()
	populateCollConfigForTest(t, txMgr.(*LockBasedTxMgr),
		[]collConfigkey{
			{"ns1", "coll1"},
			{"ns1", "coll2"},
			{"ns1", "coll3"},
			{"ns1", "coll4"},
		},
		version.NewHeight(1, 1))

	// txid1: write first, then a private-data range scan -> unsupported.
	simulator, _ := txMgr.NewTxSimulator("txid1")
	err := simulator.SetState("ns", "key", []byte("value"))
	assert.NoError(t, err)
	_, err = simulator.GetPrivateDataRangeScanIterator("ns1", "coll1", "startKey", "endKey")
	_, ok := err.(*txmgr.ErrUnsupportedTransaction)
	assert.True(t, ok)

	// txid2: private-data range scan first, then a write -> unsupported.
	simulator, _ = txMgr.NewTxSimulator("txid2")
	_, err = simulator.GetPrivateDataRangeScanIterator("ns1", "coll1", "startKey", "endKey")
	assert.NoError(t, err)
	err = simulator.SetState("ns", "key", []byte("value"))
	_, ok = err.(*txmgr.ErrUnsupportedTransaction)
	assert.True(t, ok)

	// Pagination options for the metadata-bearing range scans below.
	queryOptions := map[string]interface{}{
		"limit": int32(2),
	}

	// txid3: write first, then a paginated range scan -> unsupported.
	simulator, _ = txMgr.NewTxSimulator("txid3")
	err = simulator.SetState("ns", "key", []byte("value"))
	assert.NoError(t, err)
	_, err = simulator.GetStateRangeScanIteratorWithMetadata("ns1", "startKey", "endKey", queryOptions)
	_, ok = err.(*txmgr.ErrUnsupportedTransaction)
	assert.True(t, ok)

	// txid4: paginated range scan first, then a write -> unsupported.
	simulator, _ = txMgr.NewTxSimulator("txid4")
	_, err = simulator.GetStateRangeScanIteratorWithMetadata("ns1", "startKey", "endKey", queryOptions)
	assert.NoError(t, err)
	err = simulator.SetState("ns", "key", []byte("value"))
	_, ok = err.(*txmgr.ErrUnsupportedTransaction)
	assert.True(t, ok)
}
explode_data.jsonl/63614
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 642 }
[ 2830, 3393, 31584, 14027, 10511, 41884, 31584, 1155, 353, 8840, 836, 8, 341, 18185, 14359, 1669, 1273, 1702, 11562, 2227, 64586, 3506, 1944, 14359, 675, 921, 18185, 14359, 8271, 1155, 11, 330, 1944, 3998, 14781, 10511, 81145, 3998, 497, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAbsSourceURL(t *testing.T) { jsonStr := sourceMapJSON jsonStr = strings.Replace(jsonStr, "/the/root", "", 1) jsonStr = strings.Replace(jsonStr, "one.js", "http://the/root/one.js", 1) jsonStr = strings.Replace(jsonStr, "two.js", "/another/root/two.js", 1) testAbsSourceURL(t, "", jsonStr) testAbsSourceURL(t, "http://path/to/map", jsonStr) }
explode_data.jsonl/43000
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 140 }
[ 2830, 3393, 27778, 3608, 3144, 1155, 353, 8840, 836, 8, 341, 30847, 2580, 1669, 2530, 2227, 5370, 198, 30847, 2580, 284, 9069, 20858, 9304, 2580, 11, 3521, 1782, 72074, 497, 7342, 220, 16, 340, 30847, 2580, 284, 9069, 20858, 9304, 2580,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParse_Timings_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteTimings = true fakeacc := &testutil.Accumulator{} var err error line := "timing:100|ms" err = s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } if len(s.timings) != 1 { t.Errorf("Should be 1 timing, found %d", len(s.timings)) } s.Gather(fakeacc) if len(s.timings) != 0 { t.Errorf("All timings should have been deleted, found %d", len(s.timings)) } }
explode_data.jsonl/14383
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 210 }
[ 2830, 3393, 14463, 1139, 318, 819, 57418, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 1532, 2271, 16635, 67, 741, 1903, 18872, 20217, 819, 284, 830, 198, 1166, 726, 4475, 1669, 609, 1944, 1314, 77538, 372, 10511, 16094, 2405, 1848, 1465, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestPreAuthorizeJsonFailure checks that a non-JSON body returned by the
// /authorize upstream with HTTP 200 is surfaced to the client as a 500.
func TestPreAuthorizeJsonFailure(t *testing.T) {
	pattern := regexp.MustCompile(`/authorize\z`)
	runPreAuthorizeHandler(t, nil, "/authorize", pattern, "not-json", 200, 500)
}
explode_data.jsonl/2402
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 71 }
[ 2830, 3393, 4703, 37483, 5014, 17507, 1155, 353, 8840, 836, 8, 341, 56742, 4703, 37483, 3050, 1006, 197, 3244, 11, 2092, 11, 3521, 52022, 756, 197, 37013, 4580, 98626, 31813, 52022, 59, 89, 89746, 197, 197, 1, 1921, 56080, 756, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestPageInfo_getLimit(t *testing.T) { type fields struct { Size uint Num uint } tests := []struct { name string fields fields wantLimit int wantOffset int }{ { name: "case1", fields: fields{ Size: 0, Num: 0, }, wantLimit: 10, wantOffset: 0, }, { name: "case2", fields: fields{ Size: 10, Num: 5, }, wantLimit: 10, wantOffset: 40, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &PageInfo{ PageSize: tt.fields.Size, PageNum: tt.fields.Num, } gotLimit, gotOffset := s.GetLimit() if gotLimit != tt.wantLimit { t.Errorf("GetLimit() gotLimit = %v, want %v", gotLimit, tt.wantLimit) } if gotOffset != tt.wantOffset { t.Errorf("GetLimit() gotOffset = %v, want %v", gotOffset, tt.wantOffset) } }) } }
explode_data.jsonl/66392
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 422 }
[ 2830, 3393, 2665, 1731, 3062, 16527, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 91224, 2622, 198, 197, 197, 4651, 220, 2622, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 981, 914, 198, 197, 55276, 257, 5043...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestCreateJob_ThroughPipelineID creates a job that references an existing
// pipeline by ID and checks the stored job record, including the workflow
// manifest and the serialized parameters taken from the request.
func TestCreateJob_ThroughPipelineID(t *testing.T) {
	store, _, pipeline := initWithPipeline(t)
	defer store.Close()
	manager := NewResourceManager(store)

	// The job carries only a pipeline reference plus one parameter override.
	job := &api.Job{
		Name:    "j1",
		Enabled: true,
		PipelineSpec: &api.PipelineSpec{
			PipelineId: pipeline.UUID,
			Parameters: []*api.Parameter{
				{Name: "param1", Value: "world"},
			},
		},
	}
	newJob, err := manager.CreateJob(job)

	// Expected persisted record; fixed UUID/timestamps come from the fake
	// store set up by initWithPipeline.
	expectedJob := &model.Job{
		UUID:           "123",
		DisplayName:    "j1",
		Name:           "j1",
		Namespace:      "default",
		Enabled:        true,
		CreatedAtInSec: 2,
		UpdatedAtInSec: 2,
		Conditions:     "NO_STATUS",
		PipelineSpec: model.PipelineSpec{
			PipelineId:           pipeline.UUID,
			WorkflowSpecManifest: testWorkflow.ToStringForStore(),
			Parameters:           "[{\"name\":\"param1\",\"value\":\"world\"}]",
		},
	}
	assert.Nil(t, err)
	assert.Equal(t, expectedJob, newJob)
}
explode_data.jsonl/28376
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 417 }
[ 2830, 3393, 4021, 12245, 62, 23857, 34656, 915, 1155, 353, 8840, 836, 8, 341, 57279, 11, 8358, 15301, 1669, 13864, 34656, 1155, 340, 16867, 3553, 10421, 741, 92272, 1669, 1532, 32498, 31200, 340, 68577, 1669, 609, 2068, 45293, 515, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestLogOptions builds a server config with explicit log options and checks
// which of them survive: the level is overridden by the default config,
// while the JSON format and output file take effect.
func TestLogOptions(t *testing.T) {
	// Create (and immediately close) a temp file to use as the log target.
	fd, err := ioutil.TempFile("", "test")
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	defer os.Remove(fd.Name())

	logOptions := []log.Option{
		log.WithLevel("DEBUG"),
		log.WithFormat(log.JSONFormat),
		log.WithOutputFile(fd.Name()),
	}

	agentConfig, err := NewServerConfig(defaultValidConfig(), logOptions, false)
	require.NoError(t, err)

	logger := agentConfig.Log.(*log.Logger).Logger

	// defaultConfig() sets level to info, which should override DEBUG set above
	require.Equal(t, logrus.InfoLevel, logger.Level)

	// JSON Formatter and output file should be set from above
	require.IsType(t, &logrus.JSONFormatter{}, logger.Formatter)
	require.Equal(t, fd.Name(), logger.Out.(*os.File).Name())
}
explode_data.jsonl/51906
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 266 }
[ 2830, 3393, 2201, 3798, 1155, 353, 8840, 836, 8, 341, 61721, 11, 1848, 1669, 43144, 65009, 1703, 19814, 330, 1944, 1138, 17957, 35699, 1155, 11, 1848, 340, 17957, 35699, 1155, 11, 12414, 10421, 2398, 16867, 2643, 13270, 17609, 2967, 12367...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMatch_Or_And(t *testing.T) { var testData = []struct { rawYql string data map[string]interface{} out bool }{ { rawYql: `a=9 or c=1 and b!='1'`, data: map[string]interface{}{ "a": int64(10), "b": int64(1), "c": int64(1), }, out: false, }, { rawYql: `a=10 and b>'2' or c=1`, data: map[string]interface{}{ "a": int64(10), "b": int64(1), "c": int64(1), }, out: true, }, { rawYql: `a=10 or c=1 and b!='1'`, data: map[string]interface{}{ "a": int64(10), "b": int64(1), "c": int64(1), }, out: true, }, { rawYql: `a=10 and (c=1 or b!='1')`, data: map[string]interface{}{ "a": int64(10), "b": int64(1), "c": int64(1), }, out: true, }, { rawYql: `a=10 and (c=1 or b!='1') and d='123'`, data: map[string]interface{}{ "a": int64(10), "b": int64(1), "c": int64(1), "d": "123", }, out: true, }, } ass := assert.New(t) for _, tc := range testData { ok, err := Match(tc.rawYql, tc.data) ass.NoError(err) ass.Equal(tc.out, ok, "rawYql=%s||data=%+v", tc.rawYql, tc.data) } }
explode_data.jsonl/65943
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 649 }
[ 2830, 3393, 8331, 62, 2195, 93846, 1155, 353, 8840, 836, 8, 341, 2405, 67348, 284, 3056, 1235, 341, 197, 76559, 56, 1470, 914, 198, 197, 8924, 256, 2415, 14032, 31344, 16094, 197, 13967, 262, 1807, 198, 197, 59403, 197, 197, 515, 298,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetAll(t *testing.T) { msr := NewInMemoryTransactionStore(DummyMatcher{}, 10) m1 := Transaction{Request: &mock.Request{Host: "TEST1"}} msr.Save(m1) m2 := Transaction{Request: &mock.Request{Host: "TEST2"}} msr.Save(m2) reqs := msr.GetAll() msr.Reset() if len(reqs) != 2 { t.Fatalf("Invalid store len: %v", len(reqs)) } reqs = msr.GetAll() if len(reqs) != 0 { t.Fatalf("Invalid store len: %v", len(reqs)) } }
explode_data.jsonl/44241
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 197 }
[ 2830, 3393, 1949, 2403, 1155, 353, 8840, 836, 8, 1476, 47691, 81, 1669, 1532, 641, 10642, 8070, 6093, 5432, 8574, 37554, 22655, 220, 16, 15, 340, 2109, 16, 1669, 17869, 90, 1900, 25, 609, 16712, 9659, 90, 9296, 25, 330, 10033, 16, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestSSc verifies that a chapter-page URL can be advanced to the next page
// by replacing the trailing page number (the segment after the last '-').
func TestSSc(t *testing.T) {
	url := "http://fr.ninemanga.com/chapter/The%20Prince%27s%20Private%20Child/16548-10-1.html"
	index := strings.LastIndex(url, "-")
	url = url[:index+1] + "2" + url[index+2:]
	log.Println(url)

	// The original test only logged the result; assert it so a regression
	// in the page-bumping logic is actually detected.
	const want = "http://fr.ninemanga.com/chapter/The%20Prince%27s%20Private%20Child/16548-10-2.html"
	if url != want {
		t.Fatalf("got %q, want %q", url, want)
	}
}
explode_data.jsonl/35708
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 98 }
[ 2830, 3393, 1220, 66, 1155, 353, 8840, 836, 8, 341, 19320, 1669, 330, 1254, 1110, 1626, 1253, 68413, 19372, 905, 21284, 2873, 47748, 4, 17, 15, 67932, 4, 17, 22, 82, 4, 17, 15, 16787, 4, 17, 15, 3652, 14, 16, 21, 20, 19, 23, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestPSKConfiguration table-tests client/server config validation around
// pre-shared keys: PSK combined with certificates, PSK without an identity,
// and an identity without a PSK callback must each yield the documented
// errors on both sides of the handshake.
func TestPSKConfiguration(t *testing.T) {
	// Check for leaking routines
	report := test.CheckRoutines(t)
	defer report()

	for _, test := range []struct {
		Name                 string
		ClientHasCertificate bool
		ServerHasCertificate bool
		ClientPSK            PSKCallback
		ServerPSK            PSKCallback
		ClientPSKIdentity    []byte
		ServerPSKIdentity    []byte
		WantClientError      error
		WantServerError      error
	}{
		{
			Name:                 "PSK specified",
			ClientHasCertificate: false,
			ServerHasCertificate: false,
			ClientPSK:            func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil },
			ServerPSK:            func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil },
			ClientPSKIdentity:    []byte{0x00},
			ServerPSKIdentity:    []byte{0x00},
			WantClientError:      errNoAvailableCipherSuites,
			WantServerError:      errNoAvailableCipherSuites,
		},
		{
			Name:                 "PSK and certificate specified",
			ClientHasCertificate: true,
			ServerHasCertificate: true,
			ClientPSK:            func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil },
			ServerPSK:            func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil },
			ClientPSKIdentity:    []byte{0x00},
			ServerPSKIdentity:    []byte{0x00},
			WantClientError:      errPSKAndCertificate,
			WantServerError:      errPSKAndCertificate,
		},
		{
			Name:                 "PSK and no identity specified",
			ClientHasCertificate: false,
			ServerHasCertificate: false,
			ClientPSK:            func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil },
			ServerPSK:            func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil },
			ClientPSKIdentity:    nil,
			ServerPSKIdentity:    nil,
			WantClientError:      errPSKAndIdentityMustBeSetForClient,
			WantServerError:      errNoAvailableCipherSuites,
		},
		{
			Name:                 "No PSK and identity specified",
			ClientHasCertificate: false,
			ServerHasCertificate: false,
			ClientPSK:            nil,
			ServerPSK:            nil,
			ClientPSKIdentity:    []byte{0x00},
			ServerPSKIdentity:    []byte{0x00},
			WantClientError:      errIdentityNoPSK,
			WantServerError:      errServerMustHaveCertificate,
		},
	} {
		// NOTE(review): cancel is deferred inside the loop, so each
		// context is only released when the whole test returns.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		// In-memory pipe: ca is the client end, cb the server end.
		ca, cb := dpipe.Pipe()
		type result struct {
			c   *Conn
			err error
		}
		c := make(chan result)

		// Run the client handshake concurrently; its outcome is collected
		// from the channel after the server side returns.
		go func() {
			client, err := testClient(ctx, ca, &Config{PSK: test.ClientPSK, PSKIdentityHint: test.ClientPSKIdentity}, test.ClientHasCertificate)
			c <- result{client, err}
		}()
		_, err := testServer(ctx, cb, &Config{PSK: test.ServerPSK, PSKIdentityHint: test.ServerPSKIdentity}, test.ServerHasCertificate)

		// Errors are compared by message, because the expected and actual
		// values may be distinct error instances with equal text.
		if err != nil || test.WantServerError != nil {
			if !(err != nil && test.WantServerError != nil && err.Error() == test.WantServerError.Error()) {
				t.Fatalf("TestPSKConfiguration: Server Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantServerError, err)
			}
		}

		res := <-c
		if res.err != nil || test.WantClientError != nil {
			if !(res.err != nil && test.WantClientError != nil && res.err.Error() == test.WantClientError.Error()) {
				t.Fatalf("TestPSKConfiguration: Client Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantClientError, res.err)
			}
		}
	}
}
explode_data.jsonl/40940
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1503 }
[ 2830, 3393, 5012, 42, 7688, 1155, 353, 8840, 836, 8, 341, 197, 322, 4248, 369, 51829, 29497, 198, 69931, 1669, 1273, 10600, 49, 28628, 1155, 340, 16867, 1895, 2822, 2023, 8358, 1273, 1669, 2088, 3056, 1235, 341, 197, 21297, 338, 914, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClientOptions_deferToConnString(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } cs := testutil.ConnString(t) uri := testutil.AddOptionsToURI(cs.String(), "appname=bar") client, err := NewClientWithOptions(uri, clientopt.AppName("foo")) require.NoError(t, err) require.Equal(t, "bar", client.connString.AppName) }
explode_data.jsonl/13
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 131 }
[ 2830, 3393, 2959, 3798, 7844, 261, 1249, 9701, 703, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 741, 197, 630, 71899, 1669, 1273, 1314, 50422, 703, 1155, 340, 197, 6070, 1669, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPoolInitWithError(t *testing.T) { defer leaktest.Check(t)() p, e := NewPoolWithInit(func() (interface{}, error) { return nil, errors.New("err") }, 10) assert.NotNil(t, e, "error should be returned") assert.Nil(t, p, "result should be nil") assert.Equal(t, e.Error(), "err", "return error") }
explode_data.jsonl/59161
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 10551, 3803, 66102, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 10600, 1155, 8, 741, 3223, 11, 384, 1669, 1532, 10551, 2354, 3803, 18552, 368, 320, 4970, 22655, 1465, 8, 341, 197, 853, 2092, 11, 5975, 7121, 445, 615,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestNewMem checks the composition of the in-memory filesystem set: Source
// and PublishDir are memory-backed, Os is the real OS filesystem, and only
// Os is detected by IsOsFs.
func TestNewMem(t *testing.T) {
	c := qt.New(t)
	v := config.NewWithTestDefaults()
	f := NewMem(v)

	// Source is an in-memory filesystem.
	c.Assert(f.Source, qt.Not(qt.IsNil))
	c.Assert(f.Source, hqt.IsSameType, new(afero.MemMapFs))
	// PublishDir is a base-path wrapper (over the in-memory fs, presumably
	// — TODO confirm against NewMem).
	c.Assert(f.PublishDir, qt.Not(qt.IsNil))
	c.Assert(f.PublishDir, hqt.IsSameType, new(afero.BasePathFs))
	c.Assert(f.Os, hqt.IsSameType, new(afero.OsFs))
	c.Assert(f.WorkingDirReadOnly, qt.IsNotNil)

	// Only the Os member is recognized as an OS filesystem.
	c.Assert(IsOsFs(f.Source), qt.IsFalse)
	c.Assert(IsOsFs(f.WorkingDirReadOnly), qt.IsFalse)
	c.Assert(IsOsFs(f.PublishDir), qt.IsFalse)
	c.Assert(IsOsFs(f.Os), qt.IsTrue)
}
explode_data.jsonl/4618
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 285 }
[ 2830, 3393, 3564, 18816, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 38949, 7121, 1155, 340, 5195, 1669, 2193, 7121, 2354, 2271, 16273, 741, 1166, 1669, 1532, 18816, 3747, 692, 1444, 11711, 955, 30350, 11, 38949, 15000, 10583, 83, 4506, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTagged_parseName_Overflow(t *testing.T) { var tag1 []string wb := RowBinary.GetWriteBuffer() tagsBuf := RowBinary.GetWriteBuffer() defer wb.Release() defer tagsBuf.Release() logger := zapwriter.Logger("upload") base := &Base{ queue: make(chan string, 1024), inQueue: make(map[string]bool), logger: logger, config: &Config{TableName: "test"}, } var sb strings.Builder sb.WriteString("very_long_name_field1.very_long_name_field2.very_long_name_field3.very_long_name_field4?") for i := 0; i < 100; i++ { if i > 0 { sb.WriteString("&") } sb.WriteString(fmt.Sprintf("very_long_tag%d=very_long_value%d", i, i)) } u := NewTagged(base) err := u.parseName(sb.String(), 10, tag1, wb, tagsBuf) assert.Equal(t, errBufOverflow, err) }
explode_data.jsonl/3479
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 5668, 3556, 21039, 675, 62, 42124, 1155, 353, 8840, 836, 8, 341, 2405, 4772, 16, 3056, 917, 198, 6692, 65, 1669, 10801, 21338, 2234, 7985, 4095, 741, 3244, 2032, 15064, 1669, 10801, 21338, 2234, 7985, 4095, 741, 16867, 37858...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestIssues(t *testing.T) { tests := []string{ "test_issue4-001", "test_issue4-002", "test_issue4-003", "test_issue4-004", } for _, test := range tests { tmpl, err := ParseFile(filepath.Join("testdir", test+".slim")) if err != nil { t.Fatal(err) } f, err := os.Open(filepath.Join("testdir", test+".json")) if err != nil { t.Fatal(err) } var values Values err = json.NewDecoder(f).Decode(&values) if err != nil { t.Fatal(err) } var buf bytes.Buffer err = tmpl.Execute(&buf, values) if err != nil { t.Fatal(err) } expect := readFile(t, filepath.Join("testdir", test+".html")) got := buf.String() if expect != got { t.Fatalf("expected %v but %v", expect, got) } } }
explode_data.jsonl/80441
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 336 }
[ 2830, 3393, 85828, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 917, 515, 197, 197, 1, 1944, 53340, 19, 12, 15, 15, 16, 756, 197, 197, 1, 1944, 53340, 19, 12, 15, 15, 17, 756, 197, 197, 1, 1944, 53340, 19, 12, 15, 15, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestAgent_Join_ACLDeny(t *testing.T) { t.Parallel() a1 := NewTestAgent(t.Name(), TestACLConfig()) defer a1.Shutdown() a2 := NewTestAgent(t.Name(), "") defer a2.Shutdown() addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) } }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=towel", addr), nil) _, err := a1.srv.AgentJoin(nil, req) if err != nil { t.Fatalf("err: %v", err) } }) t.Run("read-only token", func(t *testing.T) { ro := makeReadOnlyAgentACL(t, a1.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=%s", addr, ro), nil) if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) } }) }
explode_data.jsonl/33604
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 468 }
[ 2830, 3393, 16810, 10598, 1961, 97627, 23619, 88, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 11323, 16, 1669, 1532, 2271, 16810, 1155, 2967, 1507, 3393, 55393, 2648, 2398, 16867, 264, 16, 10849, 18452, 741, 11323, 17, 1669, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRetryBase(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) defer ts.Close() c := getClient(ts.URL) c.initialDelay = time.Microsecond // One good endpoint: c.bases = []string{c.bases[0]} resp, err := c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { t.Errorf("Expected status code 200, got %d", resp.StatusCode) } // Bad endpoint followed by good endpoint: c.bases = []string{"not-a-valid-base", c.bases[0]} resp, err = c.requestRetry(http.MethodGet, "/", "", nil) if err != nil { t.Errorf("Error from request: %v", err) } else if resp.StatusCode != 200 { t.Errorf("Expected status code 200, got %d", resp.StatusCode) } // One bad endpoint: c.bases = []string{"not-a-valid-base"} resp, err = c.requestRetry(http.MethodGet, "/", "", nil) if err == nil { t.Error("Expected an error from a request to an invalid base, but succeeded!?") } }
explode_data.jsonl/6246
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 391 }
[ 2830, 3393, 51560, 3978, 1155, 353, 8840, 836, 8, 341, 57441, 1669, 54320, 70334, 7121, 13470, 1220, 2836, 19886, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 4687, 1171, 16867, 10591, 10421, 741, 1444, 1669, 86287, 35864...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestGetTransactions drives the sandbox end-to-end transactions flow:
// mint a sandbox public token, exchange it for an access token, then
// fetch transactions for the trailing 365 days.
// NOTE(review): this talks to the live Plaid sandbox — requires network.
func TestGetTransactions(t *testing.T) {
	// Errors from the two token calls are deliberately dropped; a failure
	// here surfaces below as an error from GetTransactions instead.
	sandboxResp, _ := testClient.CreateSandboxPublicToken(sandboxInstitution, testProducts)
	tokenResp, _ := testClient.ExchangePublicToken(sandboxResp.PublicToken)
	// Date window: last 365 days, formatted with iso8601TimeFormat
	// (presumably yyyy-mm-dd — confirm against the constant's definition).
	startDate := time.Now().Add(-365 * 24 * time.Hour).Format(iso8601TimeFormat)
	endDate := time.Now().Format(iso8601TimeFormat)
	transactionsResp, err := testClient.GetTransactions(tokenResp.AccessToken, startDate, endDate)
	// The sandbox may not have the product ready yet; poll every 5s while
	// the API reports PRODUCT_NOT_READY. NOTE(review): there is no retry
	// cap — this loops as long as that error code keeps coming back.
	if plaidErr, ok := err.(Error); ok {
		for ok && plaidErr.ErrorCode == "PRODUCT_NOT_READY" {
			time.Sleep(5 * time.Second)
			transactionsResp, err = testClient.GetTransactions(tokenResp.AccessToken, startDate, endDate)
			plaidErr, ok = err.(Error)
		}
	}
	assert.Nil(t, err)
	assert.NotNil(t, transactionsResp.Accounts)
	assert.NotNil(t, transactionsResp.Transactions)
}
explode_data.jsonl/49550
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 281 }
[ 2830, 3393, 1949, 48761, 1155, 353, 8840, 836, 8, 341, 1903, 31536, 36555, 11, 716, 1669, 1273, 2959, 7251, 50, 31536, 12676, 3323, 1141, 31536, 641, 10446, 11, 1273, 17746, 340, 43947, 36555, 11, 716, 1669, 1273, 2959, 86997, 12676, 33...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4