text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestBlockOverflowErrors(t *testing.T) { // Use protocol version 70001 specifically here instead of the latest // protocol version because the test data is using bytes encoded with // that version. pver := uint32(1) tests := []struct { buf []byte // Wire encoding pver uint32 // Protocol version for wire encoding err error // Expected error }{ // Block that claims to have ~uint64(0) transactions. { []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot 0x00, 0x00, // VoteBits 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState 0x00, 0x00, // Voters 0x00, // FreshStake 0x00, // Revocations 0x00, 0x00, 0x00, 0x00, // Poolsize 0xff, 0xff, 0x00, 0x1d, // Bits 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits 0x01, 0x00, 0x00, 0x00, // Height 0x01, 0x00, 0x00, 0x00, // Size 0x61, 0xbc, 0x66, 0x49, // Timestamp 0x01, 0xe3, 0x62, 0x99, // Nonce 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5c, 0xa1, 0xab, 0x1e, //StakeVersion 0xff, // TxnCount }, pver, &MessageError{}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Decode from wire format. 
var msg MsgBlock r := bytes.NewReader(test.buf) err := msg.BtcDecode(r, test.pver) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", i, err, reflect.TypeOf(test.err)) continue } // Deserialize from wire format. r = bytes.NewReader(test.buf) err = msg.Deserialize(r) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Deserialize #%d wrong error got: %v, want: %v", i, err, reflect.TypeOf(test.err)) continue } // Deserialize with transaction location info from wire format. br := bytes.NewBuffer(test.buf) _, _, err = msg.DeserializeTxLoc(br) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+ "want: %v", i, err, reflect.TypeOf(test.err)) continue } } }
explode_data.jsonl/20104
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1580 }
[ 2830, 3393, 4713, 42124, 13877, 1155, 353, 8840, 836, 8, 341, 197, 322, 5443, 11507, 2319, 220, 22, 15, 15, 15, 16, 11689, 1588, 4518, 315, 279, 5535, 198, 197, 322, 11507, 2319, 1576, 279, 1273, 821, 374, 1667, 5820, 20498, 448, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestIsSpecialB256(t *testing.T) { if isSpecialB256(0) { t.Fatalf("isSpecialB256 must false (not implemented yet)") } }
explode_data.jsonl/32230
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 49 }
[ 2830, 3393, 3872, 20366, 33, 17, 20, 21, 1155, 353, 8840, 836, 8, 341, 743, 374, 20366, 33, 17, 20, 21, 7, 15, 8, 341, 197, 3244, 30762, 445, 285, 20366, 33, 17, 20, 21, 1969, 895, 320, 1921, 11537, 3602, 19107, 197, 532, 92 ]
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestCreateInstance(t *testing.T) { InternalIps := []string{"ip"} createInstanceArgs := &CreateInstanceArgs{ FlavorId: BBC_TestFlavorId, ImageId: BBC_TestImageId, RaidId: BBC_TestRaidId, RootDiskSizeInGb: 40, PurchaseCount: 1, AdminPass: "AdminPass", ZoneName: BBC_TestZoneName, SubnetId: BBC_TestSubnetId, SecurityGroupId: BBC_TestSecurityGroupId, ClientToken: BBC_TestClientToken, Billing: Billing{ PaymentTiming: PaymentTimingPostPaid, }, DeploySetId: BBC_TestDeploySetId, Name: BBC_TestName, EnableNuma: false, InternalIps: InternalIps, Tags: []model.TagModel{ { TagKey: "tag1", TagValue: "var1", }, }, } res, err := BBC_CLIENT.CreateInstance(createInstanceArgs) fmt.Println(res) ExpectEqual(t.Errorf, err, nil) }
explode_data.jsonl/4034
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 393 }
[ 2830, 3393, 4021, 2523, 1155, 353, 8840, 836, 8, 341, 197, 11569, 40, 1690, 1669, 3056, 917, 4913, 573, 16707, 39263, 2523, 4117, 1669, 609, 4021, 2523, 4117, 515, 197, 197, 3882, 3292, 764, 25, 260, 18096, 32541, 3882, 3292, 764, 345...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCAwriteFile(t *testing.T) { err := writeFile("/"+string(0)+"/", make([]byte, 1), 0777) t.Log("writeFile err: ", err) if err == nil { t.Fatal("Should have failed: ") } err = writeFile(string(0), make([]byte, 1), 0777) t.Log("writeFile err: ", err) if err == nil { t.Fatal("Should have failed: ") } }
explode_data.jsonl/82696
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 136 }
[ 2830, 3393, 5049, 4934, 1703, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 92820, 4283, 5572, 917, 7, 15, 7257, 3115, 497, 1281, 10556, 3782, 11, 220, 16, 701, 220, 15, 22, 22, 22, 340, 3244, 5247, 445, 4934, 1703, 1848, 25, 3670, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRuleInSubquery(t *testing.T) { common.Log.Debug("Entering function: %s", common.GetFunctionName()) sqls := []string{ "select col1,col2,col3 from table1 where col2 in(select col from table2)", "SELECT col1,col2,col3 from table1 where col2 =(SELECT col2 FROM `table1` limit 1)", } for _, sql := range sqls { q, err := NewQuery4Audit(sql) if err == nil { rule := q.RuleInSubquery() if rule.Item != "SUB.001" { t.Error("Rule not match:", rule.Item, "Expect : SUB.001") } } else { t.Error("sqlparser.Parse Error:", err) } } common.Log.Debug("Exiting function: %s", common.GetFunctionName()) }
explode_data.jsonl/76779
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 253 }
[ 2830, 3393, 11337, 641, 3136, 1631, 1155, 353, 8840, 836, 8, 341, 83825, 5247, 20345, 445, 82867, 729, 25, 1018, 82, 497, 4185, 2234, 5152, 675, 2398, 30633, 82, 1669, 3056, 917, 515, 197, 197, 1, 1742, 1375, 16, 51496, 17, 51496, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestStackProjectName(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: "stack_project_name", Dependencies: []string{"@pulumi/pulumi"}, Quick: true, }) }
explode_data.jsonl/76344
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 90 }
[ 2830, 3393, 4336, 7849, 675, 1155, 353, 8840, 836, 8, 341, 2084, 17376, 80254, 2271, 1155, 11, 609, 60168, 80254, 2271, 3798, 515, 197, 197, 6184, 25, 688, 330, 7693, 16352, 1269, 756, 197, 197, 48303, 25, 3056, 917, 4913, 31, 79, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestHasCodeError(t *testing.T) { someErr := errors.New("some error") h := &errHashable{err: someErr} n, err := model.HashCode(h) assert.Equal(t, uint64(0), n) assert.Equal(t, someErr, err) }
explode_data.jsonl/6546
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 10281, 2078, 1454, 1155, 353, 8840, 836, 8, 341, 1903, 635, 7747, 1669, 5975, 7121, 445, 14689, 1465, 1138, 9598, 1669, 609, 615, 6370, 480, 90, 615, 25, 1045, 7747, 532, 9038, 11, 1848, 1669, 1614, 15103, 2078, 3203, 340,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReconcile_SetsStartTime(t *testing.T) { taskRun := tb.TaskRun("test-taskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec( tb.TaskRunTaskRef(simpleTask.Name), )) d := test.Data{ TaskRuns: []*v1alpha1.TaskRun{taskRun}, Tasks: []*v1alpha1.Task{simpleTask}, } testAssets, cancel := getTaskRunController(t, d) defer cancel() if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } if taskRun.Status.StartTime == nil || taskRun.Status.StartTime.IsZero() { t.Errorf("expected startTime to be set by reconcile but was %q", taskRun.Status.StartTime) } }
explode_data.jsonl/876
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 267 }
[ 2830, 3393, 693, 40446, 457, 1098, 1415, 40203, 1155, 353, 8840, 836, 8, 341, 49115, 6727, 1669, 16363, 28258, 6727, 445, 1944, 52579, 6108, 497, 16363, 28258, 6727, 22699, 445, 7975, 3975, 16363, 28258, 6727, 8327, 1006, 197, 62842, 2825...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestUniqueAssociationEdge(t *testing.T) { edgeInfo := getTestEdgeInfo(t, "event") edge := edgeInfo.GetAssociationEdgeByName("Creator") expectedAssocEdge := &AssociationEdge{ EdgeConst: "EventToCreatorEdge", commonEdgeInfo: getCommonEdgeInfo( "Creator", schemaparser.GetEntConfigFromName("account"), ), Unique: true, TableName: "account_creator_edges", } testAssocEdge(t, edge, expectedAssocEdge) // singular version is same as plural when edge is singular assert.Equal(t, "Creator", edge.Singular()) }
explode_data.jsonl/73725
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 188 }
[ 2830, 3393, 22811, 63461, 11656, 1155, 353, 8840, 836, 8, 341, 197, 7186, 1731, 1669, 633, 2271, 11656, 1731, 1155, 11, 330, 3087, 1138, 197, 7186, 1669, 6821, 1731, 2234, 63461, 11656, 16898, 445, 31865, 5130, 42400, 98628, 11656, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetEventStream_ReadClose(t *testing.T) { _, eventMsgs := mockGetEventStreamReadEvents() sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, eventstreamtest.ServeEventStream{ T: t, Events: eventMsgs, }, true, ) if err != nil { t.Fatalf("expect no error, %v", err) } defer cleanupFn() svc := New(sess) resp, err := svc.GetEventStream(nil) if err != nil { t.Fatalf("expect no error got, %v", err) } resp.EventStream.Close() <-resp.EventStream.Events() if err := resp.EventStream.Err(); err != nil { t.Errorf("expect no error, %v", err) } }
explode_data.jsonl/13343
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 250 }
[ 2830, 3393, 1949, 1556, 3027, 38381, 7925, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1538, 6611, 82, 1669, 7860, 1949, 1556, 3027, 4418, 7900, 741, 1903, 433, 11, 21290, 24911, 11, 1848, 1669, 1538, 4027, 1944, 39820, 1556, 3027, 5283, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestControllerCreateGameServerPod(t *testing.T) { t.Parallel() newFixture := func() *v1alpha1.GameServer { fixture := &v1alpha1.GameServer{ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, Spec: newSingleContainerSpec(), Status: v1alpha1.GameServerStatus{State: v1alpha1.Creating}} fixture.ApplyDefaults() return fixture } t.Run("create pod, with no issues", func(t *testing.T) { c, m := newFakeController() fixture := newFixture() created := false m.KubeClient.AddReactor("create", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { created = true ca := action.(k8stesting.CreateAction) pod := ca.GetObject().(*corev1.Pod) assert.Equal(t, fixture.ObjectMeta.Name+"-", pod.ObjectMeta.GenerateName) assert.Equal(t, fixture.ObjectMeta.Namespace, pod.ObjectMeta.Namespace) assert.Equal(t, "gameserver", pod.ObjectMeta.Labels[stable.GroupName+"/role"]) assert.Equal(t, fixture.ObjectMeta.Name, pod.ObjectMeta.Labels[v1alpha1.GameServerPodLabel]) assert.True(t, metav1.IsControlledBy(pod, fixture)) gsContainer := pod.Spec.Containers[0] assert.Equal(t, fixture.Spec.Ports[0].HostPort, gsContainer.Ports[0].HostPort) assert.Equal(t, fixture.Spec.Ports[0].ContainerPort, gsContainer.Ports[0].ContainerPort) assert.Equal(t, corev1.Protocol("UDP"), gsContainer.Ports[0].Protocol) assert.Equal(t, "/gshealthz", gsContainer.LivenessProbe.HTTPGet.Path) assert.Equal(t, gsContainer.LivenessProbe.HTTPGet.Port, intstr.FromInt(8080)) assert.Equal(t, intstr.FromInt(8080), gsContainer.LivenessProbe.HTTPGet.Port) assert.Equal(t, fixture.Spec.Health.InitialDelaySeconds, gsContainer.LivenessProbe.InitialDelaySeconds) assert.Equal(t, fixture.Spec.Health.PeriodSeconds, gsContainer.LivenessProbe.PeriodSeconds) assert.Equal(t, fixture.Spec.Health.FailureThreshold, gsContainer.LivenessProbe.FailureThreshold) assert.Len(t, pod.Spec.Containers, 2, "Should have a sidecar container") assert.Equal(t, pod.Spec.Containers[1].Image, c.sidecarImage) assert.Len(t, 
pod.Spec.Containers[1].Env, 2, "2 env vars") assert.Equal(t, "GAMESERVER_NAME", pod.Spec.Containers[1].Env[0].Name) assert.Equal(t, fixture.ObjectMeta.Name, pod.Spec.Containers[1].Env[0].Value) assert.Equal(t, "POD_NAMESPACE", pod.Spec.Containers[1].Env[1].Name) return true, pod, nil }) gs, pod, err := c.createGameServerPod(fixture) assert.Nil(t, err) assert.Equal(t, fixture.Status.State, gs.Status.State) assert.True(t, created) assert.True(t, metav1.IsControlledBy(pod, gs)) agtesting.AssertEventContains(t, m.FakeRecorder.Events, "Pod") }) t.Run("invalid podspec", func(t *testing.T) { c, mocks := newFakeController() fixture := newFixture() podCreated := false gsUpdated := false mocks.KubeClient.AddReactor("create", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { podCreated = true return true, nil, k8serrors.NewInvalid(schema.GroupKind{}, "test", field.ErrorList{}) }) mocks.AgonesClient.AddReactor("update", "gameservers", func(action k8stesting.Action) (bool, runtime.Object, error) { gsUpdated = true ua := action.(k8stesting.UpdateAction) gs := ua.GetObject().(*v1alpha1.GameServer) assert.Equal(t, v1alpha1.Error, gs.Status.State) return true, gs, nil }) gs, _, err := c.createGameServerPod(fixture) assert.Nil(t, err) assert.True(t, podCreated, "attempt should have been made to create a pod") assert.True(t, gsUpdated, "GameServer should be updated") assert.Equal(t, v1alpha1.Error, gs.Status.State) }) }
explode_data.jsonl/25430
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1418 }
[ 2830, 3393, 2051, 4021, 4868, 5475, 23527, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 8638, 18930, 1669, 2915, 368, 353, 85, 16, 7141, 16, 20940, 5475, 341, 197, 1166, 12735, 1669, 609, 85, 16, 7141, 16, 20940, 5475, 90, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBuildTrafficConfiguration_Canary(t *testing.T) { expected := &Config{ Targets: map[string]RevisionTargets{ DefaultTarget: {{ TrafficTarget: v1.TrafficTarget{ ConfigurationName: goodConfig.Name, RevisionName: goodOldRev.Name, Percent: ptr.Int64(90), LatestRevision: ptr.Bool(false), }, Active: true, Protocol: net.ProtocolHTTP1, }, { TrafficTarget: v1.TrafficTarget{ ConfigurationName: goodConfig.Name, RevisionName: goodNewRev.Name, Percent: ptr.Int64(10), LatestRevision: ptr.Bool(true), }, Active: true, Protocol: net.ProtocolH2C, }}, }, revisionTargets: []RevisionTarget{{ TrafficTarget: v1.TrafficTarget{ ConfigurationName: goodConfig.Name, RevisionName: goodOldRev.Name, Percent: ptr.Int64(90), LatestRevision: ptr.Bool(false), }, Active: true, Protocol: net.ProtocolHTTP1, }, { TrafficTarget: v1.TrafficTarget{ ConfigurationName: goodConfig.Name, RevisionName: goodNewRev.Name, Percent: ptr.Int64(10), LatestRevision: ptr.Bool(true), }, Active: true, Protocol: net.ProtocolH2C, }}, Configurations: map[string]*v1.Configuration{ goodConfig.Name: goodConfig, }, Revisions: map[string]*v1.Revision{ goodOldRev.Name: goodOldRev, goodNewRev.Name: goodNewRev, }, } if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1.TrafficTarget{ RevisionName: goodOldRev.Name, Percent: ptr.Int64(90), }, v1.TrafficTarget{ ConfigurationName: goodConfig.Name, Percent: ptr.Int64(10), }))); err != nil { t.Errorf("Unexpected error %v", err) } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) } }
explode_data.jsonl/17610
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 879 }
[ 2830, 3393, 11066, 87229, 7688, 920, 276, 658, 1155, 353, 8840, 836, 8, 341, 42400, 1669, 609, 2648, 515, 197, 197, 49030, 25, 2415, 14032, 60, 33602, 49030, 515, 298, 91084, 6397, 25, 80505, 571, 10261, 956, 20615, 6397, 25, 348, 16,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRequestAuthentication(t *testing.T) { framework.NewTest(t). Features("security.fuzz.jwt"). Run(func(t framework.TestContext) { ns := "fuzz-jwt" namespace.ClaimOrFail(t, t, ns) t.ConfigIstio().YAML(ns, requestAuthnPolicy).ApplyOrFail(t) t.Logf("request authentication policy applied") // We don't care about the actual backend for JWT test, one backend is good enough. deploy(t, apacheServer, ns, "backends/apache/apache.yaml") deploy(t, jwtTool, ns, "fuzzers/jwt_tool/jwt_tool.yaml") waitService(t, apacheServer, ns) testCases := []struct { name string baseToken string }{ {"TokenIssuer1", jwt.TokenIssuer1}, {"TokenIssuer1WithAud", jwt.TokenIssuer1WithAud}, {"TokenIssuer1WithAzp", jwt.TokenIssuer1WithAzp}, {"TokenIssuer2", jwt.TokenIssuer2}, {"TokenIssuer1WithNestedClaims1", jwt.TokenIssuer1WithNestedClaims1}, {"TokenIssuer1WithNestedClaims2", jwt.TokenIssuer1WithNestedClaims2}, {"TokenIssuer2WithSpaceDelimitedScope", jwt.TokenIssuer2WithSpaceDelimitedScope}, } for _, tc := range testCases { t.NewSubTest(tc.name).Run(func(t framework.TestContext) { runJwtToolTest(t, ns, apacheServer, tc.baseToken) }) } }) }
explode_data.jsonl/66767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 521 }
[ 2830, 3393, 1900, 19297, 1155, 353, 8840, 836, 8, 341, 1166, 5794, 7121, 2271, 1155, 4292, 197, 197, 21336, 445, 17039, 833, 8889, 71085, 38609, 197, 85952, 18552, 1155, 12626, 8787, 1972, 8, 341, 298, 84041, 1669, 330, 69, 8889, 13333,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLimit(t *testing.T) { index, err := Open("Doc") if err != nil { t.Fatalf("err from Open: %v", err) } c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, res *pb.SearchResponse) error { limit := 20 // Default per page. if req.Params.Limit != nil { limit = int(*req.Params.Limit) } res.Status = &pb.RequestStatus{Code: pb.SearchServiceError_OK.Enum()} res.MatchedCount = proto.Int64(int64(limit)) for i := 0; i < limit; i++ { res.Result = append(res.Result, &pb.SearchResult{Document: &pb.Document{}}) res.Cursor = proto.String("moreresults") } return nil }) const maxDocs = 500 // Limit maximum number of docs. testCases := []struct { limit, want int }{ {limit: 0, want: maxDocs}, {limit: 42, want: 42}, {limit: 100, want: 100}, {limit: 1000, want: maxDocs}, } for _, tt := range testCases { it := index.Search(c, "gopher", &SearchOptions{Limit: tt.limit, IDsOnly: true}) count := 0 for ; count < maxDocs; count++ { _, err := it.Next(nil) if err == Done { break } if err != nil { t.Fatalf("err after %d: %v", count, err) } } if count != tt.want { t.Errorf("got %d results, expected %d", count, tt.want) } } }
explode_data.jsonl/27958
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 520 }
[ 2830, 3393, 16527, 1155, 353, 8840, 836, 8, 341, 26327, 11, 1848, 1669, 5264, 445, 9550, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 615, 504, 5264, 25, 1018, 85, 497, 1848, 340, 197, 532, 1444, 1669, 264, 57824, 287, 99...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetMachineSetsForDeployment(t *testing.T) { machineDeployment1 := clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withMatchingOwnerRefAndLabels", Namespace: "test", UID: "UID", }, Spec: clusterv1.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", }, }, }, } machineDeployment2 := clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withNoMatchingOwnerRef", Namespace: "test", UID: "unMatchingUID", }, Spec: clusterv1.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar2", }, }, }, } machineDeployment3 := clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withMatchingOwnerRefAndNoMatchingLabels", Namespace: "test", UID: "UID3", }, Spec: clusterv1.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", }, }, }, } ms1 := clusterv1.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, ObjectMeta: metav1.ObjectMeta{ Name: "withNoOwnerRefShouldBeAdopted2", Namespace: "test", Labels: map[string]string{ "foo": "bar2", }, }, } ms2 := clusterv1.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, ObjectMeta: metav1.ObjectMeta{ Name: "withOwnerRefAndLabels", Namespace: "test", OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(&machineDeployment1, machineDeploymentKind), }, Labels: map[string]string{ "foo": "bar", }, }, } ms3 := clusterv1.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, ObjectMeta: metav1.ObjectMeta{ Name: "withNoOwnerRefShouldBeAdopted1", Namespace: "test", Labels: map[string]string{ "foo": "bar", }, }, } ms4 := clusterv1.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, ObjectMeta: metav1.ObjectMeta{ Name: "withNoOwnerRefNoMatch", Namespace: "test", Labels: map[string]string{ "foo": "nomatch", }, }, } ms5 := clusterv1.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, ObjectMeta: 
metav1.ObjectMeta{ Name: "withOwnerRefAndNoMatchLabels", Namespace: "test", OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(&machineDeployment3, machineDeploymentKind), }, Labels: map[string]string{ "foo": "nomatch", }, }, } machineSetList := &clusterv1.MachineSetList{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSetList", }, Items: []clusterv1.MachineSet{ ms1, ms2, ms3, ms4, ms5, }, } testCases := []struct { name string machineDeployment clusterv1.MachineDeployment expected []*clusterv1.MachineSet }{ { name: "matching ownerRef and labels", machineDeployment: machineDeployment1, expected: []*clusterv1.MachineSet{&ms2, &ms3}, }, { name: "no matching ownerRef, matching labels", machineDeployment: machineDeployment2, expected: []*clusterv1.MachineSet{&ms1}, }, { name: "matching ownerRef, mismatch labels", machineDeployment: machineDeployment3, expected: []*clusterv1.MachineSet{&ms3, &ms5}, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { err := clusterv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) r := &MachineDeploymentReconciler{ Client: fake.NewFakeClient(machineSetList), Log: log.Log, recorder: record.NewFakeRecorder(32), } got, err := r.getMachineSetsForDeployment(&tc.machineDeployment) if err != nil { t.Errorf("Failed running getMachineSetsForDeployment: %v", err) } if len(tc.expected) != len(got) { t.Errorf("Case %s. Expected to get %d MachineSets but got %d", tc.machineDeployment.Name, len(tc.expected), len(got)) } for idx, res := range got { if res.Name != tc.expected[idx].Name || res.Namespace != tc.expected[idx].Namespace { t.Errorf("Case %s. Expected %q found %q", tc.machineDeployment.Name, res.Name, tc.expected[idx].Name) } } }) } }
explode_data.jsonl/62422
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1956 }
[ 2830, 3393, 1949, 21605, 30175, 2461, 75286, 1155, 353, 8840, 836, 8, 341, 2109, 3814, 75286, 16, 1669, 1185, 590, 648, 16, 1321, 3814, 75286, 515, 197, 23816, 12175, 25, 77520, 16, 80222, 515, 298, 21297, 25, 414, 330, 4197, 64430, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestLocalConfigOption(t *testing.T) { if service, err := NewConfigService(WithFileConfig("woot.yaml")); service != nil || err == nil { t.Errorf("file does not exist but service created: %v: %v", service, err) } service, err := NewConfigService(WithFileConfig("../testdata/schedules.yaml")) if service == nil || err != nil { t.Errorf("file exists but service not created: %v: %v", service, err) } if err := service.Stop(); err != nil { t.Errorf("fail to stop service") } }
explode_data.jsonl/16482
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 7319, 2648, 5341, 1155, 353, 8840, 836, 8, 341, 743, 2473, 11, 1848, 1669, 1532, 2648, 1860, 7, 2354, 1703, 2648, 445, 1126, 354, 33406, 32596, 2473, 961, 2092, 1369, 1848, 621, 2092, 341, 197, 3244, 13080, 445, 1192, 1558...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestEventInvalidSpec(t *testing.T) { // Test whether a WorkflowFailed event is emitted in case of invalid spec controller := newController() wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("") wf := unmarshalWF(invalidSpec) wf, err := wfcset.Create(wf) assert.NoError(t, err) woc := newWorkflowOperationCtx(wf, controller) woc.operate() events, err := controller.kubeclientset.CoreV1().Events("").List(metav1.ListOptions{}) assert.NoError(t, err) assert.Equal(t, 2, len(events.Items)) runningEvent := events.Items[0] assert.Equal(t, "WorkflowRunning", runningEvent.Reason) invalidSpecEvent := events.Items[1] assert.Equal(t, "WorkflowFailed", invalidSpecEvent.Reason) assert.Equal(t, "invalid spec: template name '123' undefined", invalidSpecEvent.Message) }
explode_data.jsonl/54392
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 283 }
[ 2830, 3393, 1556, 7928, 8327, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 3425, 264, 60173, 9408, 1538, 374, 46942, 304, 1142, 315, 8318, 1398, 198, 61615, 1669, 501, 2051, 741, 6692, 8316, 746, 1669, 6461, 1418, 69, 2972, 746, 1897...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewRollupTargetV2ProtoInvalidPipelineProto(t *testing.T) { proto := &rulepb.RollupTargetV2{ Pipeline: &pipelinepb.Pipeline{ Ops: []pipelinepb.PipelineOp{ { Type: pipelinepb.PipelineOp_TRANSFORMATION, Transformation: &pipelinepb.TransformationOp{ Type: transformationpb.TransformationType_UNKNOWN, }, }, }, }, } _, err := newRollupTargetFromV2Proto(proto) require.Error(t, err) }
explode_data.jsonl/8365
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 193 }
[ 2830, 3393, 3564, 32355, 454, 6397, 53, 17, 31549, 7928, 34656, 31549, 1155, 353, 8840, 836, 8, 341, 197, 15110, 1669, 609, 12937, 16650, 71212, 454, 6397, 53, 17, 515, 197, 10025, 8790, 25, 609, 51258, 16650, 1069, 8790, 515, 298, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOutOfOrderFlood(t *testing.T) { c := context.New(t, defaultMTU) defer c.Cleanup() // Create a new connection with initial window size of 10. opt := tcpip.ReceiveBufferSizeOption(10) c.CreateConnected(789, 30000, &opt) if _, _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { t.Fatalf("got c.EP.Read(nil) = %v, want = %v", err, tcpip.ErrWouldBlock) } // Send 100 packets before the actual one that is expected. data := []byte{1, 2, 3, 4, 5, 6} for i := 0; i < 100; i++ { c.SendPacket(data[3:], &context.Headers{ SrcPort: context.TestPort, DstPort: c.Port, Flags: header.TCPFlagAck, SeqNum: 796, AckNum: c.IRS.Add(1), RcvWnd: 30000, }) checker.IPv4(t, c.GetPacket(), checker.TCP( checker.DstPort(context.TestPort), checker.SeqNum(uint32(c.IRS)+1), checker.AckNum(790), checker.TCPFlags(header.TCPFlagAck), ), ) } // Send packet with seqnum 793. It must be discarded because the // out-of-order buffer was filled by the previous packets. c.SendPacket(data[3:], &context.Headers{ SrcPort: context.TestPort, DstPort: c.Port, Flags: header.TCPFlagAck, SeqNum: 793, AckNum: c.IRS.Add(1), RcvWnd: 30000, }) checker.IPv4(t, c.GetPacket(), checker.TCP( checker.DstPort(context.TestPort), checker.SeqNum(uint32(c.IRS)+1), checker.AckNum(790), checker.TCPFlags(header.TCPFlagAck), ), ) // Now send the expected packet, seqnum 790. c.SendPacket(data[:3], &context.Headers{ SrcPort: context.TestPort, DstPort: c.Port, Flags: header.TCPFlagAck, SeqNum: 790, AckNum: c.IRS.Add(1), RcvWnd: 30000, }) // Check that only packet 790 is acknowledged. checker.IPv4(t, c.GetPacket(), checker.TCP( checker.DstPort(context.TestPort), checker.SeqNum(uint32(c.IRS)+1), checker.AckNum(793), checker.TCPFlags(header.TCPFlagAck), ), ) }
explode_data.jsonl/22283
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 882 }
[ 2830, 3393, 31731, 4431, 37, 4556, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 2266, 7121, 1155, 11, 1638, 8505, 52, 340, 16867, 272, 727, 60639, 2822, 197, 322, 4230, 264, 501, 3633, 448, 2856, 3241, 1379, 315, 220, 16, 15, 624, 6483...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestTeamTxSubteamAdmins(t *testing.T) { // Test if AddMemberTx properly keys implicit admins to teams // through the use of 'implicit_team_keys'. tt := newTeamTester(t) defer tt.cleanup() ann := tt.addUser("ann") t.Logf("Signed up user ann (%s)", ann.username) bob := tt.addUser("bob") t.Logf("Signed up user bob (%s)", bob.username) team := ann.createTeam() t.Logf("Team created (%s)", team) teamName, err := keybase1.TeamNameFromString(team) require.NoError(t, err) _, err = teams.CreateSubteam(context.Background(), ann.tc.G, "golfers", teamName, keybase1.TeamRole_NONE /* addSelfAs */) require.NoError(t, err) _, err = teams.CreateSubteam(context.Background(), ann.tc.G, "pokerpals", teamName, keybase1.TeamRole_NONE /* addSelfAs */) require.NoError(t, err) teamObj := ann.loadTeam(team, true /* admin */) tx := teams.CreateAddMemberTx(teamObj) err = tx.AddMemberByUsername(context.Background(), bob.username, keybase1.TeamRole_ADMIN) require.NoError(t, err) err = tx.Post(libkb.NewMetaContextForTest(*ann.tc)) require.NoError(t, err) }
explode_data.jsonl/27646
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 401 }
[ 2830, 3393, 14597, 31584, 3136, 9196, 7210, 82, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 421, 2691, 9366, 31584, 10277, 6894, 17995, 57095, 311, 7263, 198, 197, 322, 1526, 279, 990, 315, 364, 30940, 26532, 12631, 29636, 3244, 83, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConfigDeletion(t *testing.T) { templateInput := []byte{} platformInput := []byte( `kind: List metadata: {} apiVersion: v1 items: - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: foo spec: accessModes: - ReadWriteOnce resources: requests: storage: 5Gi storageClassName: gp2 status: {}`) filter := &ResourceFilter{ Kinds: []string{"PersistentVolumeClaim"}, } changeset := getChangeset(t, filter, platformInput, templateInput, false, true, []string{}) if len(changeset.Delete) != 1 { t.Errorf("Changeset.Delete is blank but should not be") } }
explode_data.jsonl/33781
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 229 }
[ 2830, 3393, 2648, 1912, 52625, 1155, 353, 8840, 836, 8, 1476, 22832, 2505, 1669, 3056, 3782, 31483, 197, 15734, 2505, 1669, 3056, 3782, 1006, 197, 197, 63, 15314, 25, 1759, 198, 17637, 25, 5613, 2068, 5637, 25, 348, 16, 198, 3615, 510...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestHandleAbsentValidator(t *testing.T) { // initial setup ctx, ck, sk, keeper := createTestInput(t) amtInt := int64(100) addr, val, amt := addrs[0], pks[0], sdk.NewInt(amtInt) sh := stake.NewHandler(sk) slh := NewHandler(keeper) got := sh(ctx, newTestMsgCreateValidator(addr, val, amt)) require.True(t, got.IsOK()) stake.EndBlocker(ctx, sk) require.Equal(t, ck.GetCoins(ctx, addr), sdk.Coins{{sk.GetParams(ctx).BondDenom, initCoins.Sub(amt)}}) require.True(t, sdk.NewRatFromInt(amt).Equal(sk.Validator(ctx, addr).GetPower())) info, found := keeper.getValidatorSigningInfo(ctx, sdk.ValAddress(val.Address())) require.False(t, found) require.Equal(t, int64(0), info.StartHeight) require.Equal(t, int64(0), info.IndexOffset) require.Equal(t, int64(0), info.SignedBlocksCounter) require.Equal(t, int64(0), info.JailedUntil) height := int64(0) // 1000 first blocks OK for ; height < SignedBlocksWindow; height++ { ctx = ctx.WithBlockHeight(height) keeper.handleValidatorSignature(ctx, val, amtInt, true) } info, found = keeper.getValidatorSigningInfo(ctx, sdk.ValAddress(val.Address())) require.True(t, found) require.Equal(t, int64(0), info.StartHeight) require.Equal(t, SignedBlocksWindow, info.SignedBlocksCounter) // 500 blocks missed for ; height < SignedBlocksWindow+(SignedBlocksWindow-MinSignedPerWindow); height++ { ctx = ctx.WithBlockHeight(height) keeper.handleValidatorSignature(ctx, val, amtInt, false) } info, found = keeper.getValidatorSigningInfo(ctx, sdk.ValAddress(val.Address())) require.True(t, found) require.Equal(t, int64(0), info.StartHeight) require.Equal(t, SignedBlocksWindow-MinSignedPerWindow, info.SignedBlocksCounter) // validator should be bonded still validator, _ := sk.GetValidatorByPubKey(ctx, val) require.Equal(t, sdk.Bonded, validator.GetStatus()) pool := sk.GetPool(ctx) require.Equal(t, int64(amtInt), pool.BondedTokens.RoundInt64()) // 501st block missed ctx = ctx.WithBlockHeight(height) keeper.handleValidatorSignature(ctx, val, amtInt, false) info, 
found = keeper.getValidatorSigningInfo(ctx, sdk.ValAddress(val.Address())) require.True(t, found) require.Equal(t, int64(0), info.StartHeight) require.Equal(t, SignedBlocksWindow-MinSignedPerWindow-1, info.SignedBlocksCounter) // validator should have been revoked validator, _ = sk.GetValidatorByPubKey(ctx, val) require.Equal(t, sdk.Unbonded, validator.GetStatus()) // unrevocation should fail prior to jail expiration got = slh(ctx, NewMsgUnrevoke(addr)) require.False(t, got.IsOK()) // unrevocation should succeed after jail expiration ctx = ctx.WithBlockHeader(abci.Header{Time: DowntimeUnbondDuration + 1}) got = slh(ctx, NewMsgUnrevoke(addr)) require.True(t, got.IsOK()) // validator should be rebonded now validator, _ = sk.GetValidatorByPubKey(ctx, val) require.Equal(t, sdk.Bonded, validator.GetStatus()) // validator should have been slashed pool = sk.GetPool(ctx) require.Equal(t, int64(amtInt-1), pool.BondedTokens.RoundInt64()) // validator start height should have been changed info, found = keeper.getValidatorSigningInfo(ctx, sdk.ValAddress(val.Address())) require.True(t, found) require.Equal(t, height, info.StartHeight) require.Equal(t, SignedBlocksWindow-MinSignedPerWindow-1, info.SignedBlocksCounter) // validator should not be immediately revoked again height++ ctx = ctx.WithBlockHeight(height) keeper.handleValidatorSignature(ctx, val, amtInt, false) validator, _ = sk.GetValidatorByPubKey(ctx, val) require.Equal(t, sdk.Bonded, validator.GetStatus()) // 500 signed blocks nextHeight := height + MinSignedPerWindow + 1 for ; height < nextHeight; height++ { ctx = ctx.WithBlockHeight(height) keeper.handleValidatorSignature(ctx, val, amtInt, false) } // validator should be revoked again after 500 unsigned blocks nextHeight = height + MinSignedPerWindow + 1 for ; height <= nextHeight; height++ { ctx = ctx.WithBlockHeight(height) keeper.handleValidatorSignature(ctx, val, amtInt, false) } validator, _ = sk.GetValidatorByPubKey(ctx, val) require.Equal(t, sdk.Unbonded, 
validator.GetStatus()) }
explode_data.jsonl/47985
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1497 }
[ 2830, 3393, 6999, 80251, 14256, 1155, 353, 8840, 836, 8, 1476, 197, 322, 2856, 6505, 198, 20985, 11, 38613, 11, 1901, 11, 53416, 1669, 1855, 2271, 2505, 1155, 340, 197, 35225, 1072, 1669, 526, 21, 19, 7, 16, 15, 15, 340, 53183, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestCharsetContains(t *testing.T) { ranges := []Range{Range{1, 2}, {3, 22}, {23, 25}, {22, 23}, {100, 102}, {40, 42}} testdata := []struct { val int32 contains bool }{ {1, true}, {10, true}, {24, true}, {-1, false}, {2, false}, {102, false}, {50, false}, } c := NewCharset() c.UniteRangeSlice(ranges) for i, v := range testdata { v := v t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Parallel() test.EXPECT_EQ(t, c.Contains(v.val), v.contains, "") }) } }
explode_data.jsonl/51960
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 287 }
[ 2830, 3393, 78172, 23805, 1155, 353, 8840, 836, 8, 972, 7000, 5520, 1669, 3056, 6046, 90, 6046, 90, 16, 11, 220, 17, 2137, 314, 18, 11, 220, 17, 17, 2137, 314, 17, 18, 11, 220, 17, 20, 2137, 314, 17, 17, 11, 220, 17, 18, 2137,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReverseNodeInKGroup(t *testing.T) { t.Logf("list1: %v, except: %v", utils.ListNodeToArray(input1), except) r := ReverseNodeInKGroup(input1, k) t.Logf("result: %v", utils.ListNodeToArray(r)) }
explode_data.jsonl/45924
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 45695, 1955, 641, 42, 2808, 1155, 353, 8840, 836, 8, 341, 3244, 98954, 445, 1607, 16, 25, 1018, 85, 11, 3650, 25, 1018, 85, 497, 12439, 5814, 1955, 29512, 5384, 16, 701, 3650, 340, 7000, 1669, 24277, 1955, 641, 42, 2808,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestBuildReqVipRange(t *testing.T) { buildC, err := Build("req_vip_range(\"1.1.1.1\", \"4.4.4.4\")") if err != nil { t.Errorf("build failed, req_vip_range(\"1.1.1.1\", \"4.4.4.4\")") } req.Session.Vip = net.IPv4(3, 255, 255, 255).To4() if !buildC.Match(&req) { t.Errorf("3.255.255.255 not match req_vip_range(\"1.1.1.1\", \"4.4.4.4\")") } buildC, err = Build("req_vip_range(\"2001:0DB8:02de:0::e13\", \"2002:0DB8:02de:0::e13\")") if err != nil { t.Errorf("build failed, req_vip_range(\"2001:0DB8:02de:0::e13\", \"2002:0DB8:02de:0::e13\")") } req.Session.Vip = net.ParseIP("2001:ffff::ffff") if !buildC.Match(&req) { t.Errorf("2001:ffff::ffff not match req_vip_range(\"2001:0DB8:02de:0::e13\", \"2002:0DB8:02de:0::e13\")") } }
explode_data.jsonl/11124
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 385 }
[ 2830, 3393, 11066, 27234, 53, 573, 6046, 1155, 353, 8840, 836, 8, 341, 69371, 34, 11, 1848, 1669, 7854, 445, 2958, 2273, 573, 9698, 36014, 16, 13, 16, 13, 16, 13, 16, 16215, 7245, 19, 13, 19, 13, 19, 13, 19, 62705, 1138, 743, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestCachedChartsAllFromRepo(t *testing.T) { charts, err := chartsImplementation.AllFromRepo(testutil.RepoName) assert.NoErr(t, err) assert.True(t, len(charts) > 0, "returned charts") noCharts, err := chartsImplementation.AllFromRepo(testutil.BogusRepo) assert.ExistsErr(t, err, "sent bogus repo name to GetChartsInRepo") assert.True(t, len(noCharts) == 0, "empty charts slice") }
explode_data.jsonl/37971
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 150 }
[ 2830, 3393, 70293, 64878, 2403, 3830, 25243, 1155, 353, 8840, 836, 8, 341, 23049, 7038, 11, 1848, 1669, 26131, 36850, 16764, 3830, 25243, 8623, 1314, 2817, 5368, 675, 340, 6948, 16766, 7747, 1155, 11, 1848, 340, 6948, 32443, 1155, 11, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestComposeWithStreams(t *testing.T) { assert := asrt.New(t) container, _ := FindContainerByName(t.Name()) if container != nil { _ = RemoveContainer(container.ID, 20) } // Use the current actual web container for this, so replace in base docker-compose file composeBase := filepath.Join("testdata", "TestComposeWithStreams", "test-compose-with-streams.yaml") tmp, err := os.MkdirTemp("", "") assert.NoError(err) realComposeFile := filepath.Join(tmp, "replaced-compose-with-streams.yaml") err = fileutil.ReplaceStringInFile("TEST-COMPOSE-WITH-STREAMS-IMAGE", version.WebImg+":"+version.WebTag, composeBase, realComposeFile) assert.NoError(err) composeFiles := []string{realComposeFile} t.Cleanup(func() { _, _, err = ComposeCmd(composeFiles, "down") assert.NoError(err) }) _, _, err = ComposeCmd(composeFiles, "up", "-d") require.NoError(t, err) _, err = ContainerWait(30, map[string]string{"com.ddev.site-name": t.Name()}) if err != nil { logout, _ := exec.RunCommand("docker", []string{"logs", t.Name()}) inspectOut, _ := exec.RunCommandPipe("sh", []string{"-c", fmt.Sprintf("docker inspect %s|jq -r '.[0].State.Health.Log'", t.Name())}) t.Fatalf("FAIL: dockerutils_test failed to ContainerWait for container: %v, logs\n========= container logs ======\n%s\n======= end logs =======\n==== health log =====\ninspectOut\n%s\n========", err, logout, inspectOut) } // Point stdout to os.Stdout and do simple ps -ef in web container stdout := util.CaptureStdOut() err = ComposeWithStreams(composeFiles, os.Stdin, os.Stdout, os.Stderr, "exec", "-T", "web", "ps", "-ef") assert.NoError(err) output := stdout() assert.Contains(output, "supervisord") // Reverse stdout and stderr and create an error and normal stdout. 
We should see only the error captured in stdout stdout = util.CaptureStdOut() err = ComposeWithStreams(composeFiles, os.Stdin, os.Stderr, os.Stdout, "exec", "-T", "web", "ls", "-d", "xx", "/var/run/apache2") assert.Error(err) output = stdout() assert.Contains(output, "ls: cannot access 'xx': No such file or directory") // Flip stdout and stderr and create an error and normal stdout. We should see only the success captured in stdout stdout = util.CaptureStdOut() err = ComposeWithStreams(composeFiles, os.Stdin, os.Stdout, os.Stderr, "exec", "-T", "web", "ls", "-d", "xx", "/var/run/apache2") assert.Error(err) output = stdout() assert.Contains(output, "/var/run/apache2", output) }
explode_data.jsonl/41375
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 898 }
[ 2830, 3393, 70492, 2354, 73576, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 438, 3342, 7121, 1155, 692, 53290, 11, 716, 1669, 7379, 4502, 16898, 1155, 2967, 2398, 743, 5476, 961, 2092, 341, 197, 197, 62, 284, 10783, 4502, 28168, 9910, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReadLimitsType(t *testing.T) { for i, c := range []struct { bytes []byte exp *types.Limits }{ {bytes: []byte{0x00, 0xa}, exp: &types.Limits{Min: 10}}, {bytes: []byte{0x01, 0xa, 0xa}, exp: &types.Limits{Min: 10, Max: utils.Uint32Ptr(10)}}, } { t.Run(strconv.Itoa(i), func(t *testing.T) { actual, err := types.ReadLimits(bytes.NewReader(c.bytes)) if err != nil { t.Fail() } if !reflect.DeepEqual(c.exp, actual) { t.Fail() } }) } }
explode_data.jsonl/40315
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 234 }
[ 2830, 3393, 4418, 94588, 929, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 272, 1669, 2088, 3056, 1235, 341, 197, 70326, 3056, 3782, 198, 197, 48558, 256, 353, 9242, 1214, 22866, 198, 197, 59403, 197, 197, 90, 9651, 25, 3056, 3782, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestChannelArbitratorEmptyResolutions(t *testing.T) { // Start out with a log that will fail writing the set of resolutions. log := &mockArbitratorLog{ state: StateDefault, newStates: make(chan ArbitratorState, 5), failFetch: errNoResolutions, } chanArbCtx, err := createTestChannelArbitrator(t, log) if err != nil { t.Fatalf("unable to create ChannelArbitrator: %v", err) } chanArb := chanArbCtx.chanArb chanArb.cfg.IsPendingClose = true chanArb.cfg.ClosingHeight = 100 chanArb.cfg.CloseType = channeldb.RemoteForceClose if err := chanArb.Start(); err != nil { t.Fatalf("unable to start ChannelArbitrator: %v", err) } // It should not advance its state beyond StateContractClosed, since // fetching resolutions fails. chanArbCtx.AssertStateTransitions(StateContractClosed) // It should not advance further, however, as fetching resolutions // failed. time.Sleep(100 * time.Millisecond) if log.state != StateContractClosed { t.Fatalf("expected to stay in StateContractClosed") } chanArb.Stop() }
explode_data.jsonl/3700
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 363 }
[ 2830, 3393, 9629, 6953, 4489, 81, 850, 3522, 1061, 20201, 1155, 353, 8840, 836, 8, 341, 197, 322, 5145, 700, 448, 264, 1487, 429, 686, 3690, 4378, 279, 738, 315, 42495, 624, 6725, 1669, 609, 16712, 6953, 4489, 81, 850, 2201, 515, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestEventPayloadSerialization(t *testing.T) { max := MutableEventPayload{} max.SetEpoch(math.MaxUint32) max.SetSeq(idx.Event(math.MaxUint32)) max.SetLamport(idx.Lamport(math.MaxUint32)) h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32)) max.SetParents(hash.Events{hash.Event(h), hash.Event(h), hash.Event(h)}) max.SetTxHash(hash.Hash(h)) max.SetSig(BytesToSignature(bytes.Repeat([]byte{math.MaxUint8}, SigSize))) max.SetExtra(bytes.Repeat([]byte{math.MaxUint8}, 100)) max.SetCreationTime(math.MaxUint64) max.SetMedianTime(math.MaxUint64) tx1 := types.NewTx(&types.LegacyTx{ Nonce: math.MaxUint64, GasPrice: h.Big(), Gas: math.MaxUint64, To: nil, Value: h.Big(), Data: []byte{}, V: big.NewInt(0xff), R: h.Big(), S: h.Big(), }) tx2 := types.NewTx(&types.LegacyTx{ Nonce: math.MaxUint64, GasPrice: h.Big(), Gas: math.MaxUint64, To: &common.Address{}, Value: h.Big(), Data: max.extra, V: big.NewInt(0xff), R: h.Big(), S: h.Big(), }) txs := types.Transactions{} for i := 0; i < 200; i++ { txs = append(txs, tx1) txs = append(txs, tx2) } max.SetTxs(txs) ee := map[string]EventPayload{ "empty": emptyEvent(), "max": *max.Build(), "random": *FakeEvent(2), } t.Run("ok", func(t *testing.T) { require := require.New(t) for name, header0 := range ee { buf, err := rlp.EncodeToBytes(&header0) require.NoError(err) var header1 EventPayload err = rlp.DecodeBytes(buf, &header1) require.NoError(err, name) require.EqualValues(header0.extEventData, header1.extEventData, name) require.EqualValues(header0.sigData, header1.sigData, name) for i := range header0.payloadData.txs { require.EqualValues(header0.payloadData.txs[i].Hash(), header1.payloadData.txs[i].Hash(), name) } require.EqualValues(header0.baseEvent, header1.baseEvent, name) require.EqualValues(header0.ID(), header1.ID(), name) require.EqualValues(header0.HashToSign(), header1.HashToSign(), name) require.EqualValues(header0.Size(), header1.Size(), name) } }) t.Run("err", func(t *testing.T) { require := require.New(t) for name, 
header0 := range ee { bin, err := header0.MarshalBinary() require.NoError(err, name) n := rand.Intn(len(bin) - len(header0.Extra()) - 1) bin = bin[0:n] buf, err := rlp.EncodeToBytes(bin) require.NoError(err, name) var header1 Event err = rlp.DecodeBytes(buf, &header1) require.Error(err, name) } }) }
explode_data.jsonl/68977
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1182 }
[ 2830, 3393, 1556, 29683, 35865, 1155, 353, 8840, 836, 8, 341, 22543, 1669, 31143, 1556, 29683, 16094, 22543, 4202, 44338, 37270, 14535, 21570, 18, 17, 340, 22543, 4202, 20183, 19778, 6904, 37270, 14535, 21570, 18, 17, 1171, 22543, 4202, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestAccOrganisation_importBasic(t *testing.T) { parentOrganisationId := os.Getenv("FORM3_ORGANISATION_ID") organisationId := uuid.NewV4().String() resourceName := "form3_organisation.organisation" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckOrganisationDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: fmt.Sprintf(testForm3OrganisationConfigA, organisationId, parentOrganisationId), }, resource.TestStep{ ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, }, }) }
explode_data.jsonl/58663
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 250 }
[ 2830, 3393, 14603, 23227, 7923, 18434, 15944, 1155, 353, 8840, 836, 8, 1476, 24804, 23227, 7923, 764, 1669, 2643, 64883, 445, 10818, 18, 19834, 58487, 1637, 3495, 3450, 1138, 197, 57804, 764, 1669, 16040, 7121, 53, 19, 1005, 703, 2822, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEventsSuccess(t *testing.T) { sp := mockstorage.NewMockStoreProvider() k := newKMS(t, sp) ctx := &context{ kms: k, keyType: kms.ED25519Type, keyAgreementType: kms.X25519ECDHKWType, } svc, err := New(&protocol.MockProvider{ ServiceMap: map[string]interface{}{ mediator.Coordination: &mockroute.MockMediatorSvc{}, }, CustomKMS: k, KeyTypeValue: ctx.keyType, KeyAgreementTypeValue: ctx.keyAgreementType, }) require.NoError(t, err) actionCh := make(chan service.DIDCommAction, 10) err = svc.RegisterActionEvent(actionCh) require.NoError(t, err) go func() { service.AutoExecuteActionEvent(actionCh) }() statusCh := make(chan service.StateMsg, 10) err = svc.RegisterMsgEvent(statusCh) require.NoError(t, err) done := make(chan struct{}) go func() { for e := range statusCh { if e.Type == service.PostState && e.StateID == StateIDRequested { done <- struct{}{} } } }() pubKey, _ := newSigningAndEncryptionDIDKeys(t, ctx) id := randomString() invite, err := json.Marshal( &Invitation{ Type: InvitationMsgType, ID: id, Label: "test", RecipientKeys: []string{pubKey}, }, ) require.NoError(t, err) // send invite didMsg, err := service.ParseDIDCommMsgMap(invite) require.NoError(t, err) _, err = svc.HandleInbound(didMsg, service.EmptyDIDCommContext()) require.NoError(t, err) select { case <-done: case <-time.After(5 * time.Second): require.Fail(t, "tests are not validated") } }
explode_data.jsonl/30523
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 667 }
[ 2830, 3393, 7900, 7188, 1155, 353, 8840, 836, 8, 341, 41378, 1669, 7860, 16172, 7121, 11571, 6093, 5179, 741, 16463, 1669, 501, 42, 4826, 1155, 11, 978, 340, 20985, 1669, 609, 2147, 515, 197, 16463, 1011, 25, 1060, 595, 345, 197, 2363...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLoad_WithEnvironment(t *testing.T) { // arrange var actualConfig complex altConfigString := ` { "stringValue": "Hello world 2", "intValue": 456, "floatValue": 456.78, "boolValue": false, "sliceValueStrings": [ "string4", "string5", "string6", "string7", "string8" ], "sliceValueFloats": [ 4.5, 5.6, 7.8, 8.9 ], "sliceValueInts": [ 4, 5 ], "sliceValueBools": [ true ], "objectValue": { "stringValue": "Hello world 2", "intValue": 456, "objectValue": { "stringValue": "Hello world 2", "intValue": 456 } }, "sliceValueObjects": [ { "stringValue": "Hello world 2", "intValue": 456 }, { "stringValue": "Hello world 2", "intValue": 456 }, { "stringValue": "Hello world 2", "intValue": 456 }, { "stringValue": "Hello world 2", "intValue": 456 } ] }` var expectedAltConfig complex err := json.Unmarshal([]byte(altConfigString), &expectedAltConfig) if err != nil { t.Fatal(err) } err = ioutil.WriteFile("complex.test.json", []byte(altConfigString), 0644) if err != nil { t.Fatal(err) } defer func() { err = os.Remove("complex.test.json") if err != nil { t.Fatal(err) } }() // act err = transfig.Load("complex.json", "test", &actualConfig) // assert if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expectedAltConfig, actualConfig) { t.Errorf("expected and actual config are different.\nExpected:\n%v\n\nActual:\n%v", expectedAltConfig, actualConfig) } }
explode_data.jsonl/44450
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 906 }
[ 2830, 3393, 5879, 62, 2354, 12723, 1155, 353, 8840, 836, 8, 341, 197, 322, 30893, 198, 2405, 5042, 2648, 6351, 271, 197, 3145, 2648, 703, 1669, 22074, 262, 341, 286, 330, 78883, 788, 330, 9707, 1879, 220, 17, 756, 286, 330, 46040, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTraitHierarchyDecode(t *testing.T) { env := createTestEnv(t, v1alpha1.IntegrationPlatformClusterOpenShift, "") env.Platform.Spec.Traits = make(map[string]v1alpha1.TraitSpec) env.Platform.Spec.Traits["knative-service"] = v1alpha1.TraitSpec{ Configuration: map[string]string{ "enabled": "false", "min-scale": "1", "max-scale": "10", "autoscaling-target": "15", }, } env.IntegrationKit.Spec.Traits = make(map[string]v1alpha1.TraitSpec) env.IntegrationKit.Spec.Traits["knative-service"] = v1alpha1.TraitSpec{ Configuration: map[string]string{ "enabled": "true", "min-scale": "5", }, } env.Integration.Spec.Traits = make(map[string]v1alpha1.TraitSpec) env.Integration.Spec.Traits["knative-service"] = v1alpha1.TraitSpec{ Configuration: map[string]string{ "max-scale": "20", }, } c := NewTraitTestCatalog() err := c.configure(env) assert.Nil(t, err) knt := c.GetTrait("knative-service") assert.NotNil(t, knt) kns, ok := knt.(*knativeServiceTrait) assert.True(t, ok) assert.NotNil(t, kns) assert.NotNil(t, kns.Enabled) assert.True(t, *kns.Enabled) assert.NotNil(t, kns.MinScale) assert.Equal(t, 5, *kns.MinScale) assert.NotNil(t, kns.MaxScale) assert.Equal(t, 20, *kns.MaxScale) assert.NotNil(t, kns.Target) assert.Equal(t, 15, *kns.Target) }
explode_data.jsonl/54528
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 604 }
[ 2830, 3393, 49257, 85264, 32564, 1155, 353, 8840, 836, 8, 341, 57538, 1669, 1855, 2271, 14359, 1155, 11, 348, 16, 7141, 16, 7371, 17376, 17296, 28678, 5002, 24841, 11, 85617, 57538, 51227, 36473, 836, 26287, 284, 1281, 9147, 14032, 60, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGrantSingleGroup(t *testing.T) { assert := assert.New(t) interceptor := stubs.NewInterceptor() interceptor.AddStub(&stubs.Stub{ Method: "GET", Path: fmt.Sprintf("/v2/auth/grant/sub-key/%s", pamConfig.SubscribeKey), Query: "channel-group=cg1&m=0&r=1&w=1&d=0", ResponseBody: `{"message":"Success","payload":{"level":"channel-group","subscribe_key":"sub-c-b9ab9508-43cf-11e8-9967-869954283fb4","ttl":1440,"channel-groups":{"cg1":{"r":1,"w":1,"m":0,"d":0}}},"service":"Access Manager","status":200}`, IgnoreQueryKeys: []string{"uuid", "pnsdk", "signature", "timestamp"}, ResponseStatusCode: 200, }) pn := pubnub.NewPubNub(pamConfigCopy()) pn.SetClient(interceptor.GetClient()) res, _, err := pn.Grant(). Read(true).Write(true). ChannelGroups([]string{"cg1"}). Execute() assert.Nil(err) assert.NotNil(res) assert.True(res.ChannelGroups["cg1"].WriteEnabled) assert.True(res.ChannelGroups["cg1"].ReadEnabled) assert.False(res.ChannelGroups["cg1"].ManageEnabled) }
explode_data.jsonl/43195
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 460 }
[ 2830, 3393, 67971, 10888, 2808, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 58915, 15349, 1669, 13633, 82, 7121, 32786, 741, 58915, 15349, 1904, 33838, 2099, 267, 15738, 7758, 392, 515, 197, 84589, 25, 1797, 330, 38...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInt64sHas(t *testing.T) { ints := []int64{2, 4, 5} assert.True(t, Int64sHas(ints, 2)) assert.True(t, Int64sHas(ints, 5)) assert.False(t, Int64sHas(ints, 3)) }
explode_data.jsonl/70781
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 81 }
[ 2830, 3393, 1072, 21, 19, 82, 10281, 1155, 353, 8840, 836, 8, 341, 2084, 82, 1669, 3056, 396, 21, 19, 90, 17, 11, 220, 19, 11, 220, 20, 532, 6948, 32443, 1155, 11, 1333, 21, 19, 82, 10281, 1548, 82, 11, 220, 17, 1171, 6948, 32...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRoutesCanBeMatchedByPath(t *testing.T) { trie := newRouteTrie() for _, routePair := range routePairs { trie.add(routePair.route) routes, _ := trie.search(routePair.path) assert.Equal(t, routePair.route, routes[0]) } }
explode_data.jsonl/42518
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 102 }
[ 2830, 3393, 26653, 69585, 8331, 97322, 1820, 1155, 353, 8840, 836, 8, 341, 197, 8927, 1669, 501, 4899, 51, 7231, 2822, 2023, 8358, 6021, 12443, 1669, 2088, 6021, 54228, 341, 197, 197, 8927, 1364, 31436, 12443, 11842, 692, 197, 7000, 549...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLessThanOrEqualToMatcherInt(t *testing.T) { logger := logging.NewLogger(&logging.LoggerOptions{}) attrName := "value" dto := &dtos.MatcherDTO{ MatcherType: "LESS_THAN_OR_EQUAL_TO", UnaryNumeric: &dtos.UnaryNumericMatcherDataDTO{ DataType: "NUMBER", Value: int64(100), }, KeySelector: &dtos.KeySelectorDTO{ Attribute: &attrName, }, } matcher, err := BuildMatcher(dto, nil, logger) if err != nil { t.Error("There should be no errors when building the matcher") t.Error(err) } matcherType := reflect.TypeOf(matcher).String() if matcherType != "*matchers.LessThanOrEqualToMatcher" { t.Errorf("Incorrect matcher constructed. Should be *matchers.LessThanOrEqualToMatcher and was %s", matcherType) } if !matcher.Match("asd", map[string]interface{}{"value": 100}, nil) { t.Error("Equal should match") } if matcher.Match("asd", map[string]interface{}{"value": 500}, nil) { t.Error("Greater should not match") } if !matcher.Match("asd", map[string]interface{}{"value": 50}, nil) { t.Error("Lower should match") } }
explode_data.jsonl/34808
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 429 }
[ 2830, 3393, 27451, 89387, 37554, 1072, 1155, 353, 8840, 836, 8, 341, 17060, 1669, 8392, 7121, 7395, 2099, 25263, 12750, 3798, 37790, 60943, 675, 1669, 330, 957, 698, 98864, 1669, 609, 8047, 436, 76452, 14923, 515, 197, 197, 37554, 929, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestStoreInitAndBootstrap bootstraps a store into an empty in-memory
// engine, then verifies that a second store instance can start from the
// now-bootstrapped engine and that each replica's MVCC stats agree with a
// recomputation from the engine contents.
func TestStoreInitAndBootstrap(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// We need a fixed clock to avoid LastUpdateNanos drifting on us.
	cfg := TestStoreConfig(hlc.NewClock(func() int64 { return 123 }, time.Nanosecond))
	stopper := stop.NewStopper()
	ctx := context.TODO()
	defer stopper.Stop(ctx)
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	stopper.AddCloser(eng)
	cfg.Transport = NewDummyRaftTransport(cfg.Settings)
	factory := &testSenderFactory{}
	cfg.DB = client.NewDB(cfg.AmbientCtx, factory, cfg.Clock)
	{
		store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
		// Can't start as haven't bootstrapped.
		if err := store.Start(ctx, stopper); err == nil {
			t.Error("expected failure starting un-bootstrapped store")
		}
		// Bootstrap with a fake ident.
		if err := InitEngine(ctx, eng, testIdent, cfg.Settings.Version.BootstrapVersion()); err != nil {
			t.Errorf("error bootstrapping store: %s", err)
		}
		// Verify we can read the store ident after a flush.
		if err := eng.Flush(); err != nil {
			t.Fatal(err)
		}
		if _, err := ReadStoreIdent(ctx, eng); err != nil {
			t.Fatalf("unable to read store ident: %s", err)
		}
		// Bootstrap the system ranges: static splits plus the table splits
		// derived from the metadata schema, written in sorted key order.
		var splits []roachpb.RKey
		kvs, tableSplits := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig).GetInitialValues()
		splits = config.StaticSplits()
		splits = append(splits, tableSplits...)
		sort.Slice(splits, func(i, j int) bool {
			return splits[i].Less(splits[j])
		})
		if err := WriteInitialClusterData(
			ctx, eng, kvs /* initialValues */,
			cfg.Settings.Version.BootstrapVersion().Version,
			1 /* numStores */, splits, cfg.Clock.PhysicalNow(),
		); err != nil {
			t.Errorf("failure to create first range: %s", err)
		}
	}

	// Now, attempt to initialize a store with a now-bootstrapped range.
	store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
	if err := store.Start(ctx, stopper); err != nil {
		t.Fatalf("failure initializing bootstrapped store: %s", err)
	}
	for i := 1; i <= store.ReplicaCount(); i++ {
		r, err := store.GetReplica(roachpb.RangeID(i))
		if err != nil {
			t.Fatalf("failure fetching range %d: %s", i, err)
		}
		rs := r.GetMVCCStats()

		// Stats should agree with a recomputation.
		now := r.store.Clock().Now()
		if ms, err := rditer.ComputeStatsForRange(r.Desc(), eng, now.WallTime); err != nil {
			t.Errorf("failure computing range's stats: %s", err)
		} else if ms != rs {
			t.Errorf("expected range's stats to agree with recomputation: %s", pretty.Diff(ms, rs))
		}
	}
}
explode_data.jsonl/75743
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 972 }
[ 2830, 3393, 6093, 3803, 3036, 45511, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 197, 322, 1205, 1184, 264, 8356, 8866, 311, 5648, 7996, 4289, 45, 43605, 84253, 389, 601, 624, 50286, 1669, 3393, 6093, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFreeENI(t *testing.T) { ctrl, _, mockEC2 := setup(t) defer ctrl.Finish() attachmentID := eniAttachID attachment := &ec2.NetworkInterfaceAttachment{AttachmentId: &attachmentID} result := &ec2.DescribeNetworkInterfacesOutput{ NetworkInterfaces: []*ec2.NetworkInterface{{Attachment: attachment}}} mockEC2.EXPECT().DescribeNetworkInterfaces(gomock.Any()).Return(result, nil) mockEC2.EXPECT().DetachNetworkInterface(gomock.Any()).Return(nil, nil) mockEC2.EXPECT().DeleteNetworkInterface(gomock.Any()).Return(nil, nil) ins := &EC2InstanceMetadataCache{ec2SVC: mockEC2} err := ins.FreeENI("test-eni") assert.NoError(t, err) }
explode_data.jsonl/19294
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 234 }
[ 2830, 3393, 10940, 953, 40, 1155, 353, 8840, 836, 8, 341, 84381, 11, 8358, 7860, 7498, 17, 1669, 6505, 1155, 340, 16867, 23743, 991, 18176, 2822, 197, 21981, 915, 1669, 662, 72, 30485, 915, 198, 197, 21981, 1669, 609, 757, 17, 30149, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHttp_configsSettingHeaders(t *testing.T) { http := httpModForTests(nil) config := defaultConfig // Assign config vars config.SendHeaders = []string{"a", "b", "c"} // Set config http.setFromConfig(&config) // Check if http config is set correctly assert.True(t, http.parserConfig.sendHeaders) assert.Equal(t, len(config.SendHeaders), len(http.parserConfig.headersWhitelist)) for _, val := range http.parserConfig.headersWhitelist { assert.True(t, val) } }
explode_data.jsonl/16520
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 2905, 59150, 15400, 10574, 1155, 353, 8840, 836, 8, 341, 28080, 1669, 1758, 4459, 2461, 18200, 27907, 340, 25873, 1669, 1638, 2648, 271, 197, 322, 31639, 2193, 19942, 198, 25873, 20176, 10574, 284, 3056, 917, 4913, 64, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRegisterSubNamespaceTransactionSerialization(t *testing.T) { tx, err := NewRegisterSubNamespaceTransaction( fakeDeadline, "subnamespace", bigIntToNamespaceId(big.NewInt(4635294387305441662)), MijinTest, ) assert.Nilf(t, err, "NewRegisterSubNamespaceTransaction returned error: %s", err) b, err := tx.generateBytes() assert.Nilf(t, err, "RegisterNamespaceTransaction.generateBytes returned error: %s", err) assert.Equal(t, registerSubNamespaceTransactionSerializationCorr, b) }
explode_data.jsonl/69197
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 171 }
[ 2830, 3393, 8690, 3136, 22699, 8070, 35865, 1155, 353, 8840, 836, 8, 341, 46237, 11, 1848, 1669, 1532, 8690, 3136, 22699, 8070, 1006, 197, 1166, 726, 83593, 345, 197, 197, 1, 1966, 2231, 756, 197, 2233, 343, 1072, 1249, 22699, 764, 75...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestLoadStore saves three successive versions of the same key, then
// verifies that both an already-open store (via GetImmutable) and freshly
// loaded stores (via LoadStore with each saved CommitID) return the value
// recorded at the corresponding height.
func TestLoadStore(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	store := UnsafeNewStore(tree)

	// Create non-pruned height H
	require.True(t, tree.Set([]byte("hello"), []byte("hallo")))
	hash, verH, err := tree.SaveVersion()
	cIDH := types.CommitID{Version: verH, Hash: hash}
	require.Nil(t, err)

	// Create pruned height Hp
	require.True(t, tree.Set([]byte("hello"), []byte("hola")))
	hash, verHp, err := tree.SaveVersion()
	cIDHp := types.CommitID{Version: verHp, Hash: hash}
	require.Nil(t, err)

	// TODO: Prune this height

	// Create current height Hc
	require.True(t, tree.Set([]byte("hello"), []byte("ciao")))
	hash, verHc, err := tree.SaveVersion()
	cIDHc := types.CommitID{Version: verHc, Hash: hash}
	require.Nil(t, err)

	// Querying an existing store at some previous non-pruned height H
	hStore, err := store.GetImmutable(verH)
	require.NoError(t, err)
	require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo")

	// Querying an existing store at some previous pruned height Hp
	hpStore, err := store.GetImmutable(verHp)
	require.NoError(t, err)
	require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola")

	// Querying an existing store at current height Hc
	hcStore, err := store.GetImmutable(verHc)
	require.NoError(t, err)
	require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao")

	// Querying a new store at some previous non-pruned height H
	newHStore, err := LoadStore(db, cIDH, false)
	require.NoError(t, err)
	require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo")

	// Querying a new store at some previous pruned height Hp
	newHpStore, err := LoadStore(db, cIDHp, false)
	require.NoError(t, err)
	require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola")

	// Querying a new store at current height H
	newHcStore, err := LoadStore(db, cIDHc, false)
	require.NoError(t, err)
	require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao")
}
explode_data.jsonl/38057
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 741 }
[ 2830, 3393, 5879, 6093, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 2927, 76, 7121, 18816, 3506, 741, 51968, 11, 716, 1669, 501, 32, 385, 4223, 6533, 1155, 11, 2927, 340, 57279, 1669, 73067, 3564, 6093, 21298, 692, 197, 322, 4230, 2477...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBackupModule(t *testing.T) { // Random generate a string for naming resources uniqueID := strings.ToLower(random.UniqueId()) resourceName := fmt.Sprintf("test%s", uniqueID) terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{ // Path to where our Terraform code is TerraformDir: "../examples/vault", Upgrade: true, // Variables to pass using -var-file option Vars: map[string]interface{}{ "name_prefix": resourceName, }, }) // At the end of the test, run `terraform destroy` to clean up any resources that were created defer terraform.Destroy(t, terraformOptions) // Run `terraform init` and `terraform apply` and fail the test if there are any errors terraform.InitAndApply(t, terraformOptions) // Run `terraform output` to get the values of output variables backupPlanArn := terraform.Output(t, terraformOptions, "backup_plan_arn") backupVaultID := terraform.Output(t, terraformOptions, "backup_vault_id") // Verify we're getting back the outputs we expect assert.Contains(t, backupPlanArn, "arn:aws:backup:eu-west-1:") assert.Equal(t, backupVaultID, "test"+uniqueID+"-vault") }
explode_data.jsonl/6596
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 398 }
[ 2830, 3393, 56245, 3332, 1155, 353, 8840, 836, 8, 341, 197, 322, 10612, 6923, 264, 914, 369, 34948, 4963, 198, 197, 9587, 915, 1669, 9069, 29983, 25110, 87443, 764, 2398, 50346, 675, 1669, 8879, 17305, 445, 1944, 12952, 497, 4911, 915, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestClient_getAccessToken covers both branches of the access-token cache:
// a still-valid token is returned as-is, and an expired token triggers a
// refresh against a stubbed auth endpoint.
func TestClient_getAccessToken(t *testing.T) {
	bhc := newClientMock()

	t.Run("token still valid", func(t *testing.T) {
		// A token acquired "now" should be considered valid, so the cached
		// token is expected back unchanged.
		bhc.accessTokenAcquiredAt = time.Now()

		newToken, err := bhc.getAccessToken()
		assert.NoError(t, err)
		assert.Equal(t, bhc.accessToken, newToken)
	})

	t.Run("token already expired", func(t *testing.T) {
		// Stub auth server that always issues a fresh bearer token.
		httpMock2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			w.Header().Set("Content-Type", "application/json")
			io.WriteString(w, `{ "access_token": "kucing-oren", "token_type": "Bearer", "expires_in": 300 }`)
		}))
		defer httpMock2.Close()
		// NOTE(review): mutates package-level state; assumes tests in this
		// file do not run in parallel — confirm before adding t.Parallel().
		authBaseURL = httpMock2.URL // change for test

		// Backdate acquisition well past expiry to force a refresh.
		bhc.accessTokenAcquiredAt = time.Now().Add(-60 * time.Minute)
		bhc.httpClient = httpMock2.Client()

		newToken, err := bhc.getAccessToken()
		assert.NoError(t, err)
		assert.Equal(t, "kucing-oren", bhc.accessToken)
		assert.Equal(t, bhc.accessToken, newToken)
		// The acquisition timestamp must have been refreshed just now.
		assert.True(t, time.Since(bhc.accessTokenAcquiredAt).Minutes() < 1)
	})
}
explode_data.jsonl/23624
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 445 }
[ 2830, 3393, 2959, 3062, 37649, 1155, 353, 8840, 836, 8, 341, 2233, 38052, 1669, 501, 2959, 11571, 741, 3244, 16708, 445, 5839, 2058, 2697, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 2233, 38052, 71792, 11654, 2931, 1655, 284, 882, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetLSB(t *testing.T) { tests := []struct { byte byte want bool }{ {byte: 0b00000000, want: false}, {byte: 0b00000001, want: true}, {byte: 0b11111111, want: true}, {byte: 0b11111110, want: false}, } for _, tt := range tests { name := fmt.Sprintf("%08b should be %t", tt.byte, tt.want) t.Run(name, func(t *testing.T) { got := GetLSB(tt.byte) assert.Equal(t, tt.want, got, "GetLSB() = %v, want %v", got, tt.want) }) } }
explode_data.jsonl/78674
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 214 }
[ 2830, 3393, 1949, 7268, 33, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 31422, 4922, 198, 197, 50780, 1807, 198, 197, 59403, 197, 197, 90, 3782, 25, 220, 15, 65, 15, 15, 15, 15, 15, 15, 15, 15, 11, 1366, 25,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestGenerateName checks job-name generation from LighthouseJobSpec refs:
// the org/repo prefix, sanitization of mixed case and punctuation, the
// optional base-branch and pull-number suffixes, and handling of long names.
func TestGenerateName(t *testing.T) {
	tests := []struct {
		expected string // generated name, always ends with "-"
		spec     v1alpha1.LighthouseJobSpec
	}{
		{
			// Plain org/repo prefix.
			expected: "myorg-myrepo-",
			spec: v1alpha1.LighthouseJobSpec{
				Refs: &v1alpha1.Refs{
					Org:  "myorg",
					Repo: "myrepo",
				},
			},
		},
		{
			// Mixed case and punctuation are normalised away.
			expected: "st-organsation-my-repo-",
			spec: v1alpha1.LighthouseJobSpec{
				Refs: &v1alpha1.Refs{
					Org:  "1st.Organsation",
					Repo: "MY_REPO",
				},
			},
		},
		{
			// Base branch is appended when set.
			expected: "myorg-myrepo-main-",
			spec: v1alpha1.LighthouseJobSpec{
				Refs: &v1alpha1.Refs{
					Org:     "myorg",
					Repo:    "myrepo",
					BaseRef: "main",
				},
			},
		},
		{
			// Pull request number is appended as "pr-<n>".
			expected: "myorg-myrepo-pr-123-",
			spec: v1alpha1.LighthouseJobSpec{
				Refs: &v1alpha1.Refs{
					Org:  "myorg",
					Repo: "myrepo",
					Pulls: []v1alpha1.Pull{
						{
							Number: 123,
						},
					},
				},
			},
		},
		{
			// Long combined names drop the org — presumably a length cap;
			// TODO confirm the exact truncation rule in GenerateName.
			expected: "repo-with-very-long-name-pr-123-",
			spec: v1alpha1.LighthouseJobSpec{
				Refs: &v1alpha1.Refs{
					Org:  "organisation-with-long-name",
					Repo: "repo-with-very-long-name",
					Pulls: []v1alpha1.Pull{
						{
							Number: 123,
						},
					},
				},
			},
		},
	}
	for _, tc := range tests {
		spec := &tc.spec
		actual := GenerateName(spec)
		assert.Equal(t, tc.expected, actual, "for spec %#v", spec)
	}
}
explode_data.jsonl/51121
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 736 }
[ 2830, 3393, 31115, 675, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 42400, 914, 198, 197, 98100, 257, 348, 16, 7141, 16, 1214, 57909, 12245, 8327, 198, 197, 59403, 197, 197, 515, 298, 42400, 25, 330, 2408, 1775, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestDriftingClockChangeConfigs drives a DriftingClock through a sequence of
// drift configurations and checks that elapsed drifted time tracks elapsed
// monotonic time scaled by the active drift factor, plus the configured
// offset immediately after a config change.
func TestDriftingClockChangeConfigs(t *testing.T) {
	configs := []*kronospb.DriftTimeConfig{
		{DriftFactor: 1.2, Offset: 0},
		{DriftFactor: 0.8, Offset: int64(time.Hour)},
		{DriftFactor: 1.1, Offset: 0},
		{DriftFactor: 0.9, Offset: int64(time.Minute)},
	}
	dc := NewDriftingClock(configs[0].DriftFactor, time.Duration(configs[0].Offset))
	mc := NewMonotonicClock()
	lastMonotonicTime := mc.Now()
	lastDriftedTime := dc.Now()
	for i := 0; i < len(configs); i++ {
		sleepDuration := 10 * time.Millisecond
		time.Sleep(sleepDuration)
		mt1 := mc.Now()
		dt1 := dc.Now()
		// Drifted elapsed time ≈ monotonic elapsed time × current factor,
		// within 1% of the sleep duration.
		assert.InDelta(
			t,
			(float64(mt1-lastMonotonicTime))*configs[i].DriftFactor,
			float64(dt1-lastDriftedTime),
			float64(sleepDuration/100),
		)
		// The last config has no successor to switch to.
		if i == len(configs)-1 {
			break
		}
		dc.UpdateDriftConfig(configs[i+1])
		time.Sleep(sleepDuration)
		mt2 := mc.Now()
		dt2 := dc.Now()
		// After the switch, the new factor applies and the new config's
		// offset is added once to the drifted elapsed time.
		assert.InDelta(
			t,
			(float64(mt2-mt1))*configs[i+1].DriftFactor+float64(configs[i+1].Offset),
			float64(dt2-dt1),
			float64(sleepDuration/100),
		)
		lastMonotonicTime = mt2
		lastDriftedTime = dt2
	}
}
explode_data.jsonl/71552
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 488 }
[ 2830, 3393, 8847, 17680, 26104, 4072, 84905, 1155, 353, 8840, 836, 8, 341, 25873, 82, 1669, 29838, 74, 2248, 4785, 65, 909, 41380, 1462, 2648, 515, 197, 197, 90, 8847, 2085, 20661, 25, 220, 16, 13, 17, 11, 16861, 25, 220, 15, 1583, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestHiddenMatcher(t *testing.T) { m := matcher.Hidden() for i, currCase := range []struct { path string want bool }{ {"foo/bar/regular", false}, {"foo/bar/.hidden", true}, {"foo/.bar/inHidden", true}, {"foo/.bar/inHidden", true}, } { got := m.Match(currCase.path) assert.Equal(t, currCase.want, got, "Case %d", i) } }
explode_data.jsonl/81369
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 147 }
[ 2830, 3393, 17506, 37554, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 36052, 64014, 2822, 2023, 600, 11, 9804, 4207, 1669, 2088, 3056, 1235, 341, 197, 26781, 914, 198, 197, 50780, 1807, 198, 197, 59403, 197, 197, 4913, 7975, 49513, 14, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestLinearRegression runs the table of regression cases, checking the
// fitted alpha and beta and the corresponding R² value (the through-origin
// variant when the case requests it) against expected estimates within each
// case's tolerance.
func TestLinearRegression(t *testing.T) {
	for _, test := range linearRegressionTests {
		alpha, beta := LinearRegression(test.x, test.y, test.weights, test.origin)
		var r float64
		if test.origin {
			// Regression forced through the origin uses the R₀² variant,
			// which takes only beta.
			r = RNoughtSquared(test.x, test.y, test.weights, beta)
		} else {
			r = RSquared(test.x, test.y, test.weights, alpha, beta)
			// Cross-check: RSquaredFrom on the fitted estimates must agree
			// exactly with RSquared on the raw data.
			ests := make([]float64, len(test.y))
			for i, x := range test.x {
				ests[i] = alpha + beta*x
			}
			rvals := RSquaredFrom(ests, test.y, test.weights)
			if r != rvals {
				t.Errorf("%s: RSquared and RSquaredFrom mismatch: %v != %v", test.name, r, rvals)
			}
		}
		if !scalar.EqualWithinAbsOrRel(alpha, test.alpha, test.tol, test.tol) {
			t.Errorf("%s: unexpected alpha estimate: want:%v got:%v", test.name, test.alpha, alpha)
		}
		if !scalar.EqualWithinAbsOrRel(beta, test.beta, test.tol, test.tol) {
			t.Errorf("%s: unexpected beta estimate: want:%v got:%v", test.name, test.beta, beta)
		}
		if !scalar.EqualWithinAbsOrRel(r, test.r, test.tol, test.tol) {
			t.Errorf("%s: unexpected r estimate: want:%v got:%v", test.name, test.r, r)
		}
	}
}
explode_data.jsonl/1771
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 479 }
[ 2830, 3393, 31898, 45200, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 1273, 1669, 2088, 13482, 45200, 18200, 341, 197, 73063, 11, 13440, 1669, 28263, 45200, 8623, 1993, 11, 1273, 2384, 11, 1273, 57618, 11, 1273, 20248, 340, 197, 2405, 435...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
// TestIngressPathRouteWithoutHost adds an Ingress that specifies a path rule
// but no host, and asserts the translator publishes the route under the
// wildcard ("*") virtual host.
func TestIngressPathRouteWithoutHost(t *testing.T) {
	rh, c, done := setup(t)
	defer done()

	// add default/hello to translator.
	rh.OnAdd(&v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "default"},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{{
				// No Host set: the rule applies to all hosts.
				IngressRuleValue: v1beta1.IngressRuleValue{
					HTTP: &v1beta1.HTTPIngressRuleValue{
						Paths: []v1beta1.HTTPIngressPath{{
							Path: "/hello",
							Backend: v1beta1.IngressBackend{
								ServiceName: "hello",
								ServicePort: intstr.FromInt(80),
							},
						}},
					},
				},
			}},
		},
	})

	// Backing service for the Ingress backend above.
	s1 := fixture.NewService("hello").
		WithPorts(v1.ServicePort{Name: "http", Port: 80, TargetPort: intstr.FromInt(8080)})
	rh.OnAdd(s1)

	// check that it's been translated correctly.
	c.Request(routeType).Equals(&v2.DiscoveryResponse{
		VersionInfo: "2",
		Resources: routeResources(t,
			envoy.RouteConfiguration("ingress_http",
				envoy.VirtualHost("*",
					&envoy_api_v2_route.Route{
						Match:  routePrefix("/hello"),
						Action: routecluster("default/hello/80/da39a3ee5e"),
					},
				),
			),
		),
		TypeUrl: routeType,
		Nonce:   "2",
	})
}
explode_data.jsonl/24104
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 531 }
[ 2830, 3393, 641, 2483, 1820, 4899, 26040, 9296, 1155, 353, 8840, 836, 8, 341, 7000, 71, 11, 272, 11, 2814, 1669, 6505, 1155, 340, 16867, 2814, 2822, 197, 322, 912, 1638, 7530, 4791, 311, 45488, 624, 7000, 71, 8071, 2212, 2099, 85, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQueueIsFullReplaceFirstElement(t *testing.T) { policy := newRetryPolicy() policy.SetMaxSize(0) bytes := []byte("test") policy.SetMaxSize(size(bytes) + 1) // send will fail and must be put in queue policy.ingest(&retryPolicyRequest{ data: bytes, ingestionURL: "", httpRequest: func(data []byte, ingestionURL string) error { return errors.New("some connection problem") }, callback: func(err error) {}, }) assert.Equal(t, len(policy.queue), 1) // send will fail and must replace the first element policy.ingest(&retryPolicyRequest{ data: []byte("tes"), ingestionURL: "", httpRequest: func(data []byte, ingestionURL string) error { return errors.New("some connection problem") }, callback: func(err error) {}, }) assert.Equal(t, len(policy.queue), 1) assert.Equal(t, policy.queue[0].data, []byte("tes")) }
explode_data.jsonl/40703
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 319 }
[ 2830, 3393, 7554, 3872, 9432, 23107, 5338, 1691, 1155, 353, 8840, 836, 8, 341, 3223, 8018, 1669, 501, 51560, 13825, 741, 3223, 8018, 4202, 5974, 1695, 7, 15, 692, 70326, 1669, 3056, 3782, 445, 1944, 1138, 3223, 8018, 4202, 5974, 1695, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBadNodes(t *testing.T) { const src = "package p\n(" const res = "package p\nBadDecl\n" f, err := parser.ParseFile(fset, "", src, parser.ParseComments) if err == nil { t.Error("expected illegal program") // error in test } var buf bytes.Buffer Fprint(&buf, fset, f) if buf.String() != res { t.Errorf("got %q, expected %q", buf.String(), res) } }
explode_data.jsonl/64597
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 145 }
[ 2830, 3393, 17082, 12288, 1155, 353, 8840, 836, 8, 341, 4777, 2286, 284, 330, 1722, 281, 1699, 70576, 4777, 592, 284, 330, 1722, 281, 1699, 17082, 21629, 1699, 698, 1166, 11, 1848, 1669, 6729, 8937, 1703, 955, 746, 11, 7342, 2286, 11,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestConsume exercises the insert/consume token cache: tokens can be
// consumed in any order, exactly once, only for tokens that were actually
// inserted, and only before the cache TTL expires. Cache size is asserted
// after every mutation.
func TestConsume(t *testing.T) {
	c, clock := newTestCache()

	{ // Insert & consume.
		req := nextRequest()
		tok, err := c.Insert(req)
		require.NoError(t, err)
		assertCacheSize(t, c, 1)

		cachedReq, ok := c.Consume(tok)
		assert.True(t, ok)
		assert.Equal(t, req, cachedReq)
		assertCacheSize(t, c, 0)
	}

	{ // Insert & consume out of order
		req1 := nextRequest()
		tok1, err := c.Insert(req1)
		require.NoError(t, err)
		assertCacheSize(t, c, 1)

		req2 := nextRequest()
		tok2, err := c.Insert(req2)
		require.NoError(t, err)
		assertCacheSize(t, c, 2)

		// Consuming the second token first must still return the second
		// request, and leave the first intact.
		cachedReq2, ok := c.Consume(tok2)
		assert.True(t, ok)
		assert.Equal(t, req2, cachedReq2)
		assertCacheSize(t, c, 1)

		cachedReq1, ok := c.Consume(tok1)
		assert.True(t, ok)
		assert.Equal(t, req1, cachedReq1)
		assertCacheSize(t, c, 0)
	}

	{ // Consume a second time
		req := nextRequest()
		tok, err := c.Insert(req)
		require.NoError(t, err)
		assertCacheSize(t, c, 1)

		cachedReq, ok := c.Consume(tok)
		assert.True(t, ok)
		assert.Equal(t, req, cachedReq)
		assertCacheSize(t, c, 0)

		// A second consume of the same token must miss.
		_, ok = c.Consume(tok)
		assert.False(t, ok)
		assertCacheSize(t, c, 0)
	}

	{ // Consume without insert
		_, ok := c.Consume("fooBAR")
		assert.False(t, ok)
		assertCacheSize(t, c, 0)
	}

	{ // Consume expired
		tok, err := c.Insert(nextRequest())
		require.NoError(t, err)
		assertCacheSize(t, c, 1)

		// Step the fake clock past the TTL so the entry lapses.
		clock.Step(2 * cacheTTL)

		_, ok := c.Consume(tok)
		assert.False(t, ok)
		assertCacheSize(t, c, 0)
	}
}
explode_data.jsonl/16796
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 686 }
[ 2830, 3393, 1109, 31323, 1155, 353, 8840, 836, 8, 341, 1444, 11, 8866, 1669, 501, 2271, 8233, 2822, 197, 90, 442, 17101, 609, 24057, 624, 197, 24395, 1669, 1790, 1900, 741, 197, 3244, 562, 11, 1848, 1669, 272, 23142, 6881, 340, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMaxNumberOfRoles_Is(t *testing.T) { mnr := &callbacks.MaxNumberOfRoles{} if errors.Is(nil, &callbacks.MaxNumberOfRoles{}) { t.Error(invalidErrorAssertion) } if errors.Is(fmt.Errorf(wrapMsg), &callbacks.MaxNumberOfRoles{}) { t.Error(invalidErrorAssertion) } if !errors.Is(mnr, &callbacks.MaxNumberOfRoles{}) { t.Errorf(invalidErrorAssertion) } }
explode_data.jsonl/56095
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 159 }
[ 2830, 3393, 5974, 40619, 25116, 31879, 1155, 353, 8840, 836, 8, 341, 2109, 19618, 1669, 609, 68311, 14535, 40619, 25116, 31483, 743, 5975, 4506, 27907, 11, 609, 68311, 14535, 40619, 25116, 28875, 341, 197, 3244, 6141, 5900, 1891, 1454, 68...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDb_PutDeleteGet(t *testing.T) { trun(t, func(h *dbHarness) { h.put("foo", "v1") h.getVal("foo", "v1") h.put("foo", "v2") h.getVal("foo", "v2") h.delete("foo") h.get("foo", false) h.reopenDB() h.get("foo", false) }) }
explode_data.jsonl/6003
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 130 }
[ 2830, 3393, 7994, 1088, 332, 6435, 1949, 1155, 353, 8840, 836, 8, 341, 25583, 359, 1155, 11, 2915, 3203, 353, 1999, 74248, 8, 341, 197, 9598, 3597, 445, 7975, 497, 330, 85, 16, 1138, 197, 9598, 670, 2208, 445, 7975, 497, 330, 85, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMinimalRuntimeConfig(t *testing.T) { dir, err := ioutil.TempDir(testDir, "minimal-runtime-config-") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) shimPath := path.Join(dir, "shim") proxyPath := path.Join(dir, "proxy") runtimeMinimalConfig := ` # Clear Containers runtime configuration file [proxy.cc] path = "` + proxyPath + `" [shim.cc] path = "` + shimPath + `" [agent.cc] ` configPath := path.Join(dir, "runtime.toml") err = createConfig(configPath, runtimeMinimalConfig) if err != nil { t.Fatal(err) } _, config, err := loadConfiguration(configPath, false) if err == nil { t.Fatalf("Expected loadConfiguration to fail as shim path does not exist: %+v", config) } err = createEmptyFile(shimPath) if err != nil { t.Error(err) } err = createEmptyFile(proxyPath) if err != nil { t.Error(err) } _, config, err = loadConfiguration(configPath, false) if err != nil { t.Fatal(err) } expectedHypervisorConfig := vc.HypervisorConfig{ HypervisorPath: defaultHypervisorPath, KernelPath: defaultKernelPath, ImagePath: defaultImagePath, HypervisorMachineType: defaultMachineType, DefaultVCPUs: defaultVCPUCount, DefaultMemSz: defaultMemSize, DisableBlockDeviceUse: defaultDisableBlockDeviceUse, DefaultBridges: defaultBridgesCount, Mlock: !defaultEnableSwap, BlockDeviceDriver: defaultBlockDeviceDriver, } expectedAgentConfig := vc.HyperConfig{} expectedProxyConfig := vc.ProxyConfig{ Path: proxyPath, } expectedShimConfig := vc.ShimConfig{ Path: shimPath, } expectedConfig := oci.RuntimeConfig{ HypervisorType: defaultHypervisor, HypervisorConfig: expectedHypervisorConfig, AgentType: defaultAgent, AgentConfig: expectedAgentConfig, ProxyType: defaultProxy, ProxyConfig: expectedProxyConfig, ShimType: defaultShim, ShimConfig: expectedShimConfig, } if reflect.DeepEqual(config, expectedConfig) == false { t.Fatalf("Got %v\n expecting %v", config, expectedConfig) } if err := os.Remove(configPath); err != nil { t.Fatal(err) } }
explode_data.jsonl/5123
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 823 }
[ 2830, 3393, 88328, 15123, 2648, 1155, 353, 8840, 836, 8, 341, 48532, 11, 1848, 1669, 43144, 65009, 6184, 8623, 6184, 11, 330, 92607, 68912, 25130, 12, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 16867, 2643, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestListServices(t *testing.T) { testhelper.SetupHTTP() defer testhelper.TeardownHTTP() HandleListSuccessfully(t) pages := 0 err := services.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { pages++ actual, err := services.ExtractServices(page) if err != nil { return false, err } if len(actual) != 4 { t.Fatalf("Expected 4 services, got %d", len(actual)) } testhelper.CheckDeepEquals(t, FirstFakeService, actual[0]) testhelper.CheckDeepEquals(t, SecondFakeService, actual[1]) testhelper.CheckDeepEquals(t, ThirdFakeService, actual[2]) testhelper.CheckDeepEquals(t, FourthFakeService, actual[3]) return true, nil }) testhelper.AssertNoErr(t, err) if pages != 1 { t.Errorf("Expected 1 page, saw %d", pages) } }
explode_data.jsonl/31973
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 301 }
[ 2830, 3393, 852, 11025, 1155, 353, 8840, 836, 8, 341, 18185, 18764, 39820, 9230, 741, 16867, 1273, 18764, 94849, 37496, 9230, 741, 197, 6999, 852, 35959, 1155, 692, 3223, 1134, 1669, 220, 15, 198, 9859, 1669, 3516, 5814, 12805, 13860, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestLoadDefaultConfig(t *testing.T) { testDir, err := os.Getwd() require.NoError(t, err) var cfg *Config t.Run("will find closest match", func(t *testing.T) { err = os.Chdir(filepath.Join(testDir, "testdata", "cfg", "subdir")) require.NoError(t, err) cfg, err = LoadConfigFromDefaultLocations() require.NoError(t, err) require.Equal(t, StringList{"inner"}, cfg.SchemaFilename) }) t.Run("will find config in parent dirs", func(t *testing.T) { err = os.Chdir(filepath.Join(testDir, "testdata", "cfg", "otherdir")) require.NoError(t, err) cfg, err = LoadConfigFromDefaultLocations() require.NoError(t, err) require.Equal(t, StringList{"outer"}, cfg.SchemaFilename) }) t.Run("will return error if config doesn't exist", func(t *testing.T) { err = os.Chdir(testDir) require.NoError(t, err) cfg, err = LoadConfigFromDefaultLocations() require.True(t, os.IsNotExist(err)) }) }
explode_data.jsonl/38430
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 356 }
[ 2830, 3393, 5879, 3675, 2648, 1155, 353, 8840, 836, 8, 341, 18185, 6184, 11, 1848, 1669, 2643, 2234, 6377, 741, 17957, 35699, 1155, 11, 1848, 340, 2405, 13286, 353, 2648, 271, 3244, 16708, 445, 14387, 1477, 18093, 2432, 497, 2915, 1155,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestExecInContainer wires a fake command runner into the kubelet, calls
// ExecInContainer for a known pod/container, and verifies every argument
// (container ID, command, stdin/stdout/stderr streams, tty flag) is forwarded
// to the runner unchanged.
func TestExecInContainer(t *testing.T) {
	testKubelet := newTestKubelet(t)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	// The fake runner records the arguments it was invoked with.
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner

	podName := "podFoo"
	podNamespace := "nsFoo"
	containerID := "containerFoo"
	command := []string{"ls"}
	stdin := &bytes.Buffer{}
	stdout := &fakeReadWriteCloser{}
	stderr := &fakeReadWriteCloser{}
	tty := true

	// Make the fake runtime report a pod containing the target container so
	// the kubelet can resolve the exec target.
	fakeRuntime.PodList = []*kubecontainer.Pod{
		{
			ID:        "12345678",
			Name:      podName,
			Namespace: podNamespace,
			Containers: []*kubecontainer.Container{
				{Name: containerID,
					ID: kubecontainer.ContainerID{"test", containerID},
				},
			},
		},
	}

	err := kubelet.ExecInContainer(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      podName,
			Namespace: podNamespace,
		}}),
		"",
		containerID,
		[]string{"ls"},
		stdin,
		stdout,
		stderr,
		tty,
	)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	// Every argument below must have reached the fake runner verbatim.
	if e, a := containerID, fakeCommandRunner.ID.ID; e != a {
		t.Fatalf("container name: expected %q, got %q", e, a)
	}
	if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) {
		t.Fatalf("command: expected '%v', got '%v'", e, a)
	}
	if e, a := stdin, fakeCommandRunner.Stdin; e != a {
		t.Fatalf("stdin: expected %#v, got %#v", e, a)
	}
	if e, a := stdout, fakeCommandRunner.Stdout; e != a {
		t.Fatalf("stdout: expected %#v, got %#v", e, a)
	}
	if e, a := stderr, fakeCommandRunner.Stderr; e != a {
		t.Fatalf("stderr: expected %#v, got %#v", e, a)
	}
	if e, a := tty, fakeCommandRunner.TTY; e != a {
		t.Fatalf("tty: expected %t, got %t", e, a)
	}
}
explode_data.jsonl/43321
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 754 }
[ 2830, 3393, 10216, 641, 4502, 1155, 353, 8840, 836, 8, 341, 18185, 42, 3760, 1149, 1669, 501, 2271, 42, 3760, 1149, 1155, 340, 16463, 3760, 1149, 1669, 1273, 42, 3760, 1149, 5202, 3760, 1149, 198, 1166, 726, 15123, 1669, 1273, 42, 376...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestTypeSystem_UnionTypesMustBeResolvable_AcceptsAUnionOfObjectTypesDefiningIsTypeOf(t *testing.T) { _, err := schemaWithFieldType(graphql.NewUnion(graphql.UnionConfig{ Name: "SomeUnion", Types: []*graphql.Object{objectWithIsTypeOf}, })) if err != nil { t.Fatalf("unexpected error: %v", err) } }
explode_data.jsonl/79164
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 929, 2320, 62, 32658, 4173, 31776, 3430, 1061, 88097, 1566, 66, 57771, 32, 32658, 2124, 1190, 4173, 2620, 5740, 3872, 929, 2124, 1155, 353, 8840, 836, 8, 1476, 197, 6878, 1848, 1669, 10802, 2354, 63733, 24312, 1470, 7121, 32...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPingWireErrors(t *testing.T) { pver := wire.ProtocolVersion tests := []struct { in *wire.MsgPing // Value to encode buf []byte // Wire encoding pver uint32 // Protocol version for wire encoding max int // Max size of fixed buffer to induce errors writeErr error // Expected write error readErr error // Expected read error }{ // Latest protocol version with intentional read/write errors. { &wire.MsgPing{Nonce: 123123}, // 0x1e0f3 []byte{0xf3, 0xe0, 0x01, 0x00}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode to wire format. w := newFixedWriter(test.max) err := test.in.BtcEncode(w, test.pver) if err != test.writeErr { t.Errorf("BtcEncode #%d wrong error got: %v, want: %v", i, err, test.writeErr) continue } // Decode from wire format. var msg wire.MsgPing r := newFixedReader(test.max, test.buf) err = msg.BtcDecode(r, test.pver) if err != test.readErr { t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", i, err, test.readErr) continue } } }
explode_data.jsonl/1147
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 535 }
[ 2830, 3393, 69883, 37845, 13877, 1155, 353, 8840, 836, 8, 341, 3223, 423, 1669, 9067, 54096, 5637, 271, 78216, 1669, 3056, 1235, 341, 197, 17430, 981, 353, 35531, 30365, 69883, 442, 5162, 311, 16164, 198, 197, 26398, 414, 3056, 3782, 28...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestIssue239(t *testing.T) { withTestProcess("is sue239", t, func(p *proc.Target, fixture protest.Fixture) { setFileBreakpoint(p, t, fixture.Source, 17) assertNoError(p.Continue(), t, "Continue()") }) }
explode_data.jsonl/56224
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 79 }
[ 2830, 3393, 42006, 17, 18, 24, 1155, 353, 8840, 836, 8, 341, 46948, 2271, 7423, 445, 285, 33772, 17, 18, 24, 497, 259, 11, 2915, 1295, 353, 15782, 35016, 11, 12507, 8665, 991, 12735, 8, 341, 197, 8196, 1703, 22524, 2768, 1295, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBuildReqVipIn(t *testing.T) { buildC, err := Build("req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") if err != nil { t.Errorf("build failed, req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } req.Session.Vip = net.IPv4(1, 1, 1, 1).To4() if !buildC.Match(&req) { t.Errorf("1.1.1.1 not match req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } req.Session.Vip = net.ParseIP("2001:0DB8:02de:0::e13") if !buildC.Match(&req) { t.Errorf("2001:DB8:2de::e13 not match req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } req.Session.Vip = net.ParseIP("2002:0DB8:02de:0::e13") if buildC.Match(&req) { t.Errorf("2002:DB8:2de::e13 not match req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } }
explode_data.jsonl/11123
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 378 }
[ 2830, 3393, 11066, 27234, 53, 573, 641, 1155, 353, 8840, 836, 8, 341, 69371, 34, 11, 1848, 1669, 7854, 445, 2958, 2273, 573, 1243, 36014, 16, 13, 16, 13, 16, 13, 16, 91, 17, 15, 15, 16, 25, 3506, 23, 25, 17, 450, 486, 68, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestAPIRemovedInNextReleaseInUse(t *testing.T) { kubeConfig, err := test.NewClientConfigForTest() require.NoError(t, err) // get current major.minor version discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig) require.NoError(t, err) version, err := discoveryClient.ServerVersion() require.NoError(t, err) currentMajor, err := strconv.Atoi(version.Major) require.NoError(t, err) currentMinor, err := strconv.Atoi(regexp.MustCompile(`^\d*`).FindString(version.Minor)) // get deprecated major.minor version from alert expression // NOTE: the alert major and minor version is hardcoded // this test will fail in each version bump until the alert is updated // xref: https://github.com/openshift/cluster-kube-apiserver-operator/blob/master/bindata/assets/alerts/api-usage.yaml monitoringClient, err := monitoringclient.NewForConfig(kubeConfig) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() rule, err := monitoringClient.MonitoringV1().PrometheusRules("openshift-kube-apiserver").Get(ctx, "api-usage", v1.GetOptions{}) require.NoError(t, err) expr := func() string { for _, group := range rule.Spec.Groups { for _, rule := range group.Rules { if rule.Alert == "APIRemovedInNextReleaseInUse" { return strings.TrimSpace(rule.Expr.StrVal) } } } return "" }() require.NotEmpty(t, expr, "Unable to find the alert expression.") removedRelease := strings.Split(regexp.MustCompile(`.*removed_release="(\d+\.\d+)".*`).ReplaceAllString(expr, "$1"), ".") require.Len(t, removedRelease, 2, "Unable to parse the removed release version from the alert expression.") major, err := strconv.Atoi(removedRelease[0]) require.NoError(t, err) minor, err := strconv.Atoi(removedRelease[1]) require.NoError(t, err) // rewrite this test if the major version ever changes require.Equal(t, currentMajor, major) require.Equal(t, currentMinor+1, minor) }
explode_data.jsonl/70331
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 682 }
[ 2830, 3393, 7082, 42642, 641, 5847, 16077, 641, 10253, 1155, 353, 8840, 836, 8, 341, 16463, 3760, 2648, 11, 1848, 1669, 1273, 7121, 2959, 2648, 2461, 2271, 741, 17957, 35699, 1155, 11, 1848, 692, 197, 322, 633, 1482, 3598, 4358, 269, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestSummaryKeysGeneric(t *testing.T) { measurement := &model.Measurement{TestKeys: &tlstool.TestKeys{}} m := &tlstool.Measurer{} osk, err := m.GetSummaryKeys(measurement) if err != nil { t.Fatal(err) } sk := osk.(tlstool.SummaryKeys) if sk.IsAnomaly { t.Fatal("invalid isAnomaly") } }
explode_data.jsonl/4140
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 19237, 8850, 19964, 1155, 353, 8840, 836, 8, 341, 49294, 24359, 1669, 609, 2528, 53447, 24359, 90, 2271, 8850, 25, 609, 11544, 267, 1749, 8787, 8850, 6257, 532, 2109, 1669, 609, 11544, 267, 1749, 53447, 56294, 16094, 25078, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_PostDataFail(t *testing.T) { chain, mock33 := createBlockChain(t) subscribe := new(types.PushSubscribeReq) subscribe.Name = "push-test" subscribe.URL = "http://localhost" subscribe.Type = PushBlock err := chain.push.addSubscriber(subscribe) time.Sleep(2 * time.Second) assert.Equal(t, err, nil) createBlocks(t, mock33, chain, 10) keyStr := string(calcPushKey(subscribe.Name)) pushNotify := chain.push.tasks[keyStr] assert.Equal(t, pushNotify.subscribe.Name, subscribe.Name) assert.Equal(t, pushNotify.status, running) err = chain.push.postService.PostData(subscribe, []byte("1"), 1) assert.NotEqual(t, nil, err) mock33.Close() }
explode_data.jsonl/61723
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 249 }
[ 2830, 3393, 66726, 1043, 19524, 1155, 353, 8840, 836, 8, 341, 197, 8819, 11, 7860, 18, 18, 1669, 1855, 4713, 18837, 1155, 692, 28624, 6273, 1669, 501, 52613, 34981, 28573, 27234, 340, 28624, 6273, 2967, 284, 330, 9077, 16839, 698, 28624...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestProtoMatching(t *testing.T) { tcp1, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234") tcp2, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/2345") tcp3, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/4567") utp, _ := ma.NewMultiaddr("/ip4/1.2.3.4/udp/1234/utp") if !identify.HasConsistentTransport(tcp1, []ma.Multiaddr{tcp2, tcp3, utp}) { t.Fatal("expected match") } if identify.HasConsistentTransport(utp, []ma.Multiaddr{tcp2, tcp3}) { t.Fatal("expected mismatch") } }
explode_data.jsonl/59578
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 236 }
[ 2830, 3393, 31549, 64430, 1155, 353, 8840, 836, 8, 341, 3244, 4672, 16, 11, 716, 1669, 7491, 7121, 20358, 6214, 4283, 573, 19, 14, 16, 13, 17, 13, 18, 13, 19, 95958, 14, 16, 17, 18, 19, 1138, 3244, 4672, 17, 11, 716, 1669, 7491,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// Test_uploadRequest_doUpload runs doUpload against an in-memory media
// database with an 8-byte file-size limit: uploads under and exactly at the
// limit must succeed (nil response), and one over the limit must return the
// "request entity too large" JSON response.
func Test_uploadRequest_doUpload(t *testing.T) {
	type fields struct {
		MediaMetadata *types.MediaMetadata
		Logger        *log.Entry
	}
	type args struct {
		ctx                       context.Context
		reqReader                 io.Reader
		cfg                       *config.MediaAPI
		db                        storage.Database
		activeThumbnailGeneration *types.ActiveThumbnailGeneration
	}

	wd, err := os.Getwd()
	if err != nil {
		t.Errorf("failed to get current working directory: %v", err)
	}

	// 8 bytes: small enough that the third case ("test test test") overflows.
	maxSize := config.FileSizeBytes(8)
	logger := log.New().WithField("mediaapi", "test")
	testdataPath := filepath.Join(wd, "./testdata")

	cfg := &config.MediaAPI{
		MaxFileSizeBytes:  &maxSize,
		BasePath:          config.Path(testdataPath),
		AbsBasePath:       config.Path(testdataPath),
		DynamicThumbnails: false,
	}

	// create testdata folder and remove when done
	_ = os.Mkdir(testdataPath, os.ModePerm)
	defer fileutils.RemoveDir(types.Path(testdataPath), nil)

	// Shared in-memory SQLite database for all sub-tests.
	db, err := storage.Open(&config.DatabaseOptions{
		ConnectionString:       "file::memory:?cache=shared",
		MaxOpenConnections:     100,
		MaxIdleConnections:     2,
		ConnMaxLifetimeSeconds: -1,
	})
	if err != nil {
		t.Errorf("error opening mediaapi database: %v", err)
	}

	tests := []struct {
		name   string
		fields fields
		args   args
		want   *util.JSONResponse // nil means success (no error response)
	}{
		{
			name: "upload ok",
			args: args{
				ctx:       context.Background(),
				reqReader: strings.NewReader("test"),
				cfg:       cfg,
				db:        db,
			},
			fields: fields{
				Logger: logger,
				MediaMetadata: &types.MediaMetadata{
					MediaID:    "1337",
					UploadName: "test ok",
				},
			},
			want: nil,
		},
		{
			// "testtest" is exactly 8 bytes — at, not over, the limit.
			name: "upload ok (exact size)",
			args: args{
				ctx:       context.Background(),
				reqReader: strings.NewReader("testtest"),
				cfg:       cfg,
				db:        db,
			},
			fields: fields{
				Logger: logger,
				MediaMetadata: &types.MediaMetadata{
					MediaID:    "1338",
					UploadName: "test ok (exact size)",
				},
			},
			want: nil,
		},
		{
			// 14 bytes > 8-byte limit: must be rejected.
			name: "upload not ok",
			args: args{
				ctx:       context.Background(),
				reqReader: strings.NewReader("test test test"),
				cfg:       cfg,
				db:        db,
			},
			fields: fields{
				Logger: logger,
				MediaMetadata: &types.MediaMetadata{
					MediaID:    "1339",
					UploadName: "test fail",
				},
			},
			want: requestEntityTooLargeJSONResponse(maxSize),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := &uploadRequest{
				MediaMetadata: tt.fields.MediaMetadata,
				Logger:        tt.fields.Logger,
			}
			if got := r.doUpload(tt.args.ctx, tt.args.reqReader, tt.args.cfg, tt.args.db, tt.args.activeThumbnailGeneration); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("doUpload() = %+v, want %+v", got, tt.want)
			}
		})
	}
}
explode_data.jsonl/76865
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1312 }
[ 2830, 3393, 21691, 1900, 26309, 13844, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 9209, 4495, 14610, 353, 9242, 19768, 14610, 198, 197, 55861, 286, 353, 839, 22330, 198, 197, 532, 13158, 2827, 2036, 341, 197, 20985, 5180...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestChecksum exercises the checksum helper across four hash algorithms and
// several failure modes (missing file, unreadable file, directory, nil hash),
// plus the md5sum/sha1sum convenience frontends.
func TestChecksum(t *testing.T) {
	// The "FileIsNotReadable" case relies on /root/.bashrc being unreadable,
	// which is not true when running as root.
	if usr, _ := user.Current(); usr == nil || usr.Username == "root" {
		t.Skip("test assumes not root")
	}
	// Use the caller's source file as a known-existing, readable file.
	_, file, _, _ := runtime.Caller(1)
	for name, hash := range map[string]crypto.Hash{
		"MD5":    crypto.MD5,
		"SHA512": crypto.SHA512,
		"SHA1":   crypto.SHA1,
		"SHA256": crypto.SHA256,
	} {
		t.Run(name, func(t *testing.T) {
			t.Run("NoFile", func(t *testing.T) {
				out, err := checksum(hash.New(), "")
				assert.Error(t, err)
				assert.Zero(t, out)
			})
			t.Run("FileIsNotReadable", func(t *testing.T) {
				out, err := checksum(hash.New(), "/root/.bashrc")
				assert.Error(t, err)
				assert.Zero(t, out)
			})
			t.Run("FileIsDirectory", func(t *testing.T) {
				out, err := checksum(hash.New(), filepath.Dir(file))
				assert.Error(t, err)
				assert.Zero(t, out)
			})
			t.Run("FileExists", func(t *testing.T) {
				out, err := checksum(hash.New(), file)
				assert.NoError(t, err)
				assert.NotZero(t, out)
			})
		})
	}
	// A nil hash is a programming error and is expected to panic.
	t.Run("NilHash", func(t *testing.T) {
		assert.Panics(t, func() {
			out, err := checksum(nil, file)
			assert.Error(t, err)
			assert.Zero(t, out)
		})
	})
	// The frontends wrap checksum with a fixed algorithm.
	t.Run("ChecksumFrontends", func(t *testing.T) {
		out, err := md5sum(file)
		assert.NoError(t, err)
		assert.NotZero(t, out)
		out, err = sha1sum(file)
		assert.NoError(t, err)
		assert.NotZero(t, out)
	})
}
explode_data.jsonl/6760
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 642 }
[ 2830, 3393, 73190, 1155, 353, 8840, 836, 8, 341, 743, 43071, 11, 716, 1669, 1196, 11517, 2129, 43071, 621, 2092, 1369, 43071, 42777, 621, 330, 2888, 1, 341, 197, 3244, 57776, 445, 1944, 21484, 537, 3704, 1138, 197, 532, 197, 6878, 103...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStepPrepareTools_nonExist(t *testing.T) { state := testState(t) step := &StepPrepareTools{ RemoteType: "", ToolsUploadFlavor: "foo", } driver := state.Get("driver").(*DriverMock) // Mock results driver.ToolsIsoPathResult = "foo" // Test the run if action := step.Run(state); action != multistep.ActionHalt { t.Fatalf("bad action: %#v", action) } if _, ok := state.GetOk("error"); !ok { t.Fatal("should have error") } // Test the driver if !driver.ToolsIsoPathCalled { t.Fatal("tools iso path should be called") } if driver.ToolsIsoPathFlavor != "foo" { t.Fatalf("bad: %#v", driver.ToolsIsoPathFlavor) } // Test the resulting state if _, ok := state.GetOk("tools_upload_source"); ok { t.Fatal("should NOT have tools_upload_source") } }
explode_data.jsonl/77418
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 306 }
[ 2830, 3393, 8304, 50590, 16583, 21637, 25613, 1155, 353, 8840, 836, 8, 341, 24291, 1669, 1273, 1397, 1155, 340, 77093, 1669, 609, 8304, 50590, 16583, 515, 197, 197, 24703, 929, 25, 286, 8324, 197, 197, 16583, 13844, 3882, 3292, 25, 330,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestVerifyChallengeTxSigners_validIgnorePreauthTxHashAndXHash verifies that
// VerifyChallengeTxSigners reports only the real client signer and silently
// ignores pre-auth TX hash (T...), hash-X (X...) and unknown-type signer
// strings passed alongside it.
func TestVerifyChallengeTxSigners_validIgnorePreauthTxHashAndXHash(t *testing.T) {
	serverKP := newKeypair0()
	clientKP := newKeypair1()
	clientKP2 := newKeypair2()
	// Non-ed25519 signer strings that must be ignored by verification.
	preauthTxHash := "TAQCSRX2RIDJNHFIFHWD63X7D7D6TRT5Y2S6E3TEMXTG5W3OECHZ2OG4"
	xHash := "XDRPF6NZRR7EEVO7ESIWUDXHAOMM2QSKIQQBJK6I2FB7YKDZES5UCLWD"
	unknownSignerType := "?ARPF6NZRR7EEVO7ESIWUDXHAOMM2QSKIQQBJK6I2FB7YKDZES5UCLWD"
	txSource := NewSimpleAccount(serverKP.Address(), -1)
	// Standard SEP-10 challenge operation: home-domain auth entry with a
	// 48-byte base64 nonce.
	op := ManageData{
		SourceAccount: clientKP.Address(),
		Name:          "testanchor.stellar.org auth",
		Value:         []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))),
	}
	webAuthDomainOp := ManageData{
		SourceAccount: serverKP.Address(),
		Name:          "web_auth_domain",
		Value:         []byte("testwebauth.stellar.org"),
	}
	// Challenge is signed by the server and by clientKP2 (not clientKP).
	tx64, err := newSignedTransaction(
		TransactionParams{
			SourceAccount:        &txSource,
			IncrementSequenceNum: true,
			Operations:           []Operation{&op, &webAuthDomainOp},
			BaseFee:              MinBaseFee,
			Timebounds:           NewTimeout(1000),
		},
		network.TestNetworkPassphrase,
		serverKP, clientKP2,
	)
	assert.NoError(t, err)

	signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP2.Address(), preauthTxHash, xHash, unknownSignerType)
	// Only the actual signing client key may be reported; the T/X/unknown
	// strings must not cause an error.
	assert.Equal(t, []string{clientKP2.Address()}, signersFound)
	assert.NoError(t, err)
}
explode_data.jsonl/20755
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 658 }
[ 2830, 3393, 32627, 62078, 31584, 7264, 388, 8337, 12497, 4703, 3242, 31584, 6370, 3036, 55, 6370, 1155, 353, 8840, 836, 8, 341, 41057, 65036, 1669, 501, 6608, 1082, 1310, 15, 741, 25291, 65036, 1669, 501, 6608, 1082, 1310, 16, 741, 2529...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestDeleteJob_CustomResourceNotFound verifies that deleting a job still
// succeeds when its ScheduledWorkflow custom resource has already been
// removed, and that the job is gone from the database afterwards.
func TestDeleteJob_CustomResourceNotFound(t *testing.T) {
	store, manager, job := initWithJob(t)
	defer store.Close()
	// The swf CR can be missing when user reinstalled KFP using existing DB data.
	// Explicitly delete it to simulate the situation.
	manager.getScheduledWorkflowClient(job.Namespace).Delete(job.Name, &v1.DeleteOptions{})
	// Now deleting job should still succeed when the swf CR is already deleted.
	err := manager.DeleteJob(job.UUID)
	assert.Nil(t, err)

	// And verify Job has been deleted from DB too.
	_, err = manager.GetJob(job.UUID)
	require.NotNil(t, err)
	// The lookup failure must be a NotFound user error mentioning the UUID.
	assert.Equal(t, codes.NotFound, err.(*util.UserError).ExternalStatusCode())
	assert.Contains(t, err.Error(), fmt.Sprintf("Job %v not found", job.UUID))
}
explode_data.jsonl/77052
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 245 }
[ 2830, 3393, 6435, 12245, 57402, 4783, 10372, 1155, 353, 8840, 836, 8, 341, 57279, 11, 6645, 11, 2618, 1669, 13864, 12245, 1155, 340, 16867, 3553, 10421, 741, 197, 322, 576, 2021, 69, 12617, 646, 387, 7402, 979, 1196, 312, 49573, 730, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMaxDocs verifies the maxDocs truncation of mongodb transaction
// responses: more documents than the limit get a "[...]" suffix, exactly the
// limit and fewer are passed through, and maxDocs=0 means unlimited.
func TestMaxDocs(t *testing.T) {
	if testing.Verbose() {
		logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"mongodb", "mongodbdetailed"})
	}

	// more docs than configured
	trans := transaction{
		documents: []interface{}{
			1, 2, 3, 4, 5, 6, 7, 8,
		},
	}

	mongodb := mongodbModForTests()
	mongodb.sendResponse = true
	mongodb.maxDocs = 3

	mongodb.publishTransaction(&trans)

	res := expectTransaction(t, mongodb)
	// Truncated at 3 docs with an ellipsis marker.
	assert.Equal(t, "1\n2\n3\n[...]", res["response"])

	// exactly the same number of docs
	trans = transaction{
		documents: []interface{}{
			1, 2, 3,
		},
	}
	mongodb.publishTransaction(&trans)
	res = expectTransaction(t, mongodb)
	// No marker when the count equals the limit.
	assert.Equal(t, "1\n2\n3", res["response"])

	// less docs
	trans = transaction{
		documents: []interface{}{
			1, 2,
		},
	}
	mongodb.publishTransaction(&trans)
	res = expectTransaction(t, mongodb)
	assert.Equal(t, "1\n2", res["response"])

	// unlimited
	trans = transaction{
		documents: []interface{}{
			1, 2, 3, 4,
		},
	}
	mongodb.maxDocs = 0
	mongodb.publishTransaction(&trans)
	res = expectTransaction(t, mongodb)
	assert.Equal(t, "1\n2\n3\n4", res["response"])
}
explode_data.jsonl/35544
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 479 }
[ 2830, 3393, 5974, 63107, 1155, 353, 8840, 836, 8, 1476, 743, 7497, 42505, 8297, 368, 341, 197, 6725, 79, 5247, 3803, 12531, 79, 36202, 11139, 11, 7342, 895, 11, 830, 11, 3056, 917, 4913, 37197, 497, 330, 71155, 347, 8940, 10111, 23625...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDropReplicationSlot(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING")) require.NoError(t, err) defer closeConn(t, conn) _, err = pglogrepl.CreateReplicationSlot(ctx, conn, slotName, outputPlugin, pglogrepl.CreateReplicationSlotOptions{Temporary: true}) require.NoError(t, err) err = pglogrepl.DropReplicationSlot(ctx, conn, slotName, pglogrepl.DropReplicationSlotOptions{}) require.NoError(t, err) _, err = pglogrepl.CreateReplicationSlot(ctx, conn, slotName, outputPlugin, pglogrepl.CreateReplicationSlotOptions{Temporary: true}) require.NoError(t, err) }
explode_data.jsonl/45905
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 247 }
[ 2830, 3393, 19871, 18327, 1693, 19877, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 7636, 5378, 19047, 1507, 882, 32435, 9, 20, 340, 16867, 9121, 2822, 32917, 11, 1848, 1669, 17495, 5148, 43851, 7502, 11, 2643, 6488...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestYouonBSpecial(t *testing.T) { const want = "babibubebobwabyubi" for _, v := range [2]string{"ぶぁぶぃぶぅぶぇぶぉぶゎぶゅべぃ", "ブァブィブゥブェブォブヮブュベィ"} { got, err := KanaToRomaji(v) assert.Equal(t, want, got) assert.Nil(t, err) } }
explode_data.jsonl/11331
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 2610, 263, 7347, 2964, 1155, 353, 8840, 836, 8, 341, 4777, 1366, 284, 330, 47722, 579, 392, 3065, 674, 86, 6115, 54233, 1837, 2023, 8358, 348, 1669, 2088, 508, 17, 30953, 4913, 125530, 126025, 125530, 144205, 125530, 144260, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCheckAPICompat(t *testing.T) { client := &deis.Client{ControllerAPIVersion: deis.APIVersion} err := deis.ErrAPIMismatch if apiErr := CheckAPICompat(client, err); apiErr != nil { t.Errorf("api errors are non-fatal and should return nil, got '%v'", apiErr) } err = errors.New("random error") if apiErr := CheckAPICompat(client, err); apiErr == nil { t.Error("expected error to be returned, got nil") } }
explode_data.jsonl/39388
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 3973, 7082, 8712, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 609, 450, 285, 11716, 90, 2051, 7082, 5637, 25, 409, 285, 24922, 5637, 532, 9859, 1669, 409, 285, 27862, 2537, 1791, 24976, 271, 743, 6330, 7747, 1669, 4248, 708...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestManifestToString(t *testing.T) { manifest, err := bosh.NewManifest(deploymentName, networkName, false, boshDetails) Equal(t, err, nil) NotEqual(t, manifest, nil) manifest.Name = "test-deployment-name" Equal(t, strings.Contains(manifest.String(), "name: test-deployment-name"), true) Equal(t, strings.Contains(manifest.String(), "name: hyperledger-fabric"), false) Equal(t, strings.Contains(manifest.String(), "plugin: pbft"), true) }
explode_data.jsonl/52712
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 168 }
[ 2830, 3393, 38495, 5870, 1155, 353, 8840, 836, 8, 341, 197, 42315, 11, 1848, 1669, 293, 9267, 7121, 38495, 12797, 52799, 675, 11, 3922, 675, 11, 895, 11, 293, 9267, 7799, 692, 197, 2993, 1155, 11, 1848, 11, 2092, 340, 197, 2623, 299...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEnableJob(t *testing.T) { store, manager, job := initWithJob(t) defer store.Close() err := manager.EnableJob(job.UUID, false) job, err = manager.GetJob(job.UUID) expectedJob := &model.Job{ UUID: "123", DisplayName: "j1", Name: "j1", Namespace: "default", Enabled: false, CreatedAtInSec: 2, UpdatedAtInSec: 3, Conditions: "NO_STATUS", PipelineSpec: model.PipelineSpec{ WorkflowSpecManifest: testWorkflow.ToStringForStore(), }, ResourceReferences: []*model.ResourceReference{ { ResourceUUID: "123", ResourceType: common.Job, ReferenceUUID: DefaultFakeUUID, ReferenceType: common.Experiment, Relationship: common.Owner, }, }, } assert.Nil(t, err) assert.Equal(t, expectedJob, job) }
explode_data.jsonl/28381
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 352 }
[ 2830, 3393, 11084, 12245, 1155, 353, 8840, 836, 8, 341, 57279, 11, 6645, 11, 2618, 1669, 13864, 12245, 1155, 340, 16867, 3553, 10421, 741, 9859, 1669, 6645, 32287, 12245, 28329, 39636, 11, 895, 340, 68577, 11, 1848, 284, 6645, 2234, 122...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRPC_RelayCORS(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") } codec.UpgradeHeight = 7000 //kb := getInMemoryKeybase() genBZ, _, _, _ := fiveValidatorsOneAppGenesis() _, _, cleanup := NewInMemoryTendermintNode(t, genBZ) // setup the query _, stopCli, evtChan := subscribeTo(t, tmTypes.EventNewBlock) <-evtChan // Wait for block q := newCORSRequest("relay", newBody("")) rec := httptest.NewRecorder() Relay(rec, q, httprouter.Params{}) validateResponseCORSHeaders(t, rec.Result().Header) <-evtChan // Wait for block q = newCORSRequest("relay", newBody("")) rec = httptest.NewRecorder() Relay(rec, q, httprouter.Params{}) validateResponseCORSHeaders(t, rec.Result().Header) cleanup() stopCli() }
explode_data.jsonl/44727
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 295 }
[ 2830, 3393, 29528, 2568, 6895, 34, 9821, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 4886, 5654, 304, 2805, 3856, 1138, 197, 532, 43343, 66, 13, 43861, 3640, 284, 220, 22, 15, 15, 15, 198, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetRecentlyActiveUsersInTeam(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() teamId := th.BasicTeam.Id th.App.SetStatusOnline(th.BasicUser.Id, true) rusers, _, err := th.Client.GetRecentlyActiveUsersInTeam(teamId, 0, 60, "") require.NoError(t, err) for _, u := range rusers { require.NotZero(t, u.LastActivityAt, "should return last activity at") CheckUserSanitization(t, u) } rusers, _, err = th.Client.GetRecentlyActiveUsersInTeam(teamId, 0, 1, "") require.NoError(t, err) require.Len(t, rusers, 1, "should be 1 per page") th.Client.Logout() _, resp, err := th.Client.GetRecentlyActiveUsersInTeam(teamId, 0, 1, "") require.Error(t, err) CheckUnauthorizedStatus(t, resp) }
explode_data.jsonl/47516
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 279 }
[ 2830, 3393, 1949, 45137, 5728, 7137, 641, 14597, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 741, 197, 9196, 764, 1669, 270, 48868, 14597, 6444, 271, 70479, 5105, 4202, 2522...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestConfigWithMalformedURL(t *testing.T) { _, err := (&Config{PrestoURI: ":("}).FormatDSN() if err == nil { t.Fatal("dsn generated from malformed url") } }
explode_data.jsonl/62435
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 66 }
[ 2830, 3393, 2648, 2354, 29600, 10155, 3144, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1848, 1669, 15899, 2648, 90, 47, 3927, 78, 10301, 25, 13022, 445, 16630, 4061, 5936, 45, 741, 743, 1848, 621, 2092, 341, 197, 3244, 26133, 445, 75136...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestADS1x15DriverBestGainForVoltage(t *testing.T) { d, _ := initTestADS1015DriverWithStubbedAdaptor() g, err := d.BestGainForVoltage(1.5) gobottest.Assert(t, g, 2) g, err = d.BestGainForVoltage(20.0) gobottest.Assert(t, err, errors.New("The maximum voltage which can be read is 6.144000")) }
explode_data.jsonl/42588
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 49541, 16, 87, 16, 20, 11349, 14470, 58611, 2461, 94343, 1155, 353, 8840, 836, 8, 341, 2698, 11, 716, 1669, 2930, 2271, 49541, 16, 15, 16, 20, 11349, 2354, 33838, 2721, 2589, 32657, 2822, 3174, 11, 1848, 1669, 294, 1785, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateEnv(t *testing.T) { successCase := []api.EnvVar{ {Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}, {Name: "AbC_123", Value: "value"}, {Name: "abc", Value: ""}, { Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ APIVersion: testapi.Version(), FieldPath: "metadata.name", }, }, }, } if errs := validateEnv(successCase); len(errs) != 0 { t.Errorf("expected success: %v", errs) } errorCases := []struct { name string envs []api.EnvVar expectedError string }{ { name: "zero-length name", envs: []api.EnvVar{{Name: ""}}, expectedError: "[0].name: required value", }, { name: "name not a C identifier", envs: []api.EnvVar{{Name: "a.b.c"}}, expectedError: `[0].name: invalid value 'a.b.c', Details: must be a C identifier (matching regex [A-Za-z_][A-Za-z0-9_]*): e.g. "my_name" or "MyName"`, }, { name: "value and valueFrom specified", envs: []api.EnvVar{{ Name: "abc", Value: "foo", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ APIVersion: testapi.Version(), FieldPath: "metadata.name", }, }, }}, expectedError: "[0].valueFrom: invalid value '', Details: sources cannot be specified when value is not empty", }, { name: "missing FieldPath on ObjectFieldSelector", envs: []api.EnvVar{{ Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ APIVersion: testapi.Version(), }, }, }}, expectedError: "[0].valueFrom.fieldRef.fieldPath: required value", }, { name: "missing APIVersion on ObjectFieldSelector", envs: []api.EnvVar{{ Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "metadata.name", }, }, }}, expectedError: "[0].valueFrom.fieldRef.apiVersion: required value", }, { name: "invalid fieldPath", envs: []api.EnvVar{{ Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "metadata.whoops", APIVersion: testapi.Version(), }, }, }}, expectedError: "[0].valueFrom.fieldRef.fieldPath: invalid value 'metadata.whoops', 
Details: error converting fieldPath", }, { name: "invalid fieldPath labels", envs: []api.EnvVar{{ Name: "labels", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "metadata.labels", APIVersion: "v1", }, }, }}, expectedError: "[0].valueFrom.fieldRef.fieldPath: unsupported value 'metadata.labels': supported values: metadata.name, metadata.namespace", }, { name: "invalid fieldPath annotations", envs: []api.EnvVar{{ Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "metadata.annotations", APIVersion: "v1", }, }, }}, expectedError: "[0].valueFrom.fieldRef.fieldPath: unsupported value 'metadata.annotations': supported values: metadata.name, metadata.namespace", }, { name: "unsupported fieldPath", envs: []api.EnvVar{{ Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "status.phase", APIVersion: testapi.Version(), }, }, }}, expectedError: "[0].valueFrom.fieldRef.fieldPath: unsupported value 'status.phase', Details: supported values: metadata.name, metadata.namespace", }, } for _, tc := range errorCases { if errs := validateEnv(tc.envs); len(errs) == 0 { t.Errorf("expected failure for %s", tc.name) } else { for i := range errs { str := errs[i].(*errors.ValidationError).Error() if str != "" && str != tc.expectedError { t.Errorf("%s: expected error detail either empty or %s, got %s", tc.name, tc.expectedError, str) } } } } }
explode_data.jsonl/62788
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1758 }
[ 2830, 3393, 17926, 14359, 1155, 353, 8840, 836, 8, 341, 30553, 4207, 1669, 3056, 2068, 81214, 3962, 515, 197, 197, 63121, 25, 330, 13683, 497, 5162, 25, 330, 957, 7115, 197, 197, 63121, 25, 330, 25411, 497, 5162, 25, 330, 957, 7115, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestMapProxy_PutTransient(t *testing.T) { testKey := "testingKey" testValue := "testingValue" mp.Put(testKey, testValue) mp.PutTransient(testKey, "nextValue", 100, time.Second) res, err := mp.Get(testKey) AssertEqualf(t, err, res, "nextValue", "putTransient failed") mp.Clear() }
explode_data.jsonl/56972
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 112 }
[ 2830, 3393, 2227, 16219, 1088, 332, 48183, 1155, 353, 8840, 836, 8, 341, 18185, 1592, 1669, 330, 8840, 1592, 698, 18185, 1130, 1669, 330, 8840, 1130, 698, 53230, 39825, 8623, 1592, 11, 1273, 1130, 340, 53230, 39825, 48183, 8623, 1592, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRemoteAheadStatus(t *testing.T) { output := `On branch develop Your branch is ahead of 'origin/develop' by 1 commit. (use "git push" to publish your local commits) nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, AHEAD) { t.Errorf("Should be ahead status") } }
explode_data.jsonl/14064
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 24703, 87962, 2522, 1155, 353, 8840, 836, 8, 341, 21170, 1669, 1565, 1925, 8870, 2225, 198, 7771, 8870, 374, 8305, 315, 364, 8611, 14, 15840, 6, 553, 220, 16, 5266, 624, 220, 320, 810, 330, 12882, 4484, 1, 311, 3415, 697...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetByInternalOrderID(t *testing.T) { OrdersSetup(t) err := Bot.OrderManager.orderStore.Add(&order.Detail{ Exchange: testExchange, ID: "TestGetByInternalOrderID", InternalOrderID: "internalTest", }) if err != nil { t.Error(err) } o, err := Bot.OrderManager.orderStore.GetByInternalOrderID("internalTest") if err != nil { t.Error(err) } if o == nil { t.Fatal("Expected a matching order") } if o.ID != "TestGetByInternalOrderID" { t.Error("Expected to retrieve order") } _, err = Bot.OrderManager.orderStore.GetByInternalOrderID("NoOrder") if err != ErrOrderNotFound { t.Error(err) } }
explode_data.jsonl/22036
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 263 }
[ 2830, 3393, 1949, 1359, 11569, 4431, 915, 1155, 353, 8840, 836, 8, 341, 197, 24898, 21821, 1155, 340, 9859, 1669, 23007, 19664, 2043, 14041, 6093, 1904, 2099, 1358, 74396, 515, 197, 197, 31564, 25, 286, 1273, 31564, 345, 197, 29580, 25,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestUnversionedGetHealthCheckBundleActivationMulti(t *testing.T) { f := newFixture(t) // Initialize the server as if a bundle plugin was // configured on the manager. bp := pluginBundle.New(&pluginBundle.Config{Bundles: map[string]*pluginBundle.Source{ "b1": {Service: "s1", Resource: "bundle.tar.gz"}, "b2": {Service: "s2", Resource: "bundle.tar.gz"}, "b3": {Service: "s3", Resource: "bundle.tar.gz"}, }}, f.server.manager) f.server.manager.Register(pluginBundle.Name, bp) f.server.bundleStatuses = map[string]*pluginBundle.Status{ "b1": {Name: "b1"}, "b2": {Name: "b2"}, "b3": {Name: "b3"}, } // No bundle has been activated yet, expect the health check to fail req := newReqUnversioned(http.MethodGet, "/health?bundle=true", "") if err := f.executeRequest(req, 500, `{}`); err != nil { t.Fatal(err) } // Set one bundle to be activated update := map[string]*pluginBundle.Status{ "b1": {Name: "b1"}, "b2": {Name: "b2"}, "b3": {Name: "b3"}, } update["b2"].SetActivateSuccess("A") f.server.updateBundleStatus(update) // The heath check should still respond as unhealthy req = newReqUnversioned(http.MethodGet, "/health?bundle=true", "") if err := f.executeRequest(req, 500, `{}`); err != nil { t.Fatal(err) } // Activate all the bundles update["b1"].SetActivateSuccess("B") update["b3"].SetActivateSuccess("C") f.server.updateBundleStatus(update) // The heath check should succeed now req = newReqUnversioned(http.MethodGet, "/health?bundle=true", "") if err := f.executeRequest(req, 200, `{}`); err != nil { t.Fatal(err) } }
explode_data.jsonl/78978
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 624 }
[ 2830, 3393, 1806, 4366, 291, 1949, 14542, 3973, 8409, 61460, 20358, 1155, 353, 8840, 836, 8, 1476, 1166, 1669, 501, 18930, 1155, 692, 197, 322, 9008, 279, 3538, 438, 421, 264, 12894, 9006, 572, 198, 197, 322, 19755, 389, 279, 6645, 62...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestReingestDB(t *testing.T) { itest, reachedLedger := initializeDBIntegrationTest(t) tt := assert.New(t) // Create a fresh Horizon database newDB := dbtest.Postgres(t) // TODO: Unfortunately Horizon's ingestion System leaves open sessions behind,leading to // a "database is being accessed by other users" error when trying to drop it // defer newDB.Close() freshHorizonPostgresURL := newDB.DSN horizonConfig := itest.GetHorizonConfig() horizonConfig.DatabaseURL = freshHorizonPostgresURL // Initialize the DB schema dbConn, err := db.Open("postgres", freshHorizonPostgresURL) defer dbConn.Close() _, err = schema.Migrate(dbConn.DB.DB, schema.MigrateUp, 0) tt.NoError(err) // cap reachedLedger to the nearest checkpoint ledger because reingest range cannot ingest past the most // recent checkpoint ledger when using captive core toLedger := uint32(reachedLedger) archive, err := historyarchive.Connect(horizonConfig.HistoryArchiveURLs[0], historyarchive.ConnectOptions{ NetworkPassphrase: horizonConfig.NetworkPassphrase, CheckpointFrequency: horizonConfig.CheckpointFrequency, }) tt.NoError(err) // make sure a full checkpoint has elapsed otherwise there will be nothing to reingest var latestCheckpoint uint32 publishedFirstCheckpoint := func() bool { has, requestErr := archive.GetRootHAS() tt.NoError(requestErr) latestCheckpoint = has.CurrentLedger return latestCheckpoint > 1 } tt.Eventually(publishedFirstCheckpoint, 10*time.Second, time.Second) if toLedger > latestCheckpoint { toLedger = latestCheckpoint } horizonConfig.CaptiveCoreConfigAppendPath = filepath.Join( filepath.Dir(horizonConfig.CaptiveCoreConfigAppendPath), "captive-core-reingest-range-integration-tests.cfg", ) // Reingest into the DB err = horizoncmd.RunDBReingestRange(1, toLedger, false, 1, horizonConfig) tt.NoError(err) }
explode_data.jsonl/3152
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 598 }
[ 2830, 3393, 693, 287, 477, 3506, 1155, 353, 8840, 836, 8, 341, 23374, 477, 11, 8643, 60850, 1389, 1669, 9468, 3506, 52464, 2271, 1155, 340, 3244, 83, 1669, 2060, 7121, 1155, 692, 197, 322, 4230, 264, 7722, 55451, 4625, 198, 8638, 3506...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestKeyValueSetReturnsErrorIfNotNil(t *testing.T) { input := "KEY1=AB,KEY2=CD" var s KeyValueString = map[string]string{} err := s.Set(input) assert.NotNil(t, err) assert.EqualError(t, err, "only one instance of this flag is allowed") }
explode_data.jsonl/64727
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 95 }
[ 2830, 3393, 72082, 1649, 16446, 1454, 2679, 96144, 1155, 353, 8840, 836, 8, 341, 22427, 1669, 330, 4784, 16, 28, 1867, 11, 4784, 17, 28, 6484, 698, 2405, 274, 98620, 703, 284, 2415, 14032, 30953, 16094, 9859, 1669, 274, 4202, 5384, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_run(t *testing.T) { t.Log("Current test is [e]") testCases := [][2]string{ { `3 1 2 2 1`, `4`, }, { `5 2 3 3 2 4 4 3`, `90`, }, { `18 0`, `6402373705728000`, }, // TODO 测试参数的下界和上界 } testutil.AssertEqualStringCase(t, testCases, 0, run) }
explode_data.jsonl/72221
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 180 }
[ 2830, 3393, 14007, 1155, 353, 8840, 836, 8, 341, 3244, 5247, 445, 5405, 1273, 374, 508, 68, 47915, 18185, 37302, 1669, 508, 1457, 17, 30953, 515, 197, 197, 515, 298, 197, 63, 18, 220, 16, 198, 17, 220, 17, 220, 16, 12892, 298, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOptions(t *testing.T) { opt := core.InvokeOptions{} option := core.DefaultCallOptions(opt) assert.NotEmpty(t, option) inv := core.StreamingRequest() assert.NotEmpty(t, inv) inv = core.WithEndpoint("0.0.0.0") assert.NotEmpty(t, inv) inv = core.WithProtocol("0.0") assert.NotEmpty(t, inv) inv = core.WithFilters("") assert.NotEmpty(t, inv) inv = core.WithStrategy("") assert.NotEmpty(t, inv) inv = core.WithMetadata(nil) assert.NotEmpty(t, inv) }
explode_data.jsonl/50954
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 189 }
[ 2830, 3393, 3798, 1155, 353, 8840, 836, 8, 341, 64838, 1669, 6200, 32784, 3798, 16094, 80845, 1669, 6200, 13275, 7220, 3798, 24539, 340, 6948, 15000, 3522, 1155, 11, 2999, 692, 197, 14057, 1669, 6200, 33308, 287, 1900, 741, 6948, 15000, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAccAwsDmsEndpoint_DynamoDb(t *testing.T) { resourceName := "aws_dms_endpoint.dms_endpoint" randId := acctest.RandString(8) + "-dynamodb" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: dmsEndpointDestroy, Steps: []resource.TestStep{ { Config: dmsEndpointDynamoDbConfig(randId), Check: resource.ComposeTestCheckFunc( checkDmsEndpointExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "endpoint_arn"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"password"}, }, { Config: dmsEndpointDynamoDbConfigUpdate(randId), Check: resource.ComposeTestCheckFunc( checkDmsEndpointExists(resourceName), ), }, }, }) }
explode_data.jsonl/37166
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 398 }
[ 2830, 3393, 14603, 47359, 35, 1011, 27380, 1557, 85608, 7994, 1155, 353, 8840, 836, 8, 341, 50346, 675, 1669, 330, 8635, 814, 1011, 36699, 950, 1011, 36699, 698, 7000, 437, 764, 1669, 1613, 67880, 2013, 437, 703, 7, 23, 8, 488, 6523, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBlobClientList(t *testing.T) { localNodeID := roachpb.NodeID(1) remoteNodeID := roachpb.NodeID(2) localExternalDir, remoteExternalDir, stopper, cleanUpFn := createTestResources(t) defer cleanUpFn() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) rpcContext.TestingAllowNamedRPCToAnonymousServer = true blobClientFactory := setUpService(t, rpcContext, localNodeID, remoteNodeID, localExternalDir, remoteExternalDir) localFileNames := []string{"file/local/dataA.csv", "file/local/dataB.csv", "file/local/dataC.csv"} remoteFileNames := []string{"file/remote/A.csv", "file/remote/B.csv", "file/remote/C.csv"} for _, fileName := range localFileNames { fullPath := filepath.Join(localExternalDir, fileName) writeTestFile(t, fullPath, []byte("testLocalFile")) } for _, fileName := range remoteFileNames { fullPath := filepath.Join(remoteExternalDir, fileName) writeTestFile(t, fullPath, []byte("testRemoteFile")) } for _, tc := range []struct { name string nodeID roachpb.NodeID dirName string expectedList []string err string }{ { "list-local", localNodeID, "file/local/*.csv", localFileNames, "", }, { "list-remote", remoteNodeID, "file/remote/*.csv", remoteFileNames, "", }, { "list-local-no-match", localNodeID, "file/doesnotexist/*", []string{}, "", }, { "list-remote-no-match", remoteNodeID, "file/doesnotexist/*", []string{}, "", }, { "list-empty-pattern", remoteNodeID, "", []string{}, "pattern cannot be empty", }, { // should list files in top level directory "list-star", remoteNodeID, "*", []string{"file"}, "", }, { "list-outside-external-dir", remoteNodeID, "../*", // will error out []string{}, "outside of external-io-dir is not allowed", }, { "list-backout-external-dir", remoteNodeID, "..", []string{}, "outside of external-io-dir is not allowed", }, } { t.Run(tc.name, func(t *testing.T) { ctx := context.TODO() blobClient, err := blobClientFactory(ctx, tc.nodeID) if err != nil { t.Fatal(err) } list, err := 
blobClient.List(ctx, tc.dirName) if err != nil { if tc.err != "" && testutils.IsError(err, tc.err) { // correct error returned return } t.Fatal(err) } // Check that returned list matches expected list if len(list) != len(tc.expectedList) { t.Fatal(`listed incorrect number of files`, list) } for i, f := range list { if f != tc.expectedList[i] { t.Fatal("incorrect list returned ", list) } } }) } }
explode_data.jsonl/82492
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1171 }
[ 2830, 3393, 37985, 2959, 852, 1155, 353, 8840, 836, 8, 341, 8854, 1955, 915, 1669, 926, 610, 16650, 21714, 915, 7, 16, 340, 197, 18147, 1955, 915, 1669, 926, 610, 16650, 21714, 915, 7, 17, 340, 8854, 25913, 6184, 11, 8699, 25913, 61...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestMaxOpenConns(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") } defer setHookpostCloseConn(nil) setHookpostCloseConn(func(_ *fakeConn, err error) { if err != nil { t.Errorf("Error closing fakeConn: %v", err) } }) db := newTestDB(t, "magicquery") defer closeDB(t, db) driver := db.Driver().(*fakeDriver) // Force the number of open connections to 0 so we can get an accurate // count for the test db.clearAllConns(t) driver.mu.Lock() opens0 := driver.openCount closes0 := driver.closeCount driver.mu.Unlock() db.SetMaxIdleConns(10) db.SetMaxOpenConns(10) stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?") if err != nil { t.Fatal(err) } // Start 50 parallel slow queries. const ( nquery = 50 sleepMillis = 25 nbatch = 2 ) var wg sync.WaitGroup for batch := 0; batch < nbatch; batch++ { for i := 0; i < nquery; i++ { wg.Add(1) go func() { defer wg.Done() var op string if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows { t.Error(err) } }() } // Sleep for twice the expected length of time for the // batch of 50 queries above to finish before starting // the next round. 
time.Sleep(2 * sleepMillis * time.Millisecond) } wg.Wait() if g, w := db.numFreeConns(), 10; g != w { t.Errorf("free conns = %d; want %d", g, w) } if n := db.numDepsPollUntil(20, time.Second); n > 20 { t.Errorf("number of dependencies = %d; expected <= 20", n) db.dumpDeps(t) } driver.mu.Lock() opens := driver.openCount - opens0 closes := driver.closeCount - closes0 driver.mu.Unlock() if opens > 10 { t.Logf("open calls = %d", opens) t.Logf("close calls = %d", closes) t.Errorf("db connections opened = %d; want <= 10", opens) db.dumpDeps(t) } if err := stmt.Close(); err != nil { t.Fatal(err) } if g, w := db.numFreeConns(), 10; g != w { t.Errorf("free conns = %d; want %d", g, w) } if n := db.numDepsPollUntil(10, time.Second); n > 10 { t.Errorf("number of dependencies = %d; expected <= 10", n) db.dumpDeps(t) } db.SetMaxOpenConns(5) if g, w := db.numFreeConns(), 5; g != w { t.Errorf("free conns = %d; want %d", g, w) } if n := db.numDepsPollUntil(5, time.Second); n > 5 { t.Errorf("number of dependencies = %d; expected 0", n) db.dumpDeps(t) } db.SetMaxOpenConns(0) if g, w := db.numFreeConns(), 5; g != w { t.Errorf("free conns = %d; want %d", g, w) } if n := db.numDepsPollUntil(5, time.Second); n > 5 { t.Errorf("number of dependencies = %d; expected 0", n) db.dumpDeps(t) } db.clearAllConns(t) }
explode_data.jsonl/16003
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1130 }
[ 2830, 3393, 5974, 5002, 1109, 4412, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 4886, 5654, 304, 2805, 3856, 1138, 197, 532, 16867, 738, 31679, 2203, 7925, 9701, 27907, 340, 8196, 31679, 2203, 7925, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestInjectImportTS(t *testing.T) { default_suite.expectBundled(t, bundled{ files: map[string]string{ "/entry.ts": ` console.log('here') `, "/inject.js": ` // Unused imports are automatically removed in TypeScript files (this // is a mis-feature of the TypeScript language). However, injected // imports are an esbuild feature so we get to decide what the // semantics are. We do not want injected imports to disappear unless // they have been explicitly marked as having no side effects. console.log('must be present') `, }, entryPaths: []string{"/entry.ts"}, options: config.Options{ Mode: config.ModeConvertFormat, OutputFormat: config.FormatESModule, AbsOutputFile: "/out.js", InjectAbsPaths: []string{ "/inject.js", }, }, }) }
explode_data.jsonl/38586
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 307 }
[ 2830, 3393, 13738, 11511, 9951, 1155, 353, 8840, 836, 8, 341, 11940, 57239, 25952, 33, 1241, 832, 1155, 11, 51450, 515, 197, 74075, 25, 2415, 14032, 30953, 515, 298, 197, 3115, 4085, 21288, 788, 22074, 571, 12160, 1665, 492, 6739, 1305,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDriftctlCmd_Completion(t *testing.T) { cmd := NewDriftctlCmd(mocks.MockBuild{}) output, err := test.Execute(&cmd.Command, "completion", "bash") if output == "" { t.Errorf("Unexpected output: %v", output) } if err != nil { t.Errorf("Unexpected error: %v", err) } expected := "# bash completion for driftctl" if !strings.Contains(output, expected) { t.Errorf("Expected to contain: \n %v\nGot:\n %v", expected, output) } }
explode_data.jsonl/36198
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 170 }
[ 2830, 3393, 8847, 2085, 12373, 15613, 16946, 14386, 1155, 353, 8840, 836, 8, 341, 25920, 1669, 1532, 8847, 2085, 12373, 15613, 1255, 25183, 24664, 11066, 6257, 692, 21170, 11, 1848, 1669, 1273, 13827, 2099, 8710, 12714, 11, 330, 43312, 49...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestAuth(t *testing.T) { for _, c1 := range []struct { name string methods []headers.AuthMethod }{ { "basic", []headers.AuthMethod{headers.AuthBasic}, }, { "digest", []headers.AuthMethod{headers.AuthDigest}, }, { "both", nil, }, } { for _, conf := range []string{ "nofail", "wronguser", "wrongpass", "wrongurl", } { if conf == "wrongurl" && c1.name == "basic" { continue } t.Run(c1.name+"_"+conf, func(t *testing.T) { va := NewValidator("testuser", "testpass", c1.methods) wwwAuthenticate := va.Header() se, err := NewSender(wwwAuthenticate, func() string { if conf == "wronguser" { return "test1user" } return "testuser" }(), func() string { if conf == "wrongpass" { return "test1pass" } return "testpass" }()) require.NoError(t, err) req := &base.Request{ Method: base.Announce, URL: mustParseURL(func() string { if conf == "wrongurl" { return "rtsp://myhost/my1path" } return "rtsp://myhost/mypath" }()), } se.AddAuthorization(req) req.URL = mustParseURL("rtsp://myhost/mypath") err = va.ValidateRequest(req, nil) if conf != "nofail" { require.Error(t, err) } else { require.NoError(t, err) } }) } } }
explode_data.jsonl/50254
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 688 }
[ 2830, 3393, 5087, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 272, 16, 1669, 2088, 3056, 1235, 341, 197, 11609, 262, 914, 198, 197, 42257, 82, 3056, 7713, 25233, 3523, 198, 197, 59403, 197, 197, 515, 298, 197, 1, 22342, 756, 298, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestClientPublishEvent(t *testing.T) { index := "beat-int-pub-single-event" output, client := connectTestEs(t, map[string]interface{}{ "index": index, }) // drop old index preparing test client.Delete(index, "", "", nil) event := outputs.Data{Event: common.MapStr{ "@timestamp": common.Time(time.Now()), "type": "libbeat", "message": "Test message from libbeat", }} err := output.PublishEvent(nil, outputs.Options{Guaranteed: true}, event) if err != nil { t.Fatal(err) } _, _, err = client.Refresh(index) if err != nil { t.Fatal(err) } _, resp, err := client.CountSearchURI(index, "", nil) if err != nil { t.Fatal(err) } assert.Equal(t, 1, resp.Count) }
explode_data.jsonl/40103
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 284 }
[ 2830, 3393, 2959, 50145, 1556, 1155, 353, 8840, 836, 8, 341, 26327, 1669, 330, 22227, 20052, 2268, 392, 56687, 39687, 698, 21170, 11, 2943, 1669, 4564, 2271, 17360, 1155, 11, 2415, 14032, 31344, 67066, 197, 197, 1, 1252, 788, 1922, 345,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestUpdate(t *testing.T) { var ( ctx = context.TODO() repository = reltest.New() scores = &scorestest.Service{} service = New(repository, scores) todo = Todo{ID: 1, Title: "Sleep"} changes = rel.NewChangeset(&todo) ) todo.Title = "Wake up" repository.ExpectUpdate(changes).ForType("todos.Todo") assert.Nil(t, service.Update(ctx, &todo, changes)) assert.NotEmpty(t, todo.ID) repository.AssertExpectations(t) scores.AssertExpectations(t) }
explode_data.jsonl/7574
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 221 }
[ 2830, 3393, 4289, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 20985, 286, 284, 2266, 90988, 741, 197, 17200, 3099, 284, 1351, 1944, 7121, 741, 197, 1903, 7701, 257, 284, 609, 12338, 267, 477, 13860, 16094, 197, 52934, 262, 284, 153...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1