text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestReadyCondition(t *testing.T) { now := time.Now() before := now.Add(-time.Second) nowFunc := func() time.Time { return now } withCapacity := &v1.Node{ Status: v1.NodeStatus{ Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), }, }, } cases := []struct { desc string node *v1.Node runtimeErrors []string networkErrors []string appArmorValidateHostFunc func() error cmStatus cm.Status expectConditions []v1.NodeCondition expectEvents []testEvent }{ { desc: "new, ready", node: withCapacity.DeepCopy(), expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status", now, now)}, // TODO(mtaufen): The current behavior is that we don't send an event for the initial NodeReady condition, // the reason for this is unclear, so we may want to actually send an event, and change these test cases // to ensure an event is sent. }, { desc: "new, ready: apparmor validator passed", node: withCapacity.DeepCopy(), appArmorValidateHostFunc: func() error { return nil }, expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status. AppArmor enabled", now, now)}, }, { desc: "new, ready: apparmor validator failed", node: withCapacity.DeepCopy(), appArmorValidateHostFunc: func() error { return fmt.Errorf("foo") }, // absence of an additional message is understood to mean that AppArmor is disabled expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status", now, now)}, }, { desc: "new, ready: soft requirement warning", node: withCapacity.DeepCopy(), cmStatus: cm.Status{ SoftRequirements: fmt.Errorf("foo"), }, expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status. 
WARNING: foo", now, now)}, }, { desc: "new, not ready: runtime errors", node: withCapacity.DeepCopy(), runtimeErrors: []string{"foo", "bar"}, expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo,bar", now, now)}, }, { desc: "new, not ready: network errors", node: withCapacity.DeepCopy(), networkErrors: []string{"foo", "bar"}, expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo,bar", now, now)}, }, { desc: "new, not ready: runtime and network errors", node: withCapacity.DeepCopy(), runtimeErrors: []string{"runtime"}, networkErrors: []string{"network"}, expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "runtime,network", now, now)}, }, { desc: "new, not ready: missing capacities", node: &v1.Node{}, expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "Missing node capacity for resources: cpu, memory, pods", now, now)}, }, // the transition tests ensure timestamps are set correctly, no need to test the entire condition matrix in this section { desc: "transition to ready", node: func() *v1.Node { node := withCapacity.DeepCopy() node.Status.Conditions = []v1.NodeCondition{*makeReadyCondition(false, "", before, before)} return node }(), expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status", now, now)}, expectEvents: []testEvent{ { eventType: v1.EventTypeNormal, event: events.NodeReady, }, }, }, { desc: "transition to not ready", node: func() *v1.Node { node := withCapacity.DeepCopy() node.Status.Conditions = []v1.NodeCondition{*makeReadyCondition(true, "", before, before)} return node }(), runtimeErrors: []string{"foo"}, expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo", now, now)}, expectEvents: []testEvent{ { eventType: v1.EventTypeNormal, event: events.NodeNotReady, }, }, }, { desc: "ready, no transition", node: func() *v1.Node { node := withCapacity.DeepCopy() node.Status.Conditions = []v1.NodeCondition{*makeReadyCondition(true, "", before, 
before)} return node }(), expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status", before, now)}, expectEvents: []testEvent{}, }, { desc: "not ready, no transition", node: func() *v1.Node { node := withCapacity.DeepCopy() node.Status.Conditions = []v1.NodeCondition{*makeReadyCondition(false, "", before, before)} return node }(), runtimeErrors: []string{"foo"}, expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "foo", before, now)}, expectEvents: []testEvent{}, }, } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { runtimeErrorsFunc := func() []string { return tc.runtimeErrors } networkErrorsFunc := func() []string { return tc.networkErrors } cmStatusFunc := func() cm.Status { return tc.cmStatus } events := []testEvent{} recordEventFunc := func(eventType, event string) { events = append(events, testEvent{ eventType: eventType, event: event, }) } // construct setter setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, recordEventFunc) // call setter on node if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected condition assert.True(t, apiequality.Semantic.DeepEqual(tc.expectConditions, tc.node.Status.Conditions), "Diff: %s", diff.ObjectDiff(tc.expectConditions, tc.node.Status.Conditions)) // check expected events require.Equal(t, len(tc.expectEvents), len(events)) for i := range tc.expectEvents { assert.Equal(t, tc.expectEvents[i], events[i]) } }) } }
explode_data.jsonl/31857
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2564 }
[ 2830, 3393, 19202, 10547, 1155, 353, 8840, 836, 8, 341, 80922, 1669, 882, 13244, 741, 63234, 1669, 1431, 1904, 4080, 1678, 32435, 340, 80922, 9626, 1669, 2915, 368, 882, 16299, 314, 470, 1431, 555, 46948, 29392, 1669, 609, 85, 16, 21714...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTransformGroupAndEnabled(t *testing.T) { tests := []struct { commonFields common.Fields expected []string }{ { commonFields: common.Fields{common.Field{Name: "context", Path: "something"}}, expected: []string{"context"}, }, { commonFields: common.Fields{ common.Field{ Name: "context", Type: "group", Fields: common.Fields{ common.Field{Name: "type", Type: ""}, common.Field{ Name: "metric", Type: "group", Fields: common.Fields{ common.Field{Name: "object"}, }, }, }, }, }, expected: []string{"context.type", "context.metric.object"}, }, { commonFields: common.Fields{ common.Field{Name: "enabledField"}, common.Field{Name: "disabledField", Enabled: &falsy}, //enabled is ignored for Type!=group common.Field{ Name: "enabledGroup", Type: "group", Enabled: &truthy, Fields: common.Fields{ common.Field{Name: "type", Type: ""}, }, }, common.Field{ Name: "context", Type: "group", Enabled: &falsy, Fields: common.Fields{ common.Field{Name: "type", Type: ""}, common.Field{ Name: "metric", Type: "group", Fields: common.Fields{ common.Field{Name: "object"}, }, }, }, }, }, expected: []string{"enabledField", "disabledField", "enabledGroup.type"}, }, } for idx, test := range tests { trans, _ := newFieldsTransformer(version, test.commonFields) transformed, err := trans.transform() assert.NoError(t, err) out := transformed["fields"].([]common.MapStr) assert.Equal(t, len(test.expected)+ctMetaData, len(out)) for i, e := range test.expected { assert.Equal(t, e, out[i]["name"], fmt.Sprintf("Failed for idx %v", idx)) } } }
explode_data.jsonl/37638
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 844 }
[ 2830, 3393, 8963, 2808, 3036, 5462, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 83825, 8941, 4185, 42809, 198, 197, 42400, 257, 3056, 917, 198, 197, 59403, 197, 197, 515, 298, 83825, 8941, 25, 4185, 42809, 90, 546...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCreateGroupSyncable(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() group := th.CreateGroup() groupSyncable := &model.GroupSyncable{ GroupId: group.Id, CanLeave: true, AutoAdd: false, SyncableId: th.BasicTeam.Id, Type: model.GroupSyncableTypeTeam, } gs, err := th.App.CreateGroupSyncable(groupSyncable) require.Nil(t, err) require.NotNil(t, gs) gs, err = th.App.CreateGroupSyncable(groupSyncable) require.NotNil(t, err) require.Nil(t, gs) }
explode_data.jsonl/37041
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 215 }
[ 2830, 3393, 4021, 2808, 12154, 480, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 741, 44260, 1669, 270, 7251, 2808, 741, 44260, 12154, 480, 1669, 609, 2528, 5407, 12154, 480,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBuildEmptyDevice(t *testing.T) { got := &exampleoc.Device{} ygot.BuildEmptyTree(got) ni, err := got.NewNetworkInstance("DEFAULT") if err != nil { t.Fatalf("got unexpected error: %v", err) } ygot.BuildEmptyTree(ni) p, err := ni.NewProtocol(exampleoc.OpenconfigPolicyTypes_INSTALL_PROTOCOL_TYPE_BGP, "15169") if err != nil { t.Fatalf("got unexpected error: %v", err) } ygot.BuildEmptyTree(p) n, err := p.Bgp.NewNeighbor("192.0.2.1") if err != nil { t.Fatalf("got unexpected error: %v", err) } n.PeerAs = ygot.Uint32(42) n.SendCommunity = exampleoc.OpenconfigBgp_CommunityType_STANDARD p.Bgp.Global.As = ygot.Uint32(42) ygot.PruneEmptyBranches(got) want := &exampleoc.Device{ NetworkInstance: map[string]*exampleoc.NetworkInstance{ "DEFAULT": { Name: ygot.String("DEFAULT"), Protocol: map[exampleoc.NetworkInstance_Protocol_Key]*exampleoc.NetworkInstance_Protocol{ {exampleoc.OpenconfigPolicyTypes_INSTALL_PROTOCOL_TYPE_BGP, "15169"}: { Identifier: exampleoc.OpenconfigPolicyTypes_INSTALL_PROTOCOL_TYPE_BGP, Name: ygot.String("15169"), Bgp: &exampleoc.NetworkInstance_Protocol_Bgp{ Global: &exampleoc.NetworkInstance_Protocol_Bgp_Global{ As: ygot.Uint32(42), }, Neighbor: map[string]*exampleoc.NetworkInstance_Protocol_Bgp_Neighbor{ "192.0.2.1": { NeighborAddress: ygot.String("192.0.2.1"), PeerAs: ygot.Uint32(42), SendCommunity: exampleoc.OpenconfigBgp_CommunityType_STANDARD, }, }, }, }, }, }, }, } if diff := pretty.Compare(got, want); diff != "" { t.Errorf("did not get expected device struct, diff(-got,+want):\n%s", diff) } }
explode_data.jsonl/59914
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 773 }
[ 2830, 3393, 11066, 3522, 6985, 1155, 353, 8840, 836, 8, 341, 3174, 354, 1669, 609, 8687, 509, 43995, 16094, 14522, 22390, 25212, 3522, 6533, 3268, 354, 692, 197, 7751, 11, 1848, 1669, 2684, 7121, 12320, 2523, 445, 17285, 1138, 743, 1848...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestUnsharded(t *testing.T) { executor, _, _, sbclookup := createLegacyExecutorEnv() _, err := executorExec(executor, "select id from music_user_map where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from music_user_map where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { t.Errorf("sbclookup.Queries: %+v, want %+v\n", sbclookup.Queries, wantQueries) } }
explode_data.jsonl/67382
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 210 }
[ 2830, 3393, 1806, 927, 20958, 1155, 353, 8840, 836, 8, 341, 67328, 4831, 11, 8358, 8358, 7898, 66, 21020, 1669, 1855, 77415, 25255, 14359, 2822, 197, 6878, 1848, 1669, 31558, 10216, 46896, 4831, 11, 330, 1742, 877, 504, 4627, 3317, 5376...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetCandidates(t *testing.T) { policy := &models.ReplicationPolicy{ ID: 1, Filters: []models.Filter{ { Kind: replication.FilterItemKindTag, Value: "*", }, }, Trigger: &models.Trigger{ Kind: replication.TriggerKindImmediate, }, } sourcer := source.NewSourcer() candidates := []models.FilterItem{ { Kind: replication.FilterItemKindTag, Value: "library/hello-world:release-1.0", }, { Kind: replication.FilterItemKindTag, Value: "library/hello-world:latest", }, } metadata := map[string]interface{}{ "candidates": candidates, } result := getCandidates(policy, sourcer, metadata) assert.Equal(t, 2, len(result)) policy.Filters = []models.Filter{ { Kind: replication.FilterItemKindTag, Value: "release-*", }, } result = getCandidates(policy, sourcer, metadata) assert.Equal(t, 1, len(result)) // test label filter test.InitDatabaseFromEnv() policy.Filters = []models.Filter{ { Kind: replication.FilterItemKindLabel, Value: int64(1), }, } result = getCandidates(policy, sourcer, metadata) assert.Equal(t, 0, len(result)) }
explode_data.jsonl/31955
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 443 }
[ 2830, 3393, 1949, 97134, 1155, 353, 8840, 836, 8, 341, 3223, 8018, 1669, 609, 6507, 2817, 79, 1693, 13825, 515, 197, 29580, 25, 220, 16, 345, 197, 12727, 8612, 25, 3056, 6507, 31696, 515, 298, 197, 515, 571, 197, 10629, 25, 220, 477...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDesOfbEncrypt(t *testing.T) { data := "hello world" key := "abcdefgh" desOfbEncrypt := DesOfbEncrypt([]byte(data), []byte(key)) desOfbDecrypt := DesOfbDecrypt(desOfbEncrypt, []byte(key)) if string(desOfbDecrypt) != data { internal.LogFailedTestInfo(t, "DesOfbEncrypt/DesOfbDecrypt", data, data, string(desOfbDecrypt)) t.FailNow() } }
explode_data.jsonl/7309
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 4896, 2124, 65, 61520, 1155, 353, 8840, 836, 8, 341, 8924, 1669, 330, 14990, 1879, 698, 23634, 1669, 330, 56747, 1837, 52912, 2124, 65, 61520, 1669, 3874, 2124, 65, 61520, 10556, 3782, 2592, 701, 3056, 3782, 4857, 1171, 5291...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestJaroWinkler(t *testing.T) { for _, row := range jwink_testdata { res := JaroWinkler(row[0], row[1]) expected, err := strconv.ParseFloat(row[2], 64) if err != nil { t.Error("bad row in test data") } if math.Abs(res-expected) > 0.001 { t.Errorf("JaroWinkler(%q, %q) => %.3f, expected %.3f", row[0], row[1], res, expected) } } }
explode_data.jsonl/58060
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 166 }
[ 2830, 3393, 41, 17165, 54, 766, 1536, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 2802, 1669, 2088, 502, 86, 766, 4452, 691, 341, 197, 10202, 1669, 619, 17165, 54, 766, 1536, 7835, 58, 15, 1125, 2802, 58, 16, 2546, 197, 42400, 11, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestReconcileServiceInstance(t *testing.T) { fakeKubeClient, fakeCatalogClient, fakeClusterServiceBrokerClient, testController, sharedInformers := newTestController(t, fakeosb.FakeClientConfiguration{ ProvisionReaction: &fakeosb.ProvisionReaction{ Response: &osb.ProvisionResponse{ DashboardURL: &testDashboardURL, }, }, }) addGetNamespaceReaction(fakeKubeClient) sharedInformers.ClusterServiceBrokers().Informer().GetStore().Add(getTestClusterServiceBroker()) sharedInformers.ClusterServiceClasses().Informer().GetStore().Add(getTestClusterServiceClass()) sharedInformers.ClusterServicePlans().Informer().GetStore().Add(getTestClusterServicePlan()) instance := getTestServiceInstanceWithClusterRefs() if err := reconcileServiceInstance(t, testController, instance); err != nil { t.Fatalf("unexpected error: %v", err) } instance = assertServiceInstanceProvisionInProgressIsTheOnlyCatalogClientAction(t, fakeCatalogClient, instance) fakeCatalogClient.ClearActions() assertNumberOfBrokerActions(t, fakeClusterServiceBrokerClient.Actions(), 0) fakeKubeClient.ClearActions() if err := reconcileServiceInstance(t, testController, instance); err != nil { t.Fatalf("This should not fail : %v", err) } brokerActions := fakeClusterServiceBrokerClient.Actions() assertNumberOfBrokerActions(t, brokerActions, 1) assertProvision(t, brokerActions[0], &osb.ProvisionRequest{ AcceptsIncomplete: true, InstanceID: testServiceInstanceGUID, ServiceID: testClusterServiceClassGUID, PlanID: testClusterServicePlanGUID, OrganizationGUID: testClusterID, SpaceGUID: testNamespaceGUID, Context: testContext}) instanceKey := testNamespace + "/" + testServiceInstanceName // Since synchronous operation, must not make it into the polling queue. if testController.instancePollingQueue.NumRequeues(instanceKey) != 0 { t.Fatalf("Expected polling queue to not have any record of test instance") } actions := fakeCatalogClient.Actions() assertNumberOfActions(t, actions, 1) // verify no kube resources created. 
// One single action comes from getting namespace uid kubeActions := fakeKubeClient.Actions() if err := checkKubeClientActions(kubeActions, []kubeClientAction{ {verb: "get", resourceName: "namespaces", checkType: checkGetActionType}, }); err != nil { t.Fatal(err) } updatedServiceInstance := assertUpdateStatus(t, actions[0], instance) assertServiceInstanceOperationSuccess(t, updatedServiceInstance, v1beta1.ServiceInstanceOperationProvision, testClusterServicePlanName, testClusterServicePlanGUID, instance) assertServiceInstanceDashboardURL(t, updatedServiceInstance, testDashboardURL) events := getRecordedEvents(testController) expectedEvent := normalEventBuilder(successProvisionReason).msg(successProvisionMessage) if err := checkEvents(events, expectedEvent.stringArr()); err != nil { t.Fatal(err) } }
explode_data.jsonl/58141
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 926 }
[ 2830, 3393, 693, 40446, 457, 1860, 2523, 1155, 353, 8840, 836, 8, 341, 1166, 726, 42, 3760, 2959, 11, 12418, 41606, 2959, 11, 12418, 28678, 1860, 65545, 2959, 11, 1273, 2051, 11, 6094, 37891, 388, 1669, 501, 2271, 2051, 1155, 11, 1241...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestCreateSessionTimeout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() go func() { select { case <-time.After(2 * time.Second): t.Error("no startup timeout") case <-ctx.Done(): } }() cluster := createCluster() cluster.Hosts = []string{"127.0.0.1:1"} session, err := cluster.CreateSession() if err == nil { session.Close() t.Fatal("expected ErrNoConnectionsStarted, but no error was returned.") } }
explode_data.jsonl/11145
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 172 }
[ 2830, 3393, 4021, 5283, 7636, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 16867, 9121, 2822, 30680, 2915, 368, 341, 197, 38010, 341, 197, 2722, 9119, 1678, 36892, 7, 17, 353, 882, 32435, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUpdateStatus(t *testing.T) { testNetwork := cloud.ResourcePath("network", &meta.Key{Name: "test-network"}) testSubnetwork := cloud.ResourcePath("subnetwork", &meta.Key{Name: "test-subnetwork"}) testNegType := negtypes.VmIpPortEndpointType testNegRefs := []negv1beta1.NegObjectReference{ { Id: "0", SelfLink: "self-link-0", NetworkEndpointType: "neg-type-0", }, { Id: "1", SelfLink: "self-link-1", NetworkEndpointType: "neg-type-1", }, } testCases := []struct { desc string populateConditions map[string]bool negRefs []negv1beta1.NegObjectReference expectedNeedInit bool }{ {desc: "conditions don't exist, neg refs don't exist", populateConditions: map[string]bool{ negv1beta1.Initialized: false, negv1beta1.Synced: false, }, expectedNeedInit: true, }, {desc: "both conditions exist, neg refs exist", populateConditions: map[string]bool{ negv1beta1.Initialized: true, negv1beta1.Synced: true, }, negRefs: testNegRefs, expectedNeedInit: false, }, {desc: "both conditions exist, neg refs don't exist", populateConditions: map[string]bool{ negv1beta1.Initialized: true, negv1beta1.Synced: true, }, expectedNeedInit: true, }, {desc: "initialized exists, neg refs exist", populateConditions: map[string]bool{ negv1beta1.Initialized: true, negv1beta1.Synced: false, }, negRefs: testNegRefs, expectedNeedInit: false, }, {desc: "synced exists, neg refs exist", populateConditions: map[string]bool{ negv1beta1.Initialized: false, negv1beta1.Synced: true, }, negRefs: testNegRefs, expectedNeedInit: true, }, {desc: "conditions don't exist, negRefs exist", populateConditions: map[string]bool{ negv1beta1.Initialized: false, negv1beta1.Synced: false, }, negRefs: testNegRefs, expectedNeedInit: true, }, } for _, enableEndpointSlices := range []bool{false, true} { for _, syncErr := range []error{nil, fmt.Errorf("error")} { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { fakeCloud := negtypes.NewFakeNetworkEndpointGroupCloud(testSubnetwork, testNetwork) _, syncer := 
newTestTransactionSyncer(fakeCloud, testNegType, false, enableEndpointSlices) svcNegClient := syncer.svcNegClient syncer.needInit = false if len(tc.negRefs) == 0 { err := fakeCloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{ Version: syncer.NegSyncerKey.GetAPIVersion(), Name: testNegName, NetworkEndpointType: string(syncer.NegSyncerKey.NegType), Network: fakeCloud.NetworkURL(), Subnetwork: fakeCloud.SubnetworkURL(), Description: "", }, testZone1) _, err = fakeCloud.GetNetworkEndpointGroup(testNegName, testZone1, syncer.NegSyncerKey.GetAPIVersion()) if err != nil { t.Errorf("failed to get neg from cloud: %s ", err) } } // Since timestamp gets truncated to the second, there is a chance that the timestamps will be the same as LastTransitionTime or LastSyncTime so use creation TS from an earlier date creationTS := v1.Date(2020, time.July, 23, 0, 0, 0, 0, time.UTC) origCR := createNegCR(testNegName, creationTS, tc.populateConditions[negv1beta1.Initialized], tc.populateConditions[negv1beta1.Synced], tc.negRefs) origCR, err := svcNegClient.NetworkingV1beta1().ServiceNetworkEndpointGroups(testNamespace).Create(context2.Background(), origCR, v1.CreateOptions{}) if err != nil { t.Errorf("Failed to create test NEG CR: %s", err) } syncer.svcNegLister.Add(origCR) syncer.updateStatus(syncErr) negCR, err := svcNegClient.NetworkingV1beta1().ServiceNetworkEndpointGroups(testNamespace).Get(context2.Background(), testNegName, v1.GetOptions{}) if err != nil { t.Errorf("Failed to create test NEG CR: %s", err) } if syncErr != nil { checkCondition(t, negCR.Status.Conditions, negv1beta1.Synced, creationTS, corev1.ConditionFalse, true) } else if tc.populateConditions[negv1beta1.Synced] { checkCondition(t, negCR.Status.Conditions, negv1beta1.Synced, creationTS, corev1.ConditionTrue, false) } else { checkCondition(t, negCR.Status.Conditions, negv1beta1.Synced, creationTS, corev1.ConditionTrue, true) } if syncer.needInit != tc.expectedNeedInit { t.Errorf("expected 
manager.needInit to be %t, but was %t", tc.expectedNeedInit, syncer.needInit) } if !creationTS.Before(&negCR.Status.LastSyncTime) { t.Errorf("neg cr should have an updated LastSyncTime") } }) } } } }
explode_data.jsonl/38789
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2117 }
[ 2830, 3393, 4289, 2522, 1155, 353, 8840, 836, 8, 341, 18185, 12320, 1669, 9437, 20766, 1820, 445, 17511, 497, 609, 5490, 9610, 63121, 25, 330, 1944, 56732, 23625, 18185, 3136, 17511, 1669, 9437, 20766, 1820, 445, 1966, 17511, 497, 609, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestValidatePort(t *testing.T) { ports := map[int]bool{ 0: false, 65536: false, -1: false, 100: true, 1000: true, 65535: true, } for port, valid := range ports { if got := ValidatePort(port); (got == nil) != valid { t.Errorf("Failed: got valid=%t but wanted valid=%t: %v for %d", got == nil, valid, got, port) } } }
explode_data.jsonl/56893
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 160 }
[ 2830, 3393, 17926, 7084, 1155, 353, 8840, 836, 8, 341, 197, 3394, 1669, 2415, 18640, 96436, 515, 197, 197, 15, 25, 257, 895, 345, 197, 197, 21, 20, 20, 18, 21, 25, 895, 345, 197, 197, 12, 16, 25, 262, 895, 345, 197, 197, 16, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRPC_QueryNodes(t *testing.T) { codec.UpgradeHeight = 7000 _, _, cleanup := NewInMemoryTendermintNode(t, oneValTwoNodeGenesisState()) _, stopCli, evtChan := subscribeTo(t, tmTypes.EventNewBlock) <-evtChan // Wait for block kb := getInMemoryKeybase() cb, err := kb.GetCoinbase() assert.Nil(t, err) var params = HeightAndValidatorOptsParams{ Height: 0, Opts: types2.QueryValidatorsParams{ StakingStatus: types.Staked, Page: 1, Limit: 1, }, } q := newQueryRequest("nodes", newBody(params)) rec := httptest.NewRecorder() Nodes(rec, q, httprouter.Params{}) body := rec.Body.String() address := cb.GetAddress().String() assert.True(t, strings.Contains(body, address)) <-evtChan // Wait for block q = newQueryRequest("nodes", newBody(params)) rec = httptest.NewRecorder() Nodes(rec, q, httprouter.Params{}) body = rec.Body.String() address = cb.GetAddress().String() assert.True(t, strings.Contains(body, address)) cleanup() stopCli() }
explode_data.jsonl/44712
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 401 }
[ 2830, 3393, 29528, 48042, 12288, 1155, 353, 8840, 836, 8, 341, 43343, 66, 13, 43861, 3640, 284, 220, 22, 15, 15, 15, 198, 197, 6878, 8358, 21290, 1669, 1532, 641, 10642, 51, 1659, 67791, 1955, 1155, 11, 825, 2208, 11613, 1955, 84652, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_GetAuthURL(t *testing.T) { t.Parallel() a := assert.New(t) s := &fitbit.Session{} _, err := s.GetAuthURL() a.Error(err) s.AuthURL = "/foo" url, _ := s.GetAuthURL() a.Equal(url, "/foo") }
explode_data.jsonl/16573
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 98 }
[ 2830, 3393, 13614, 5087, 3144, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 11323, 1669, 2060, 7121, 1155, 340, 1903, 1669, 609, 6276, 4489, 20674, 31483, 197, 6878, 1848, 1669, 274, 2234, 5087, 3144, 741, 11323, 6141, 3964, 69...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMatch(t *testing.T) { var testData = []struct { rawYql string data map[string]interface{} out bool }{ { rawYql: `age>23 and (sex in ('boy','girl') or sex='other') and score>=95 and rank !in ('b','c','d')`, data: map[string]interface{}{ "age": int64(24), "sex": "boy", "score": int64(95), "rank": "s", }, out: true, }, { rawYql: `age>23 and (sex in ('boy','girl') or sex='other')`, data: map[string]interface{}{ "age": int64(24), "sex": "other", }, out: true, }, { rawYql: `age>23 and (sex in ('boy','girl') or sex='other')`, data: map[string]interface{}{ "age": int64(24), "sex": "boy", }, out: true, }, { rawYql: `age>23 and (sex in ('boy','girl') or some!=5) and words='hello world'`, data: map[string]interface{}{ "age": int64(211), "sex": "boy", "some": int64(6), "words": "hello world", }, out: true, }, { rawYql: `age>23 and (sex in ('boy','girl') or some!=5) and words='hello world'`, data: map[string]interface{}{ "age": int64(21), "sex": "boy", "some": int64(6), "words": "hello world", }, out: false, }, { rawYql: `tag in (1,3,5) and status!=0`, data: map[string]interface{}{ "tag": []int64{1, 5}, "status": int64(3), }, out: true, }, } ass := assert.New(t) for _, tc := range testData { ok, err := Match(tc.rawYql, tc.data) ass.NoError(err) ass.Equal(tc.out, ok, "rawYql=%s||data=%+v", tc.rawYql, tc.data) } }
explode_data.jsonl/65945
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 781 }
[ 2830, 3393, 8331, 1155, 353, 8840, 836, 8, 341, 2405, 67348, 284, 3056, 1235, 341, 197, 76559, 56, 1470, 914, 198, 197, 8924, 256, 2415, 14032, 31344, 16094, 197, 13967, 262, 1807, 198, 197, 59403, 197, 197, 515, 298, 76559, 56, 1470,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMinProperties(t *testing.T) { GenerateValuesAsYaml(t, "minProperties.test.schema.json", func(console *tests.ConsoleWrapper, donec chan struct{}) { defer close(donec) // Test boolean type console.ExpectString("Enter a value for stringValue") console.SendLine("") console.ExpectString("Enter a value for stringValue1") console.SendLine("") console.ExpectString("Sorry, your reply was invalid: nestedObject has less than 1 items, has []") console.ExpectString("Enter a value for stringValue") console.SendLine("abc") console.ExpectString("Enter a value for stringValue1") console.SendLine("def") console.ExpectEOF() }) }
explode_data.jsonl/61761
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 233 }
[ 2830, 3393, 6217, 7903, 1155, 353, 8840, 836, 8, 341, 197, 31115, 6227, 2121, 56, 9467, 1155, 11, 330, 1065, 7903, 5958, 30892, 4323, 756, 197, 29244, 52818, 353, 23841, 46298, 11542, 11, 2814, 66, 26023, 2036, 28875, 341, 298, 16867, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseIntegers(t *testing.T) { data := "1,2,3,4,5,6" assert.Equal(t, []int{1, 2, 3, 4, 5, 6}, ParseIntegers(data, ",")) assert.Panics(t, func() { ParseIntegers("1,2,3,x,5,6", ",") }) }
explode_data.jsonl/37800
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 105 }
[ 2830, 3393, 14463, 1072, 67592, 1155, 353, 8840, 836, 8, 341, 8924, 1669, 330, 16, 11, 17, 11, 18, 11, 19, 11, 20, 11, 21, 698, 6948, 12808, 1155, 11, 3056, 396, 90, 16, 11, 220, 17, 11, 220, 18, 11, 220, 19, 11, 220, 20, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestHz evaluates the (hz …) form through the engine-backed runtime.
// Integer, float, note-name string and keyword spellings of A4/440 all
// evaluate to the same value, while a non-note string or a missing
// argument is an error.
func TestHz(t *testing.T) {
	var (
		messages = messageChannel{make(chan *engine.Message)}
		eng, err = engine.New(&backend{
			sampleRate: sampleRate,
			frameSize:  frameSize,
		}, frameSize, engine.WithMessageChannel(messages))
		logger = log.New(os.Stdout, "", -1)
	)
	require.NoError(t, err)
	run, err := New(eng, logger)
	require.NoError(t, err)

	// All four spellings below must produce the identical Float64 value.
	v, err := run.Eval([]byte(`(hz 440)`))
	require.NoError(t, err)
	require.Equal(t, 0.009977324263038548, v.(dsp.Valuer).Float64())

	v, err = run.Eval([]byte(`(hz 440.0)`))
	require.NoError(t, err)
	require.Equal(t, 0.009977324263038548, v.(dsp.Valuer).Float64())

	v, err = run.Eval([]byte(`(hz "A4")`))
	require.NoError(t, err)
	require.Equal(t, 0.009977324263038548, v.(dsp.Valuer).Float64())

	v, err = run.Eval([]byte(`(hz :A4)`))
	require.NoError(t, err)
	require.Equal(t, 0.009977324263038548, v.(dsp.Valuer).Float64())

	// A numeric string that is not a note name is an error.
	_, err = run.Eval([]byte(`(hz "111")`))
	require.Error(t, err)

	// Missing argument is an error.
	_, err = run.Eval([]byte(`(hz)`))
	require.Error(t, err)
}
explode_data.jsonl/57806
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 463 }
[ 2830, 3393, 11475, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 2109, 3737, 284, 1943, 9629, 90, 6927, 35190, 353, 8512, 8472, 10569, 197, 197, 826, 11, 1848, 284, 4712, 7121, 2099, 20942, 515, 298, 1903, 1516, 11564, 25, 6077, 1156...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestRouterTimeout registers one request per request type on the chain
// router and waits for the timeout manager to expire them all, asserting
// that each corresponding *Failed callback on the bootstrapper fired.
func TestRouterTimeout(t *testing.T) {
	// Create a timeout manager
	maxTimeout := 25 * time.Millisecond
	tm, err := timeout.NewManager(
		&timer.AdaptiveTimeoutConfig{
			InitialTimeout:     10 * time.Millisecond,
			MinimumTimeout:     10 * time.Millisecond,
			MaximumTimeout:     maxTimeout,
			TimeoutCoefficient: 1,
			TimeoutHalflife:    5 * time.Minute,
		},
		benchlist.NewNoBenchlist(),
		"",
		prometheus.NewRegistry(),
	)
	if err != nil {
		t.Fatal(err)
	}
	go tm.Dispatch()

	// Create a router
	chainRouter := ChainRouter{}
	metrics := prometheus.NewRegistry()
	mc, err := message.NewCreator(metrics, true, "dummyNamespace", 10*time.Second)
	assert.NoError(t, err)
	err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Millisecond, ids.Set{}, nil, HealthConfig{}, "", prometheus.NewRegistry())
	assert.NoError(t, err)

	// Create bootstrapper, engine and handler
	var (
		// One flag per failure callback; all must be set by the end.
		calledGetFailed, calledGetAncestorsFailed,
		calledQueryFailed, calledQueryFailed2,
		calledGetAcceptedFailed, calledGetAcceptedFrontierFailed bool

		// Counted down once per expired request.
		wg = sync.WaitGroup{}
	)
	ctx := snow.DefaultConsensusContextTest()
	vdrs := validators.NewSet()
	err = vdrs.AddWeight(ids.GenerateTestNodeID(), 1)
	assert.NoError(t, err)
	resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second)
	assert.NoError(t, err)
	handler, err := handler.New(
		mc,
		ctx,
		vdrs,
		nil,
		nil,
		time.Second,
		resourceTracker,
	)
	assert.NoError(t, err)
	bootstrapper := &common.BootstrapperTest{
		BootstrapableTest: common.BootstrapableTest{
			T: t,
		},
		EngineTest: common.EngineTest{
			T: t,
		},
	}
	bootstrapper.Default(true)
	bootstrapper.CantGossip = false
	bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx }
	bootstrapper.ConnectedF = func(nodeID ids.NodeID, nodeVersion version.Application) error { return nil }
	bootstrapper.HaltF = func() {}
	bootstrapper.GetFailedF = func(nodeID ids.NodeID, requestID uint32) error { wg.Done(); calledGetFailed = true; return nil }
	bootstrapper.GetAncestorsFailedF = func(nodeID ids.NodeID, requestID uint32) error {
		defer wg.Done()
		calledGetAncestorsFailed = true
		return nil
	}
	// Query is registered twice (two Chits ops below); the second expiry
	// sets calledQueryFailed2.
	bootstrapper.QueryFailedF = func(nodeID ids.NodeID, requestID uint32) error {
		defer wg.Done()
		if !calledQueryFailed {
			calledQueryFailed = true
			return nil
		}
		calledQueryFailed2 = true
		return nil
	}
	bootstrapper.GetAcceptedFailedF = func(nodeID ids.NodeID, requestID uint32) error {
		defer wg.Done()
		calledGetAcceptedFailed = true
		return nil
	}
	bootstrapper.GetAcceptedFrontierFailedF = func(nodeID ids.NodeID, requestID uint32) error {
		defer wg.Done()
		calledGetAcceptedFrontierFailed = true
		return nil
	}
	handler.SetBootstrapper(bootstrapper)
	ctx.SetState(snow.Bootstrapping) // assumed bootstrapping is ongoing

	chainRouter.AddChain(handler)
	bootstrapper.StartF = func(startReqID uint32) error { return nil }
	handler.Start(false)

	// Register requests for each request type
	msgs := []message.Op{
		message.Put,
		message.Ancestors,
		message.Chits,
		message.Chits,
		message.Accepted,
		message.AcceptedFrontier,
	}
	wg.Add(len(msgs))
	for i, msg := range msgs {
		chainRouter.RegisterRequest(ids.GenerateTestNodeID(), ctx.ChainID, uint32(i), msg)
	}

	// Block until every registered request has timed out.
	wg.Wait()
	chainRouter.lock.Lock()
	defer chainRouter.lock.Unlock()
	assert.True(t, calledGetFailed && calledGetAncestorsFailed && calledQueryFailed2 && calledGetAcceptedFailed && calledGetAcceptedFrontierFailed)
}
explode_data.jsonl/76078
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1323 }
[ 2830, 3393, 9523, 7636, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 264, 9632, 6645, 198, 22543, 7636, 1669, 220, 17, 20, 353, 882, 71482, 198, 3244, 76, 11, 1848, 1669, 9632, 7121, 2043, 1006, 197, 197, 5, 19278, 17865, 27781, 76...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestIDPIDPInitiatedExistingSession exercises the IdP-initiated SSO flow
// when the session provider already has a session for the browser: the
// handler must respond 200 and the body must match the golden file.
func TestIDPIDPInitiatedExistingSession(t *testing.T) {
	test := NewIdentifyProviderTest(t)
	// Stub the session provider so every request resolves to an existing
	// "alice" session.
	test.IDP.SessionProvider = &mockSessionProvider{
		GetSessionFunc: func(w http.ResponseWriter, r *http.Request, req *IdpAuthnRequest) *Session {
			return &Session{
				ID:       "f00df00df00d",
				UserName: "alice",
			}
		},
	}
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "https://idp.example.com/services/sp/whoami", nil)
	test.IDP.ServeIDPInitiated(w, r, test.SP.MetadataURL.String(), "ThisIsTheRelayState")
	assert.Check(t, is.Equal(200, w.Code))
	// Rendered response is compared to the per-test golden file
	// ("<TestName>_response").
	golden.Assert(t, w.Body.String(), t.Name()+"_response")
}
explode_data.jsonl/19834
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 262 }
[ 2830, 3393, 915, 33751, 47, 3803, 10029, 53067, 5283, 1155, 353, 8840, 836, 8, 341, 18185, 1669, 1532, 28301, 1437, 5179, 2271, 1155, 340, 18185, 9910, 47, 20674, 5179, 284, 609, 16712, 5283, 5179, 515, 197, 37654, 5283, 9626, 25, 2915,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChannelableGetListType(t *testing.T) { c := &Channelable{} switch c.GetListType().(type) { case *ChannelableList: // expected default: t.Errorf("expected GetListType to return *ChannelableList, got %T", c.GetListType()) } }
explode_data.jsonl/82004
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 88 }
[ 2830, 3393, 9629, 480, 1949, 852, 929, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 609, 9629, 480, 16094, 8961, 272, 2234, 852, 929, 1005, 7, 1313, 8, 341, 2722, 353, 9629, 480, 852, 510, 197, 197, 322, 3601, 198, 11940, 510, 197, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestDockerErrorContextCancel checks that attestation surfaces a
// "context canceled" error when the context is canceled while the plugin
// is waiting to retry after a failed docker ContainerInspect call.
func TestDockerErrorContextCancel(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockDocker := mock_docker.NewMockDocker(mockCtrl)
	mockClock := clock.NewMock(t)
	fs := newFakeFileSystem(testCgroupEntries)
	p := newTestPlugin(
		t,
		withMockClock(mockClock),
		withMockDocker(mockDocker),
		withFileSystem(fs),
	)
	ctx, cancel := context.WithCancel(context.Background())
	// The single inspect attempt fails, putting the plugin into its retry wait.
	mockDocker.EXPECT().
		ContainerInspect(gomock.Any(), testContainerID).
		Return(types.ContainerJSON{}, errors.New("docker error"))
	go func() {
		// Wait until the plugin arms its retry timer on the mock clock.
		mockClock.WaitForAfter(time.Second, "never got call to 'after'")
		// cancel the context after the first call
		cancel()
	}()
	res, err := doAttestWithContext(ctx, t, p, &workloadattestor.AttestRequest{Pid: 123})
	require.Error(t, err)
	require.Contains(t, err.Error(), "context canceled")
	require.Nil(t, res)
}
explode_data.jsonl/28000
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 35, 13659, 1454, 1972, 9269, 1155, 353, 8840, 836, 8, 341, 77333, 15001, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 7860, 15001, 991, 18176, 741, 77333, 35, 13659, 1669, 7860, 814, 13659, 7121, 11571, 35, 13659, 303...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestShouldSetNumberOfNodesForTrialPlan verifies that provisioning input
// built for the trial plan pins the Gardener autoscaler min and max to the
// configured TrialNodesNumber.
func TestShouldSetNumberOfNodesForTrialPlan(t *testing.T) {
	// given
	optComponentsSvc := dummyOptionalComponentServiceMock(fixKymaComponentList())
	componentsProvider := &automock.ComponentListProvider{}
	componentsProvider.On("AllComponents", mock.AnythingOfType("string")).Return(fixKymaComponentList(), nil)

	// TrialNodesNumber: 2 is the value the assertions below check for.
	builder, err := NewInputBuilderFactory(optComponentsSvc, runtime.NewDisabledComponentsProvider(), componentsProvider,
		Config{TrialNodesNumber: 2}, "not-important", fixTrialRegionMapping())
	assert.NoError(t, err)

	pp := fixProvisioningParameters(broker.TrialPlanID, "")
	creator, err := builder.CreateProvisionInput(pp, internal.RuntimeVersionData{Version: "1.17.0", Origin: internal.Defaults})
	require.NoError(t, err)
	creator.SetProvisioningParameters(pp)

	// when
	input, err := creator.CreateProvisionRuntimeInput()
	require.NoError(t, err)

	// then
	assert.Equal(t, 2, input.ClusterConfig.GardenerConfig.AutoScalerMin)
	assert.Equal(t, 2, input.ClusterConfig.GardenerConfig.AutoScalerMax)
}
explode_data.jsonl/69388
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 323 }
[ 2830, 3393, 14996, 1649, 40619, 12288, 2461, 81923, 20485, 1155, 353, 8840, 836, 8, 341, 197, 322, 2661, 198, 64838, 10443, 92766, 1669, 17292, 15309, 2189, 1860, 11571, 955, 941, 42, 1600, 64, 2189, 852, 2398, 197, 5149, 5179, 1669, 60...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestOneRender4Tiles renders a single zoom-1, size-2 metatile (2x2 = 4
// tiles) through the render pool and checks each resulting tile image
// against its reference file.
func TestOneRender4Tiles(t *testing.T) {
	rpool, err := NewRenderPool(sampledata.SlaveCmd, 1, 1, 0, 0, executionTimeout)
	require.Nil(t, err)
	coord := gopnik.TileCoord{
		X:    0,
		Y:    0,
		Zoom: 1,
		Size: 2,
	}
	ansCh := make(chan *RenderPoolResponse)
	err = rpool.EnqueueRequest(coord, ansCh, gopnikrpc.Priority_HIGH)
	require.Nil(t, err)
	// Block until the pool delivers the render response.
	ans := <-ansCh
	require.Nil(t, ans.Error)
	require.Equal(t, len(ans.Tiles), 4)
	// Tile at index i*2+j is compared against "1_<j>_<i>.png" — note the
	// inner index j comes first in the reference filename.
	for i := 0; i < 2; i++ {
		for j := 0; j < 2; j++ {
			sampledata.CheckTile(t, ans.Tiles[i*2+j].Image, fmt.Sprintf("1_%d_%d.png", j, i))
		}
	}
}
explode_data.jsonl/50618
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 285 }
[ 2830, 3393, 3966, 6750, 19, 58365, 1155, 353, 8840, 836, 8, 341, 7000, 10285, 11, 1848, 1669, 1532, 6750, 10551, 32968, 691, 808, 75, 523, 15613, 11, 220, 16, 11, 220, 16, 11, 220, 15, 11, 220, 15, 11, 11320, 7636, 340, 17957, 596...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetEnvAsIntOrFallback(t *testing.T) { const expected = 1 assert := assert.New(t) key := "FLOCKER_SET_VAR" os.Setenv(key, strconv.Itoa(expected)) returnVal, _ := GetEnvAsIntOrFallback(key, 1) assert.Equal(expected, returnVal) key = "FLOCKER_UNSET_VAR" returnVal, _ = GetEnvAsIntOrFallback(key, expected) assert.Equal(expected, returnVal) }
explode_data.jsonl/36781
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 1949, 14359, 2121, 1072, 2195, 87206, 1155, 353, 8840, 836, 8, 341, 4777, 3601, 284, 220, 16, 271, 6948, 1669, 2060, 7121, 1155, 692, 23634, 1669, 330, 37, 8044, 640, 8481, 25750, 698, 25078, 4202, 3160, 4857, 11, 33317, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEngine_TimeTag(t *testing.T) { engine := NewDefaultEngine() defer engine.Close() engine.MustOpen() pt := models.MustNewPoint( "cpu", models.NewTags(map[string]string{"time": "value"}), map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) if err := engine.Engine.WritePoints(context.TODO(), []models.Point{pt}); err == nil { t.Fatal("expected error: got nil") } pt = models.MustNewPoint( "cpu", models.NewTags(map[string]string{"foo": "bar", "time": "value"}), map[string]interface{}{"value": 1.0}, time.Unix(1, 2), ) if err := engine.Engine.WritePoints(context.TODO(), []models.Point{pt}); err == nil { t.Fatalf("unexpected error: %v", err) } }
explode_data.jsonl/5982
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 274 }
[ 2830, 3393, 4571, 39080, 5668, 1155, 353, 8840, 836, 8, 341, 80118, 1669, 1532, 3675, 4571, 741, 16867, 4712, 10421, 741, 80118, 50463, 5002, 2822, 60796, 1669, 4119, 50463, 3564, 2609, 1006, 197, 197, 1, 16475, 756, 197, 19727, 82, 712...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMatches_HasDocID(t *testing.T) { type fields struct { vectors []testVector } type args struct { docID int } tests := []struct { name string fields fields args args want bool }{ { "no match", fields{vectors: []testVector{ {"word", []Vector{{1, 1}}}}}, args{docID: 10}, false, }, { "match", fields{vectors: []testVector{ {"word", []Vector{{1, 1}}}}}, args{docID: 1}, true, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { m := createMatches(tt.fields.vectors) if got := m.HasDocID(tt.args.docID); got != tt.want { t.Errorf("Matches.HasDocID() %s = %v, want %v", tt.name, got, tt.want) } }) } }
explode_data.jsonl/22247
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 347 }
[ 2830, 3393, 42470, 2039, 300, 9550, 915, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 5195, 10605, 3056, 1944, 3781, 198, 197, 630, 13158, 2827, 2036, 341, 197, 59536, 915, 526, 198, 197, 630, 78216, 1669, 3056, 1235, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestProcess_OnAnalyzeProjectUsecase_WhenFailingToSaveIdentifiers_ShouldReturnError
// wires repository mocks so that identifier persistence fails, and expects
// the analyze-project usecase to return ErrUnableToSaveIdentifiers with
// empty results.
func TestProcess_OnAnalyzeProjectUsecase_WhenFailingToSaveIdentifiers_ShouldReturnError(t *testing.T) {
	project := entity.Project{
		Reference: "eroatta/test",
		SourceCode: entity.SourceCode{
			Hash:     "asdf1234asdf",
			Location: "/tmp/repositories/eroatta/test",
			Files:    []string{"main.go"},
		},
	}
	projectRepositoryMock := projectRepositoryMock{
		project: project,
	}
	sourceCodeRepositoryMock := sourceCodeFileReaderMock{
		files: map[string][]byte{
			"main.go": []byte("package main"),
		},
		err: nil,
	}
	// The identifier repository is the failing collaborator under test.
	identifierRepositoryMock := identifierRepositoryMock{
		err: repository.ErrIdentifierUnexpected,
	}
	analysisRepositoryMock := analysisRepositoryMock{
		analysisResults: entity.AnalysisResults{},
		getErr:          repository.ErrAnalysisNoResults,
	}
	config := &entity.AnalysisConfig{
		Miners:                    []string{},
		ExtractorFactory:          newExtractorMock,
		Splitters:                 []string{"conserv"},
		SplittingAlgorithmFactory: splitter.NewSplitterFactory(),
		Expanders:                 []string{"mock"},
		ExpansionAlgorithmFactory: expanderAbstractFactoryMock{},
	}

	uc := usecase.NewAnalyzeProjectUsecase(projectRepositoryMock, sourceCodeRepositoryMock, identifierRepositoryMock, analysisRepositoryMock, config)

	projectID, _ := uuid.NewUUID()
	results, err := uc.Process(context.TODO(), projectID)

	assert.EqualError(t, err, usecase.ErrUnableToSaveIdentifiers.Error())
	assert.Empty(t, results)
}
explode_data.jsonl/64108
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 558 }
[ 2830, 3393, 7423, 35482, 2082, 55856, 7849, 52, 5024, 519, 62, 4498, 37, 14277, 1249, 8784, 28301, 11836, 36578, 616, 5598, 1454, 1155, 353, 8840, 836, 8, 341, 72470, 1669, 5387, 30944, 515, 197, 197, 8856, 25, 330, 2328, 31919, 12697, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestZKGroupStorageRegisterAndGetAndDeleteConsumer covers consumer
// register/get/delete round-trips, including the double-registration
// failure case.
// NOTE(review): integration test — requires a live ZooKeeper reachable at
// 127.0.0.1:2181.
func TestZKGroupStorageRegisterAndGetAndDeleteConsumer(t *testing.T) {
	zk := newZKGroupStorage([]string{"127.0.0.1:2181"}, 6*time.Second)
	// Register then delete once to prove the basic round trip works.
	err := zk.registerConsumer(testGroup, testConsumerID, nil)
	if err != nil {
		t.Fatal(err)
	}
	err = zk.deleteConsumer(testGroup, testConsumerID)
	if err != nil {
		t.Fatal(err)
	}
	// Registering the same consumer ID twice must fail.
	zk.registerConsumer(testGroup, testConsumerID, nil)
	err = zk.registerConsumer(testGroup, testConsumerID, nil)
	if err == nil {
		zk.deleteConsumer(testGroup, testConsumerID) // clean up before failing
		t.Fatal("Expected it can't register consumer twice, but it did")
	}
	// The consumer list read back must contain the registered ID.
	consumerList, err := zk.getConsumerList(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	if consumerList[0] != testConsumerID {
		zk.deleteConsumer(testGroup, testConsumerID) // clean up before failing
		t.Fatal("consumer id get from zookeeper isn't expected")
	}
	zk.deleteConsumer(testGroup, testConsumerID)
}
explode_data.jsonl/5651
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 315 }
[ 2830, 3393, 57, 42, 2808, 5793, 8690, 97726, 3036, 6435, 29968, 1155, 353, 8840, 836, 8, 341, 20832, 74, 1669, 501, 57, 42, 2808, 5793, 10556, 917, 4913, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, 17, 16, 23, 16, 14345, 220, 21, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestValidateCertSANs(t *testing.T) { var tests = []struct { sans []string expected bool }{ {[]string{}, true}, // ok if not provided {[]string{"1,2,,3"}, false}, // not a DNS label or IP {[]string{"my-hostname", "???&?.garbage"}, false}, // not valid {[]string{"my-hostname", "my.subdomain", "1.2.3.4"}, true}, // supported {[]string{"my-hostname2", "my.other.subdomain", "10.0.0.10"}, true}, // supported {[]string{"my-hostname", "my.subdomain", "2001:db8::4"}, true}, // supported {[]string{"my-hostname2", "my.other.subdomain", "2001:db8::10"}, true}, // supported } for _, rt := range tests { actual := ValidateCertSANs(rt.sans, nil) if (len(actual) == 0) != rt.expected { t.Errorf( "failed ValidateCertSANs:\n\texpected: %t\n\t actual: %t", rt.expected, (len(actual) == 0), ) } } }
explode_data.jsonl/39222
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 496 }
[ 2830, 3393, 17926, 36934, 68691, 82, 1155, 353, 8840, 836, 8, 341, 2405, 7032, 284, 3056, 1235, 341, 197, 1903, 596, 257, 3056, 917, 198, 197, 42400, 1807, 198, 197, 59403, 197, 197, 90, 1294, 917, 22655, 830, 2137, 24348, 442, 5394, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestInstrumentedVersion(t *testing.T) { fakeRuntime, _, _, _ := createTestRuntimeManager() irs := newInstrumentedRuntimeService(fakeRuntime) vr, err := irs.Version("1") assert.NoError(t, err) assert.Equal(t, kubeRuntimeAPIVersion, vr.Version) }
explode_data.jsonl/40684
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 91 }
[ 2830, 3393, 56324, 291, 5637, 1155, 353, 8840, 836, 8, 341, 1166, 726, 15123, 11, 8358, 8358, 716, 1669, 1855, 2271, 15123, 2043, 741, 197, 16838, 1669, 501, 56324, 291, 15123, 1860, 74138, 15123, 340, 5195, 81, 11, 1848, 1669, 6216, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_unmarshal_into_existing_value(t *testing.T) { should := require.New(t) type TestObject struct { Field1 int Field2 interface{} } var obj TestObject m := map[string]interface{}{} obj.Field2 = &m cfg := jsoniter.Config{UseNumber: true}.Froze() err := cfg.Unmarshal([]byte(`{"Field1":1,"Field2":{"k":"v"}}`), &obj) should.NoError(err) should.Equal(map[string]interface{}{ "k": "v", }, m) }
explode_data.jsonl/60992
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 174 }
[ 2830, 3393, 4907, 27121, 45514, 62630, 3142, 1155, 353, 8840, 836, 8, 341, 197, 5445, 1669, 1373, 7121, 1155, 340, 13158, 3393, 1190, 2036, 341, 197, 94478, 16, 526, 198, 197, 94478, 17, 3749, 16094, 197, 532, 2405, 2839, 3393, 1190, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestTeamsService_CreateTeam covers Teams.CreateTeam: request encoding,
// response decoding, the invalid-org error path, and the shared
// NewRequest/Do failure path.
func TestTeamsService_CreateTeam(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()

	input := NewTeam{Name: "n", Privacy: String("closed"), RepoNames: []string{"r"}}

	mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) {
		v := new(NewTeam)
		json.NewDecoder(r.Body).Decode(v)
		testMethod(t, r, "POST")
		// The request body must round-trip the NewTeam input unchanged.
		if !cmp.Equal(v, &input) {
			t.Errorf("Request body = %+v, want %+v", v, input)
		}
		fmt.Fprint(w, `{"id":1}`)
	})

	ctx := context.Background()
	team, _, err := client.Teams.CreateTeam(ctx, "o", input)
	if err != nil {
		t.Errorf("Teams.CreateTeam returned error: %v", err)
	}

	want := &Team{ID: Int64(1)}
	if !cmp.Equal(team, want) {
		t.Errorf("Teams.CreateTeam returned %+v, want %+v", team, want)
	}

	const methodName = "CreateTeam"
	// "\n" is an invalid org name, exercising the URL-building error path.
	testBadOptions(t, methodName, func() (err error) {
		_, _, err = client.Teams.CreateTeam(ctx, "\n", input)
		return err
	})

	// On a failed Do, the returned team must be nil.
	testNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {
		got, resp, err := client.Teams.CreateTeam(ctx, "o", input)
		if got != nil {
			t.Errorf("testNewRequestAndDoFailure %v = %#v, want nil", methodName, got)
		}
		return resp, err
	})
}
explode_data.jsonl/4514
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 490 }
[ 2830, 3393, 60669, 1860, 34325, 14597, 1155, 353, 8840, 836, 8, 341, 25291, 11, 59807, 11, 8358, 49304, 1669, 6505, 741, 16867, 49304, 2822, 22427, 1669, 1532, 14597, 63121, 25, 330, 77, 497, 18874, 25, 923, 445, 34087, 3975, 71509, 798...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestPluginCustomBackend checks that decision-log events are routed, in
// order, to a custom backend plugin registered under the name given in
// the "plugin" config field.
func TestPluginCustomBackend(t *testing.T) {
	ctx := context.Background()
	manager, _ := plugins.New(nil, "test-instance-id", inmem.New())
	backend := &testPlugin{}
	manager.Register("test_plugin", backend)

	config, err := ParseConfig([]byte(`{"plugin": "test_plugin"}`), nil, []string{"test_plugin"})
	if err != nil {
		t.Fatal(err)
	}

	plugin := New(config, manager)
	plugin.Log(ctx, &server.Info{Revision: "A"})
	plugin.Log(ctx, &server.Info{Revision: "B"})

	// Both events must reach the custom backend, in submission order.
	if len(backend.events) != 2 || backend.events[0].Revision != "A" || backend.events[1].Revision != "B" {
		t.Fatal("Unexpected events:", backend.events)
	}
}
explode_data.jsonl/2168
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 233 }
[ 2830, 3393, 11546, 10268, 29699, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 92272, 11, 716, 1669, 17215, 7121, 27907, 11, 330, 1944, 73655, 12897, 497, 304, 10536, 7121, 12367, 197, 20942, 1669, 609, 1944, 11546, 16094, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestCancel serves a request through the Cancel plugin whose Next
// handler (sleepPlugin) outlasts the 20ms timeout, and asserts the
// response code recorded after cancellation.
func TestCancel(t *testing.T) {
	ca := Cancel{Next: sleepPlugin{}, timeout: 20 * time.Millisecond}
	ctx := context.Background()
	w := dnstest.NewRecorder(&test.ResponseWriter{})
	m := new(dns.Msg)
	m.SetQuestion("aaa.example.com.", dns.TypeTXT)
	ca.ServeDNS(ctx, w, m)
	// NOTE(review): the assertion checks RcodeBadTime while the failure
	// message talks about context cancellation — confirm RcodeBadTime is
	// really the rcode this plugin sets on timeout.
	if w.Rcode != dns.RcodeBadTime {
		t.Error("Expected ServeDNS to be canceled by context")
	}
}
explode_data.jsonl/16617
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 9269, 1155, 353, 8840, 836, 8, 341, 197, 924, 1669, 23542, 90, 5847, 25, 6084, 11546, 22655, 9632, 25, 220, 17, 15, 353, 882, 71482, 532, 20985, 1669, 2266, 19047, 2822, 6692, 1669, 47488, 267, 477, 7121, 47023, 2099, 1944...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestNoArg runs zero-argument Celery tasks (function and struct-task
// variants) against redis and amqp broker/backend pairs, then compares
// the asynchronously fetched result with the expected value.
func TestNoArg(t *testing.T) {
	testCases := []struct {
		name     string
		broker   CeleryBroker
		backend  CeleryBackend
		taskName string
		taskFunc interface{} // either a plain func or a task struct pointer
		expected int
	}{
		{
			name:     "no argument that returns integer value with redis broker/backend ",
			broker:   redisBroker,
			backend:  redisBackend,
			taskName: uuid.Must(uuid.NewV4()).String(),
			taskFunc: func() int { return 5545 },
			expected: 5545,
		},
		{
			name:     "no argument that returns integer value with redis broker/backend ",
			broker:   redisBroker,
			backend:  redisBackend,
			taskName: uuid.Must(uuid.NewV4()).String(),
			taskFunc: &noArgTask{},
			expected: 1,
		},
		{
			name:     "no argument that returns integer value with amqp broker/backend ",
			broker:   amqpBroker,
			backend:  amqpBackend,
			taskName: uuid.Must(uuid.NewV4()).String(),
			taskFunc: func() int { return 6930 },
			expected: 6930,
		},
	}
	for _, tc := range testCases {
		cli, _ := NewCeleryClient(tc.broker, tc.backend, 1)
		cli.Register(tc.taskName, tc.taskFunc)
		cli.StartWorker()
		asyncResult, err := cli.Delay(tc.taskName)
		if err != nil {
			t.Errorf("test '%s': failed to get result for task %s: %+v", tc.name, tc.taskName, err)
			cli.StopWorker() // always stop the worker before moving on
			continue
		}
		res, err := asyncResult.Get(TIMEOUT)
		if err != nil {
			t.Errorf("test '%s': failed to get result for task %s: %+v", tc.name, tc.taskName, err)
			cli.StopWorker() // always stop the worker before moving on
			continue
		}
		// json always return float64 intead of int
		if tc.expected != int(res.(float64)) {
			t.Errorf("test '%s': returned result %+v is different from expected result %+v", tc.name, res, tc.expected)
		}
		cli.StopWorker()
	}
}
explode_data.jsonl/77843
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 712 }
[ 2830, 3393, 2753, 2735, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 2233, 45985, 256, 46543, 722, 65545, 198, 197, 197, 20942, 220, 46543, 722, 29699, 198, 197, 49115, 675, 914, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEc2Instance_LoadDetails_InstanceDetails_SubnetID_IsNull(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() ec2Instance, mockEC2ApiHelper := getMockInstance(ctrl) mockEC2ApiHelper.EXPECT().GetInstanceDetails(&instanceID).Return(&ec2.Instance{}, nil) err := ec2Instance.LoadDetails(mockEC2ApiHelper) assert.NotNil(t, err) }
explode_data.jsonl/752
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 131 }
[ 2830, 3393, 50730, 17, 2523, 19553, 7799, 70849, 7799, 36359, 4711, 915, 31879, 3280, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 197, 757, 17, 2523, 11, 7860, 7498, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSetCache checks that mutating an actor struct after SetActor does
// not leak into the state tree: GetActor must return the value as it was
// when stored.
func TestSetCache(t *testing.T) {
	cst := cbor.NewMemCborStore()
	st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion))
	if err != nil {
		t.Fatal(err)
	}

	a, err := address.NewIDAddress(uint64(222))
	if err != nil {
		t.Fatal(err)
	}

	act := &types.Actor{
		Balance: types.NewInt(0),
		Code:    builtin.StorageMinerActorCodeID,
		Head:    builtin.AccountActorCodeID,
		Nonce:   0,
	}

	err = st.SetActor(a, act)
	if err != nil {
		t.Fatal(err)
	}

	// Mutate the caller's struct AFTER storing it.
	act.Nonce = 1

	outact, err := st.GetActor(a)
	if err != nil {
		t.Fatal(err)
	}

	// The tree must still report the Nonce value from the time of SetActor.
	if outact.Nonce == 1 {
		t.Error("nonce should not have updated")
	}
}
explode_data.jsonl/60995
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 284 }
[ 2830, 3393, 1649, 8233, 1155, 353, 8840, 836, 8, 341, 1444, 267, 1669, 272, 9368, 7121, 18816, 34, 9368, 6093, 741, 18388, 11, 1848, 1669, 1532, 1397, 6533, 1337, 267, 11, 59626, 17, 35842, 2461, 12320, 43333, 7121, 477, 12320, 5637, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestCatalog_Register_ACLDeny verifies catalog registration against a server
// with a default-deny ACL policy: a token granting write only on service
// "foo" must be rejected for other services, the pre-version-8 "consul"
// exception must apply until version-8 enforcement is enabled, and
// re-registering over an existing service ID must be checked against the
// stored registration.
func TestCatalog_Register_ACLDeny(t *testing.T) {
	t.Parallel()
	// Server with ACLs on, default-deny, and version-8 enforcement initially off.
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
	codec := rpcClient(t, s1)
	defer codec.Close()

	// Create the ACL. It grants write only on service "foo".
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: ` service "foo" { policy = "write" } `,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	argR := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			Service: "db",
			Tags:    []string{"master"},
			Port:    8000,
		},
		WriteRequest: structs.WriteRequest{Token: id},
	}
	var outR struct{}

	// This should fail since we are writing to the "db" service, which isn't
	// allowed.
	err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR)
	if !acl.IsErrPermissionDenied(err) {
		t.Fatalf("err: %v", err)
	}

	// The "foo" service should work, though.
	argR.Service.Service = "foo"
	err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Try the special case for the "consul" service that allows it no matter
	// what with pre-version 8 ACL enforcement.
	argR.Service.Service = "consul"
	err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure the exception goes away when we turn on version 8 ACL
	// enforcement.
	s1.config.ACLEnforceVersion8 = true
	err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR)
	if !acl.IsErrPermissionDenied(err) {
		t.Fatalf("err: %v", err)
	}

	// Register a db service using the root token.
	argR.Service.Service = "db"
	argR.Service.ID = "my-id"
	argR.Token = "root"
	err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Prove that we are properly looking up the node services and passing
	// that to the ACL helper. We can vet the helper independently in its
	// own unit test after this. This is trying to register over the db
	// service we created above, which is a check that depends on looking
	// at the existing registration data with that service ID. This is a new
	// check for version 8.
	argR.Service.Service = "foo"
	argR.Service.ID = "my-id"
	argR.Token = id
	err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR)
	if !acl.IsErrPermissionDenied(err) {
		t.Fatalf("err: %v", err)
	}
}
explode_data.jsonl/49209
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1156 }
[ 2830, 3393, 41606, 73124, 97627, 23619, 88, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 48532, 16, 11, 274, 16, 1669, 1273, 5475, 2354, 2648, 1155, 11, 2915, 1337, 353, 2648, 8, 341, 197, 1444, 875, 3140, 1043, 3057, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestFinImmediately shuts down the write side of a freshly established
// connection and verifies the resulting FIN/ACK exchange packet by packet.
func TestFinImmediately(t *testing.T) {
	c := context.New(t, defaultMTU)
	defer c.Cleanup()

	c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)

	// Shutdown immediately, check that we get a FIN.
	if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
		t.Fatalf("Shutdown failed: %s", err)
	}

	// Expect a FIN|ACK carrying the next local sequence number and acking
	// the peer's ISS+1.
	iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
	checker.IPv4(t, c.GetPacket(),
		checker.PayloadLen(header.TCPMinimumSize),
		checker.TCP(
			checker.DstPort(context.TestPort),
			checker.TCPSeqNum(uint32(c.IRS)+1),
			checker.TCPAckNum(uint32(iss)),
			checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
		),
	)

	// Ack and send FIN as well.
	c.SendPacket(nil, &context.Headers{
		SrcPort: context.TestPort,
		DstPort: c.Port,
		Flags:   header.TCPFlagAck | header.TCPFlagFin,
		SeqNum:  iss,
		AckNum:  c.IRS.Add(2),
		RcvWnd:  30000,
	})

	// Check that the stack acks the FIN.
	checker.IPv4(t, c.GetPacket(),
		checker.PayloadLen(header.TCPMinimumSize),
		checker.TCP(
			checker.DstPort(context.TestPort),
			checker.TCPSeqNum(uint32(c.IRS)+2),
			checker.TCPAckNum(uint32(iss)+1),
			checker.TCPFlags(header.TCPFlagAck),
		),
	)
}
explode_data.jsonl/75981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 529 }
[ 2830, 3393, 9134, 95693, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 2266, 7121, 1155, 11, 1638, 8505, 52, 340, 16867, 272, 727, 60639, 2822, 1444, 7251, 21146, 5378, 8787, 6341, 14076, 2833, 11, 220, 18, 15, 15, 15, 15, 11, 481, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestExportTraceDataOp verifies that StartTracesOp/EndTracesOp record one
// span per simulated export, with sent/failed item counts, status code, and
// status message matching each operation's outcome, and that the aggregated
// exporter metrics agree with the per-span totals.
func TestExportTraceDataOp(t *testing.T) {
	doneFn, err := obsreporttest.SetupRecordedMetricsTest()
	require.NoError(t, err)
	defer doneFn()

	set := componenttest.NewNopExporterCreateSettings()
	sr := new(oteltest.SpanRecorder)
	set.TracerProvider = oteltest.NewTracerProvider(oteltest.WithSpanRecorder(sr))

	parentCtx, parentSpan := set.TracerProvider.Tracer("test").Start(context.Background(), t.Name())
	defer parentSpan.End()

	obsrep := NewExporter(ExporterSettings{
		Level:                  configtelemetry.LevelNormal,
		ExporterID:             exporter,
		ExporterCreateSettings: set,
	})

	// One successful export (22 items) and one failing export (14 items).
	params := []testParams{
		{items: 22, err: nil},
		{items: 14, err: errFake},
	}
	for i := range params {
		ctx := obsrep.StartTracesOp(parentCtx)
		assert.NotNil(t, ctx)
		obsrep.EndTracesOp(ctx, params[i].items, params[i].err)
	}

	spans := sr.Completed()
	require.Equal(t, len(params), len(spans))

	var sentSpans, failedToSendSpans int
	for i, span := range spans {
		assert.Equal(t, "exporter/"+exporter.String()+"/traces", span.Name())
		switch params[i].err {
		case nil:
			// Success: all items counted as sent, none failed, status unset.
			sentSpans += params[i].items
			assert.Equal(t, attribute.Int64Value(int64(params[i].items)), span.Attributes()[obsmetrics.SentSpansKey])
			assert.Equal(t, attribute.Int64Value(0), span.Attributes()[obsmetrics.FailedToSendSpansKey])
			assert.Equal(t, codes.Unset, span.StatusCode())
		case errFake:
			// Failure: all items counted as failed; the span carries the
			// error status and the error's message.
			failedToSendSpans += params[i].items
			assert.Equal(t, attribute.Int64Value(0), span.Attributes()[obsmetrics.SentSpansKey])
			assert.Equal(t, attribute.Int64Value(int64(params[i].items)), span.Attributes()[obsmetrics.FailedToSendSpansKey])
			assert.Equal(t, codes.Error, span.StatusCode())
			assert.Equal(t, params[i].err.Error(), span.StatusMessage())
		default:
			t.Fatalf("unexpected error: %v", params[i].err)
		}
	}

	// Aggregated metrics must agree with the per-span counts.
	obsreporttest.CheckExporterTraces(t, exporter, int64(sentSpans), int64(failedToSendSpans))
}
explode_data.jsonl/9548
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 751 }
[ 2830, 3393, 16894, 6550, 1043, 7125, 1155, 353, 8840, 836, 8, 341, 40495, 24911, 11, 1848, 1669, 7448, 11736, 1944, 39820, 6471, 291, 27328, 2271, 741, 17957, 35699, 1155, 11, 1848, 340, 16867, 2814, 24911, 2822, 8196, 1669, 3692, 1944, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestHeatmapCanNotBeDecodedIfDataFormatIsInvalid(t *testing.T) { req := require.New(t) panel := DashboardHeatmap{ Span: 12, Height: "300px", Transparent: true, Datasource: "some-prometheus", DataFormat: "invalid value here", HideZeroBuckets: true, HightlightCards: true, Targets: nil, ReverseYBuckets: true, } _, err := panel.toOption() req.Error(err) req.Equal(ErrInvalidDataFormat, err) }
explode_data.jsonl/44073
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 61306, 2186, 6713, 2623, 3430, 4900, 6737, 2679, 1043, 4061, 3872, 7928, 1155, 353, 8840, 836, 8, 341, 24395, 1669, 1373, 7121, 1155, 692, 44952, 1669, 26808, 61306, 2186, 515, 197, 7568, 848, 25, 310, 220, 16, 17, 345, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestWorkflowTemplateRefGetFromStored loads the execution spec of a workflow
// whose status already contains a stored workflow template (wfWithStatus
// fixture) and checks the resolved parameter values.
func TestWorkflowTemplateRefGetFromStored(t *testing.T) {
	wf := unmarshalWF(wfWithStatus)
	t.Run("ProcessWFWithStoredWFT", func(t *testing.T) {
		cancel, controller := newController(wf)
		defer cancel()
		woc := newWorkflowOperationCtx(wf, controller)
		_, execArgs, err := woc.loadExecutionSpec()
		assert.NoError(t, err)
		// Expected values come from the stored template in the fixture.
		assert.Equal(t, "test", execArgs.Parameters[0].Value.String())
		assert.Equal(t, "hello", execArgs.Parameters[1].Value.String())
	})
}
explode_data.jsonl/30604
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 178 }
[ 2830, 3393, 62768, 7275, 3945, 1949, 3830, 93243, 1155, 353, 8840, 836, 8, 341, 6692, 69, 1669, 650, 27121, 32131, 3622, 69, 2354, 2522, 340, 3244, 16708, 445, 7423, 32131, 2354, 93243, 54, 3994, 497, 2915, 1155, 353, 8840, 836, 8, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUseShouldNotAddNilMiddlewares(t *testing.T) { r := NewRouter() start := len(r.middlewares) r.Use(make([]Middleware, 0)...) assert.Len(t, r.middlewares, start) }
explode_data.jsonl/18953
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 72 }
[ 2830, 3393, 10253, 14996, 2623, 2212, 19064, 43935, 37903, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 9523, 741, 21375, 1669, 2422, 2601, 80264, 37903, 340, 7000, 9046, 36944, 10556, 24684, 11, 220, 15, 8, 31218, 6948, 65819, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestEnsureInitialWallTimeMonotonicity checks that ensureClockMonotonicity
// advances the clock beyond a previously persisted HLC upper bound, so wall
// time never regresses across restarts, for bounds below, above, far above,
// and equal to the clock's start time.
func TestEnsureInitialWallTimeMonotonicity(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testCases := []struct {
		name              string
		prevHLCUpperBound int64 // upper bound persisted by a "previous run"
		clockStartTime    int64 // manual clock's starting wall time
		checkPersist      bool
	}{
		{
			name:              "lower upper bound time",
			prevHLCUpperBound: 100,
			clockStartTime:    1000,
			checkPersist:      true,
		},
		{
			name:              "higher upper bound time",
			prevHLCUpperBound: 10000,
			clockStartTime:    1000,
			checkPersist:      true,
		},
		{
			name:              "significantly higher upper bound time",
			prevHLCUpperBound: int64(3 * time.Hour),
			clockStartTime:    int64(1 * time.Hour),
			checkPersist:      true,
		},
		{
			name:              "equal upper bound time",
			prevHLCUpperBound: int64(time.Hour),
			clockStartTime:    int64(time.Hour),
			checkPersist:      true,
		},
	}

	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			a := assert.New(t)

			const maxOffset = 500 * time.Millisecond
			m := hlc.NewManualClock(test.clockStartTime)
			c := hlc.NewClock(m.UnixNano, maxOffset)

			// "Sleep" by advancing the manual clock instead of waiting.
			sleepUntilFn := func(until int64, currentTime func() int64) {
				delta := until - currentTime()
				if delta > 0 {
					m.Increment(delta)
				}
			}

			wallTime1 := c.Now().WallTime
			// Sanity check: when the persisted bound is ahead of the clock,
			// the clock must initially read below it.
			if test.clockStartTime < test.prevHLCUpperBound {
				a.True(
					wallTime1 < test.prevHLCUpperBound,
					fmt.Sprintf(
						"expected wall time %d < prev upper bound %d",
						wallTime1,
						test.prevHLCUpperBound,
					),
				)
			}

			ensureClockMonotonicity(
				context.TODO(),
				c,
				c.PhysicalTime(),
				test.prevHLCUpperBound,
				sleepUntilFn,
			)

			wallTime2 := c.Now().WallTime
			// After ensuring monotonicity, wall time should be greater than
			// persisted upper bound
			a.True(
				wallTime2 > test.prevHLCUpperBound,
				fmt.Sprintf(
					"expected wall time %d > prev upper bound %d",
					wallTime2,
					test.prevHLCUpperBound,
				),
			)
		})
	}
}
explode_data.jsonl/70836
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 938 }
[ 2830, 3393, 64439, 6341, 32597, 1462, 11095, 354, 14011, 487, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 1060, 914, 198, 197, 50728, 39, 8556, 14251, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestUnionDeclConformsNullable looks up the ExampleFlexibleUnion declaration
// (nullable lookup) and checks that both a populated record and nil conform.
func TestUnionDeclConformsNullable(t *testing.T) {
	decl, ok := testSchema(t).lookupDeclByName("ExampleFlexibleUnion", true)
	if !ok {
		t.Fatalf("lookupDeclByName failed")
	}
	unionDecl := decl.(*UnionDecl)
	checkConforms(t,
		context{},
		unionDecl,
		[]conformTest{
			// A record populated with the union's "s" field must conform.
			conformOk{gidlir.Record{
				Name: "ExampleFlexibleUnion",
				Fields: []gidlir.Field{
					{Key: gidlir.FieldKey{Name: "s"}, Value: "foo"},
				},
			}},
			// nil must also conform, since the decl was looked up as nullable.
			conformOk{nil},
		},
	)
}
explode_data.jsonl/21404
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 208 }
[ 2830, 3393, 32658, 21629, 1109, 9807, 15703, 1155, 353, 8840, 836, 8, 341, 197, 10005, 11, 5394, 1669, 1273, 8632, 1155, 568, 21020, 21629, 16898, 445, 13314, 75147, 32658, 497, 830, 340, 743, 753, 562, 341, 197, 3244, 30762, 445, 21020...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestUserStoreGetProfilesByUsernames saves two users as members of one team
// and fetches profiles by username lists of length two and one, verifying
// both the returned count and the identity of the returned users.
func TestUserStoreGetProfilesByUsernames(t *testing.T) {
	Setup()
	teamId := model.NewId()

	u1 := &model.User{}
	u1.Email = model.NewId()
	u1.Username = "username1" + model.NewId()
	Must(store.User().Save(u1))
	Must(store.Team().SaveMember(&model.TeamMember{TeamId: teamId, UserId: u1.Id}))

	u2 := &model.User{}
	u2.Email = model.NewId()
	u2.Username = "username2" + model.NewId()
	Must(store.User().Save(u2))
	Must(store.Team().SaveMember(&model.TeamMember{TeamId: teamId, UserId: u2.Id}))

	// Requesting both usernames should return exactly the two users.
	if r1 := <-store.User().GetProfilesByUsernames([]string{u1.Username, u2.Username}, teamId); r1.Err != nil {
		t.Fatal(r1.Err)
	} else {
		users := r1.Data.(map[string]*model.User)
		if len(users) != 2 {
			t.Fatal("invalid returned users")
		}
		if users[u1.Id].Id != u1.Id {
			t.Fatal("invalid returned user")
		}
	}

	// Requesting a single username should return exactly that user.
	if r1 := <-store.User().GetProfilesByUsernames([]string{u1.Username}, teamId); r1.Err != nil {
		t.Fatal(r1.Err)
	} else {
		users := r1.Data.(map[string]*model.User)
		if len(users) != 1 {
			t.Fatal("invalid returned users")
		}
		if users[u1.Id].Id != u1.Id {
			t.Fatal("invalid returned user")
		}
	}
}
explode_data.jsonl/5096
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 501 }
[ 2830, 3393, 1474, 6093, 1949, 62719, 1359, 1474, 11400, 1155, 353, 8840, 836, 8, 341, 197, 21821, 2822, 197, 9196, 764, 1669, 1614, 7121, 764, 2822, 10676, 16, 1669, 609, 2528, 7344, 16094, 10676, 16, 24066, 284, 1614, 7121, 764, 741, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestSSLConnection(t *testing.T) { maybeSkipSSLTests(t) // Environment sanity check: should fail without SSL checkSSLSetup(t, "sslmode=disable user=pqgossltest") db, err := openSSLConn(t, "sslmode=require user=pqgossltest") if err != nil { t.Fatal(err) } rows, err := db.Query("SELECT 1") if err != nil { t.Fatal(err) } rows.Close() }
explode_data.jsonl/1382
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 22594, 4526, 1155, 353, 8840, 836, 8, 341, 2109, 49791, 35134, 22594, 18200, 1155, 340, 197, 322, 11586, 46842, 1779, 25, 1265, 3690, 2041, 25316, 198, 25157, 22594, 21821, 1155, 11, 330, 24635, 8516, 28, 18015, 1196, 17385, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestImportTeam exercises the team-import endpoint: rejection of unknown and
// empty import sources, a successful slack-format import (verifying imported
// users, channels, and posts), a missing payload, and a permissions failure
// for a non-admin client.
func TestImportTeam(t *testing.T) {
	th := Setup(t).InitBasic()
	defer th.TearDown()

	th.TestForAllClients(t, func(T *testing.T, c *model.Client4) {
		data, err := testutils.ReadTestFile("Fake_Team_Import.zip")
		require.False(t, err != nil && len(data) == 0, "Error while reading the test file.")

		// "XYZ" is not a recognized import source.
		_, resp := th.SystemAdminClient.ImportTeam(data, binary.Size(data), "XYZ", "Fake_Team_Import.zip", th.BasicTeam.Id)
		CheckBadRequestStatus(t, resp)

		// An empty import source is also rejected.
		_, resp = th.SystemAdminClient.ImportTeam(data, binary.Size(data), "", "Fake_Team_Import.zip", th.BasicTeam.Id)
		CheckBadRequestStatus(t, resp)
	}, "Import from unknown and source")

	t.Run("ImportTeam", func(t *testing.T) {
		var data []byte
		var err error
		data, err = testutils.ReadTestFile("Fake_Team_Import.zip")
		require.False(t, err != nil && len(data) == 0, "Error while reading the test file.")

		// Import the channels/users/posts
		fileResp, resp := th.SystemAdminClient.ImportTeam(data, binary.Size(data), "slack", "Fake_Team_Import.zip", th.BasicTeam.Id)
		CheckNoError(t, resp)

		// The response carries the import log base64-encoded under "results".
		fileData, err := base64.StdEncoding.DecodeString(fileResp["results"])
		require.Nil(t, err, "failed to decode base64 results data")
		fileReturned := fmt.Sprintf("%s", fileData)
		require.Truef(t, strings.Contains(fileReturned, "darth.vader@stardeath.com"), "failed to report the user was imported, fileReturned: %s", fileReturned)

		// Checking the imported users
		importedUser, resp := th.SystemAdminClient.GetUserByUsername("bot_test", "")
		CheckNoError(t, resp)
		require.Equal(t, importedUser.Username, "bot_test", "username should match with the imported user")

		importedUser, resp = th.SystemAdminClient.GetUserByUsername("lordvader", "")
		CheckNoError(t, resp)
		require.Equal(t, importedUser.Username, "lordvader", "username should match with the imported user")

		// Checking the imported Channels
		importedChannel, resp := th.SystemAdminClient.GetChannelByName("testchannel", th.BasicTeam.Id, "")
		CheckNoError(t, resp)
		require.Equal(t, importedChannel.Name, "testchannel", "names did not match expected: testchannel")

		importedChannel, resp = th.SystemAdminClient.GetChannelByName("general", th.BasicTeam.Id, "")
		CheckNoError(t, resp)
		require.Equal(t, importedChannel.Name, "general", "names did not match expected: general")

		posts, resp := th.SystemAdminClient.GetPostsForChannel(importedChannel.Id, 0, 60, "")
		CheckNoError(t, resp)
		require.Equal(t, posts.Posts[posts.Order[3]].Message, "This is a test post to test the import process", "missing posts in the import process")
	})

	t.Run("MissingFile", func(t *testing.T) {
		// nil data (with a bogus size) must be rejected outright.
		_, resp := th.SystemAdminClient.ImportTeam(nil, 4343, "slack", "Fake_Team_Import.zip", th.BasicTeam.Id)
		CheckBadRequestStatus(t, resp)
	})

	t.Run("WrongPermission", func(t *testing.T) {
		var data []byte
		var err error
		data, err = testutils.ReadTestFile("Fake_Team_Import.zip")
		require.False(t, err != nil && len(data) == 0, "Error while reading the test file.")

		// Import the channels/users/posts
		_, resp := th.Client.ImportTeam(data, binary.Size(data), "slack", "Fake_Team_Import.zip", th.BasicTeam.Id)
		CheckForbiddenStatus(t, resp)
	})
}
explode_data.jsonl/70736
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1104 }
[ 2830, 3393, 11511, 14597, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 2822, 70479, 8787, 2461, 2403, 47174, 1155, 11, 2915, 4140, 353, 8840, 836, 11, 272, 353, 2528, 11716, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test2(t *testing.T) { reqs := PasswordStrengthRequirements{MinimumTotalLength: 8, Digits: 3} p, e := NewPassword(&reqs) if e != nil { t.Error(e) } if ok, msg := reqs.Validate(p); !ok { t.Error(msg) } }
explode_data.jsonl/7141
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 94 }
[ 2830, 3393, 17, 1155, 353, 8840, 836, 8, 341, 24395, 82, 1669, 12362, 52857, 59202, 90, 28695, 7595, 4373, 25, 220, 23, 11, 422, 18667, 25, 220, 18, 532, 3223, 11, 384, 1669, 1532, 4876, 2099, 2958, 82, 340, 743, 384, 961, 2092, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestRemoteHeaderRequestSpan feeds calculateRequestSpan various combinations
// of remote and local chain heights and checks the exact list of header
// numbers that would be requested, as well as the reported max value.
func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edgecases that it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	// reqs expands (from, count, span) into the concrete header numbers a
	// request with those parameters would fetch: count values starting at
	// from, stepping by span+1.
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		// The reported max must equal the last requested header number.
		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			// Log both lists comma-separated for easy diffing.
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}
explode_data.jsonl/46741
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 921 }
[ 2830, 3393, 24703, 4047, 1900, 12485, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 197, 18147, 3640, 2622, 21, 19, 198, 197, 8854, 3640, 220, 2622, 21, 19, 198, 197, 42400, 257, 3056, 396, 198, 197, 59403, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestMVCCStatsRandomized runs a randomized sequence of MVCC operations
// (put, init-put, delete, range-delete, intent resolution via abort/commit/
// push, and GC) against both a userspace key and a system key, in inline and
// non-inline mode. Each action returns a description string for logging;
// per-step validation is performed by the randomTest harness's step method.
func TestMVCCStatsRandomized(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	// NB: no failure type ever required count five or more. When there is a result
	// found by this test, or any other MVCC code is changed, it's worth reducing
	// this first to two, three, ... and running the test for a minute to get a
	// good idea of minimally reproducing examples.
	const count = 200

	actions := make(map[string]func(*state) string)

	actions["Put"] = func(s *state) string {
		if err := MVCCPut(ctx, s.eng, s.MS, s.key, s.TS, s.rngVal(), s.Txn); err != nil {
			return err.Error()
		}
		return ""
	}
	actions["InitPut"] = func(s *state) string {
		// Randomly toggle tombstone sensitivity for InitPut.
		failOnTombstones := (s.rng.Intn(2) == 0)
		desc := fmt.Sprintf("failOnTombstones=%t", failOnTombstones)
		if err := MVCCInitPut(ctx, s.eng, s.MS, s.key, s.TS, s.rngVal(), failOnTombstones, s.Txn); err != nil {
			return desc + ": " + err.Error()
		}
		return desc
	}
	actions["Del"] = func(s *state) string {
		if err := MVCCDelete(ctx, s.eng, s.MS, s.key, s.TS, s.Txn); err != nil {
			return err.Error()
		}
		return ""
	}
	actions["DelRange"] = func(s *state) string {
		returnKeys := (s.rng.Intn(2) == 0)
		max := s.rng.Int63n(5)
		desc := fmt.Sprintf("returnKeys=%t, max=%d", returnKeys, max)
		if _, _, _, err := MVCCDeleteRange(ctx, s.eng, s.MS, roachpb.KeyMin, roachpb.KeyMax, max, s.TS, s.Txn, returnKeys); err != nil {
			return desc + ": " + err.Error()
		}
		return desc
	}
	actions["EnsureTxn"] = func(s *state) string {
		// Lazily create a transaction so later writes become intents.
		if s.Txn == nil {
			s.Txn = &roachpb.Transaction{TxnMeta: enginepb.TxnMeta{ID: uuid.MakeV4(), Timestamp: s.TS}}
		}
		return ""
	}

	// resolve finishes the current intent (point or ranged, chosen randomly)
	// with the given transaction status; a final status clears s.Txn.
	resolve := func(s *state, status roachpb.TransactionStatus) string {
		ranged := s.rng.Intn(2) == 0
		desc := fmt.Sprintf("ranged=%t", ranged)
		if s.Txn != nil {
			if !ranged {
				if err := MVCCResolveWriteIntent(ctx, s.eng, s.MS, s.intent(status)); err != nil {
					return desc + ": " + err.Error()
				}
			} else {
				max := s.rng.Int63n(5)
				desc += fmt.Sprintf(", max=%d", max)
				if _, _, err := MVCCResolveWriteIntentRange(ctx, s.eng, s.MS, s.intentRange(status), max); err != nil {
					return desc + ": " + err.Error()
				}
			}
			if status != roachpb.PENDING {
				s.Txn = nil
			}
		}
		return desc
	}
	actions["Abort"] = func(s *state) string {
		return resolve(s, roachpb.ABORTED)
	}
	actions["Commit"] = func(s *state) string {
		return resolve(s, roachpb.COMMITTED)
	}
	actions["Push"] = func(s *state) string {
		return resolve(s, roachpb.PENDING)
	}
	actions["GC"] = func(s *state) string {
		// Sometimes GC everything, sometimes only older versions.
		gcTS := hlc.Timestamp{
			WallTime: s.rng.Int63n(s.TS.WallTime + 1 /* avoid zero */),
		}
		if err := MVCCGarbageCollect(
			ctx, s.eng, s.MS, []roachpb.GCRequest_GCKey{{
				Key:       s.key,
				Timestamp: gcTS,
			}}, s.TS,
		); err != nil {
			return err.Error()
		}
		return fmt.Sprint(gcTS)
	}

	for _, test := range []struct {
		name string
		key  roachpb.Key
		seed int64
	}{
		{
			name: "userspace",
			key:  roachpb.Key("foo"),
			seed: randutil.NewPseudoSeed(),
		},
		{
			name: "sys",
			key:  keys.RangeDescriptorKey(roachpb.RKey("bar")),
			seed: randutil.NewPseudoSeed(),
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			testutils.RunTrueAndFalse(t, "inline", func(t *testing.T, inline bool) {
				// The seed is in the subtest name so failures are reproducible.
				t.Run(fmt.Sprintf("seed=%d", test.seed), func(t *testing.T) {
					eng := createTestEngine()
					defer eng.Close()

					s := &randomTest{
						actions: actions,
						inline:  inline,
						state: state{
							rng: rand.New(rand.NewSource(test.seed)),
							eng: eng,
							key: test.key,
							MS:  &enginepb.MVCCStats{},
						},
					}

					for i := 0; i < count; i++ {
						s.step(t)
					}
				})
			})
		})
	}
}
explode_data.jsonl/41650
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1712 }
[ 2830, 3393, 66626, 3706, 16635, 13999, 1506, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 20985, 1669, 2266, 19047, 2822, 197, 322, 34979, 25, 902, 7901, 943, 3512, 2567, 1760, 4236, 476, 803, 13, 3197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// Test_reconcileBECPUShare verifies that reconcileBECPUShare writes the
// expected cpu.shares values for a batch (best-effort) pod and for each of
// its containers, based on the containers' batch-CPU requests, using a fake
// cgroup tree rooted in a temp directory.
func Test_reconcileBECPUShare(t *testing.T) {
	type args struct {
		podMeta               *statesinformer.PodMeta
		podCurCPUShare        int64            // initial pod cpu.shares written by initTestPodCPUShare
		containerCurCPUShare  map[string]int64 // initial per-container cpu.shares written by initTestPodCPUShare
		wantPodCPUShare       int64
		wantContainerCPUShare map[string]int64
	}
	tests := []struct {
		name string
		args args
	}{
		{
			name: "set-cpu-share",
			args: args{
				podMeta: &statesinformer.PodMeta{
					Pod: &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "test-ns",
							Name:      "test-name",
							UID:       "test-pod-uid",
						},
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{
								{
									Name: "test-container-1",
									Resources: corev1.ResourceRequirements{
										Limits: corev1.ResourceList{
											extension.BatchCPU: *resource.NewQuantity(500, resource.DecimalSI),
										},
										Requests: corev1.ResourceList{
											extension.BatchCPU: *resource.NewQuantity(500, resource.DecimalSI),
										},
									},
								},
								{
									Name: "test-container-2",
									Resources: corev1.ResourceRequirements{
										Limits: corev1.ResourceList{
											extension.BatchCPU: *resource.NewQuantity(1000, resource.DecimalSI),
										},
										Requests: corev1.ResourceList{
											extension.BatchCPU: *resource.NewQuantity(1000, resource.DecimalSI),
										},
									},
								},
							},
						},
						Status: corev1.PodStatus{
							ContainerStatuses: []corev1.ContainerStatus{
								{
									Name:        "test-container-1",
									ContainerID: "docker://testcontainer1hashid",
								},
								{
									Name:        "test-container-2",
									ContainerID: "docker://testcontainer2hashid",
								},
							},
						},
					},
					CgroupDir: "kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podtest_pod_uid.slice",
				},
				podCurCPUShare: 2,
				containerCurCPUShare: map[string]int64{
					"test-container-1": 2,
					"test-container-2": 2,
				},
				// Pod share is the sum of the expected container shares
				// (512 + 1024 = 1536).
				wantPodCPUShare: 1536,
				wantContainerCPUShare: map[string]int64{
					"test-container-1": 512,
					"test-container-2": 1024,
				},
			},
		},
	}
	for _, tt := range tests {
		// Point the cgroup root at a temp dir so the test reads/writes fake
		// cgroup files instead of the real ones.
		system.Conf = system.NewDsModeConfig()
		system.Conf.CgroupRootDir, _ = ioutil.TempDir("/tmp", "koordlet-test")
		err := initTestPodCPUShare(tt.args.podMeta, tt.args.podCurCPUShare, tt.args.containerCurCPUShare)
		if err != nil {
			t.Errorf("init cpu share failed, error: %v", err)
		}
		t.Run(tt.name, func(t *testing.T) {
			reconcileBECPUShare(tt.args.podMeta)

			// Pod-level cpu.shares must match the expected total.
			podCPUShareResult, err := util.GetPodCurCPUShare(tt.args.podMeta.CgroupDir)
			if err != nil {
				t.Errorf("get pod cpu share result failed, error %v", err)
			}
			if podCPUShareResult != tt.args.wantPodCPUShare {
				t.Errorf("pod cpu share result not equal, want %v, got %v", tt.args.wantPodCPUShare, podCPUShareResult)
			}

			// Each container's cpu.shares must match its expected value.
			for _, containerStat := range tt.args.podMeta.Pod.Status.ContainerStatuses {
				containerCPUShareResult, err := util.GetContainerCurCPUShare(tt.args.podMeta.CgroupDir, &containerStat)
				if err != nil {
					t.Errorf("get container %v cpu share result failed, error %v", containerStat.Name, err)
				}
				wantContainerCPUShare, exist := tt.args.wantContainerCPUShare[containerStat.Name]
				if !exist {
					t.Errorf("container %v want cpu share quota not exist", containerStat.Name)
				}
				if containerCPUShareResult != wantContainerCPUShare {
					t.Errorf("container %v cpu share result not equal, want %v, got %v", containerStat.Name, wantContainerCPUShare, containerCPUShareResult)
				}
			}
		})
	}
}
explode_data.jsonl/38231
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1679 }
[ 2830, 3393, 1288, 40446, 457, 33, 7498, 6325, 12115, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 3223, 347, 12175, 2290, 353, 32069, 258, 34527, 88823, 12175, 198, 197, 3223, 347, 16704, 31615, 12115, 286, 526, 21, 19, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestOpenWord2016(t *testing.T) { doc, err := document.Open("../testdata/Office2016/Word-Windows.docx") if err != nil { t.Errorf("error opening Windows Word 2016 document: %s", err) } got := bytes.Buffer{} if err := doc.Save(&got); err != nil { t.Errorf("error saving W216 file: %s", err) } testhelper.CompareGoldenZipFilesOnly(t, "../../testdata/Office2016/Word-Windows.docx", got.Bytes()) }
explode_data.jsonl/61208
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 149 }
[ 2830, 3393, 5002, 10879, 17, 15, 16, 21, 1155, 353, 8840, 836, 8, 341, 59536, 11, 1848, 1669, 2197, 12953, 17409, 92425, 14, 23914, 17, 15, 16, 21, 14, 10879, 13002, 1491, 23671, 87, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 13080...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFieldReference(t *testing.T) { const record = `{rec:{i:5 (int32),s:"boo",f:6.1} (=0)} (=1)` testSuccessful(t, "rec.i", record, zint32(5)) testSuccessful(t, "rec.s", record, zstring("boo")) testSuccessful(t, "rec.f", record, zfloat64(6.1)) }
explode_data.jsonl/2313
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 113 }
[ 2830, 3393, 1877, 8856, 1155, 353, 8840, 836, 8, 341, 4777, 3255, 284, 53692, 2758, 12547, 72, 25, 20, 320, 396, 18, 17, 701, 82, 2974, 32993, 497, 69, 25, 21, 13, 16, 92, 38738, 15, 9139, 38738, 16, 8, 19324, 18185, 36374, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPartitionByClauseRestore(t *testing.T) { testCases := []NodeRestoreTestCase{ {"PARTITION BY a", "PARTITION BY `a`"}, {"PARTITION BY NULL", "PARTITION BY NULL"}, {"PARTITION BY a, b", "PARTITION BY `a`, `b`"}, } extractNodeFunc := func(node Node) Node { return node.(*SelectStmt).Fields.Fields[0].Expr.(*WindowFuncExpr).Spec.PartitionBy } runNodeRestoreTest(t, testCases, "select avg(val) over (%s rows current row) from t", extractNodeFunc) }
explode_data.jsonl/27585
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 176 }
[ 2830, 3393, 49978, 1359, 28482, 56284, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1955, 56284, 16458, 515, 197, 197, 4913, 33490, 7092, 7710, 264, 497, 330, 33490, 7092, 7710, 1565, 64, 63, 7115, 197, 197, 4913, 33490, 7092...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestROM_BusReader_Fail_Boundary(t *testing.T) { contents := sampleROM() rom, err := NewROM("", contents) if err != nil { t.Fatal(err) } r := rom.BusReader(0x00FFFF) p := uint16(0) err = binary.Read(r, binary.LittleEndian, &p) if !errors.Is(err, io.EOF) { t.Fatalf("expected fail with EOF but got: %v", err) } }
explode_data.jsonl/69477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 143 }
[ 2830, 3393, 3361, 1668, 355, 5062, 1400, 604, 1668, 72466, 1155, 353, 8840, 836, 8, 341, 197, 17610, 1669, 6077, 3361, 741, 197, 441, 11, 1848, 1669, 1532, 3361, 19814, 8794, 340, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestTerragruntStackCommandsWithPlanFile(t *testing.T) { t.Parallel() disjointEnvironmentPath := "fixture-stack/disjoint" cleanupTerraformFolder(t, disjointEnvironmentPath) runTerragrunt(t, fmt.Sprintf("terragrunt plan-all -out=plan.tfplan --terragrunt-log-level info --terragrunt-non-interactive --terragrunt-working-dir %s", disjointEnvironmentPath)) runTerragrunt(t, fmt.Sprintf("terragrunt apply-all plan.tfplan --terragrunt-log-level info --terragrunt-non-interactive --terragrunt-working-dir %s", disjointEnvironmentPath)) }
explode_data.jsonl/10088
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 51402, 68305, 3850, 4336, 4062, 16056, 20485, 1703, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 34597, 32850, 12723, 1820, 1669, 330, 59612, 56090, 41510, 32850, 698, 1444, 60639, 51, 13886, 627, 13682, 1155, 11, 84...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUserStoreGetProfiles(t *testing.T) { Setup() teamId := model.NewId() u1 := &model.User{} u1.Email = model.NewId() Must(store.User().Save(u1)) Must(store.Team().SaveMember(&model.TeamMember{TeamId: teamId, UserId: u1.Id})) u2 := &model.User{} u2.Email = model.NewId() Must(store.User().Save(u2)) Must(store.Team().SaveMember(&model.TeamMember{TeamId: teamId, UserId: u2.Id})) if r1 := <-store.User().GetProfiles(teamId, 0, 100); r1.Err != nil { t.Fatal(r1.Err) } else { users := r1.Data.([]*model.User) if len(users) != 2 { t.Fatal("invalid returned users") } found := false for _, u := range users { if u.Id == u1.Id { found = true } } if !found { t.Fatal("missing user") } } if r2 := <-store.User().GetProfiles("123", 0, 100); r2.Err != nil { t.Fatal(r2.Err) } else { if len(r2.Data.([]*model.User)) != 0 { t.Fatal("should have returned empty map") } } }
explode_data.jsonl/5091
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 423 }
[ 2830, 3393, 1474, 6093, 1949, 62719, 1155, 353, 8840, 836, 8, 341, 197, 21821, 2822, 197, 9196, 764, 1669, 1614, 7121, 764, 2822, 10676, 16, 1669, 609, 2528, 7344, 16094, 10676, 16, 24066, 284, 1614, 7121, 764, 741, 9209, 590, 31200, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestWidgetSection_Find(t *testing.T) { wid1 := NewWidgetID() wid2 := NewWidgetID() wid3 := NewWidgetID() wid4 := NewWidgetID() wid5 := NewWidgetID() wid6 := NewWidgetID() wid7 := NewWidgetID() tests := []struct { Name string Input WidgetID Expected1 int Expected2 WidgetAreaType Nil bool }{ { Name: "top", Input: wid2, Expected1: 1, Expected2: WidgetAreaTop, }, { Name: "middle", Input: wid4, Expected1: 0, Expected2: WidgetAreaMiddle, }, { Name: "bottom", Input: wid7, Expected1: 1, Expected2: WidgetAreaBottom, }, { Name: "invalid id", Input: NewWidgetID(), Expected1: -1, Expected2: "", }, { Name: "Return nil if no widget section", Input: wid1, Nil: true, Expected1: -1, Expected2: "", }, } for _, tc := range tests { tc := tc t.Run(tc.Name, func(t *testing.T) { t.Parallel() if tc.Nil { index, area := (*WidgetSection)(nil).Find(tc.Input) assert.Equal(t, tc.Expected1, index) assert.Equal(t, tc.Expected2, area) return } ws := NewWidgetSection() ws.Area(WidgetAreaTop).AddAll([]WidgetID{wid1, wid2, wid3}) ws.Area(WidgetAreaMiddle).AddAll([]WidgetID{wid4, wid5}) ws.Area(WidgetAreaBottom).AddAll([]WidgetID{wid6, wid7}) index, area := ws.Find(tc.Input) assert.Equal(t, tc.Expected1, index) assert.Equal(t, tc.Expected2, area) }) } }
explode_data.jsonl/77655
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 728 }
[ 2830, 3393, 4548, 9620, 95245, 1155, 353, 8840, 836, 8, 341, 6692, 307, 16, 1669, 1532, 4548, 915, 741, 6692, 307, 17, 1669, 1532, 4548, 915, 741, 6692, 307, 18, 1669, 1532, 4548, 915, 741, 6692, 307, 19, 1669, 1532, 4548, 915, 741,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_Service_Middleware(t *testing.T) { bkoff := backoff.NewExponentialBackOff() bkoff.MaxElapsedTime = time.Second * 15 err := backoff.Retry(func() error { logger, _ := test.NewNullLogger() logger.SetLevel(logrus.DebugLevel) c := service.NewContainer(logger) c.Register(ID, &Service{}) err := c.Init(&testCfg{httpCfg: `{ "enable": true, "address": ":6032", "maxRequestSize": 1024, "uploads": { "dir": ` + tmpDir() + `, "forbid": [] }, "workers":{ "command": "php ../../tests/http/client.php echo pipes", "relay": "pipes", "pool": { "numWorkers": 1, "allocateTimeout": 10000000, "destroyTimeout": 10000000 } } }`}) if err != nil { return err } s, st := c.Get(ID) assert.NotNil(t, s) assert.Equal(t, service.StatusOK, st) s.(*Service).AddMiddleware(func(f http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/halt" { w.WriteHeader(500) _, err := w.Write([]byte("halted")) if err != nil { t.Errorf("error writing the data to the http reply: error %v", err) } } else { f(w, r) } } }) go func() { err := c.Serve() if err != nil { t.Errorf("serve error: %v", err) } }() time.Sleep(time.Millisecond * 500) req, err := http.NewRequest("GET", "http://localhost:6032?hello=world", nil) if err != nil { c.Stop() return err } r, err := http.DefaultClient.Do(req) if err != nil { c.Stop() return err } b, err := ioutil.ReadAll(r.Body) if err != nil { c.Stop() return err } assert.Equal(t, 201, r.StatusCode) assert.Equal(t, "WORLD", string(b)) err = r.Body.Close() if err != nil { c.Stop() return err } req, err = http.NewRequest("GET", "http://localhost:6032/halt", nil) if err != nil { c.Stop() return err } r, err = http.DefaultClient.Do(req) if err != nil { c.Stop() return err } b, err = ioutil.ReadAll(r.Body) if err != nil { c.Stop() return err } assert.Equal(t, 500, r.StatusCode) assert.Equal(t, "halted", string(b)) err = r.Body.Close() if err != nil { c.Stop() return err } c.Stop() return nil }, bkoff) if err != nil { t.Fatal(err) } }
explode_data.jsonl/34501
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1090 }
[ 2830, 3393, 52548, 1245, 11603, 1155, 353, 8840, 836, 8, 341, 2233, 74, 1847, 1669, 1182, 1847, 7121, 840, 59825, 3707, 4596, 741, 2233, 74, 1847, 14535, 98483, 284, 882, 32435, 353, 220, 16, 20, 271, 9859, 1669, 1182, 1847, 2013, 151...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestReconcileOnCancelledRunFinallyPipelineRunWithRunningFinalTask(t *testing.T) { // TestReconcileOnCancelledRunFinallyPipelineRunWithRunningFinalTask runs "Reconcile" on a PipelineRun that has been gracefully cancelled. // It verifies that reconcile is successful and completed tasks and running final tasks are left untouched. prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` metadata: name: test-pipeline-run-cancelled-run-finally namespace: foo spec: pipelineRef: name: test-pipeline serviceAccountName: test-sa status: CancelledRunFinally status: startTime: "2022-01-01T00:00:00Z" taskRuns: test-pipeline-run-cancelled-run-finally-final-task: pipelineTaskName: final-task-1 test-pipeline-run-cancelled-run-finally-hello-world: pipelineTaskName: hello-world-1 status: conditions: - lastTransitionTime: null status: "True" type: Succeeded `)} ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` metadata: name: test-pipeline namespace: foo spec: finally: - name: final-task-1 taskRef: name: some-task tasks: - name: hello-world-1 taskRef: name: hello-world `)} ts := []*v1beta1.Task{ simpleHelloWorldTask, simpleSomeTask, } trs := []*v1beta1.TaskRun{ createHelloWorldTaskRunWithStatus(t, "test-pipeline-run-cancelled-run-finally-hello-world", "foo", "test-pipeline-run-cancelled-run-finally", "test-pipeline", "my-pod-name", apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), createHelloWorldTaskRun(t, "test-pipeline-run-cancelled-run-finally-final-task", "foo", "test-pipeline-run-cancelled-run-finally", "test-pipeline"), } cms := []*corev1.ConfigMap{withEnabledAlphaAPIFields(newFeatureFlagsConfigMap())} d := test.Data{ PipelineRuns: prs, Pipelines: ps, Tasks: ts, TaskRuns: trs, ConfigMaps: cms, } prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ "Normal Started", } reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-cancelled-run-finally", wantEvents, false) if 
reconciledRun.Status.CompletionTime != nil { t.Errorf("Expected a CompletionTime to be nil on incomplete PipelineRun but was %v", reconciledRun.Status.CompletionTime) } // This PipelineRun should still be complete and unknown, and the status should reflect that if !reconciledRun.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() { t.Errorf("Expected PipelineRun status to be complete and unknown, but was %v", reconciledRun.Status.GetCondition(apis.ConditionSucceeded)) } // There should be 2 task runs, one for already completed "hello-world-1" task and one for the "final-task-1" final task if len(reconciledRun.Status.TaskRuns) != 2 { t.Errorf("Expected PipelineRun status to have 2 task runs, but was %v", len(reconciledRun.Status.TaskRuns)) } actions := clients.Pipeline.Actions() patchActions := make([]ktesting.PatchAction, 0) for _, action := range actions { if patchAction, ok := action.(ktesting.PatchAction); ok { patchActions = append(patchActions, patchAction) } } if len(patchActions) != 0 { t.Errorf("Expected no patch actions, but was %v", len(patchActions)) } }
explode_data.jsonl/27287
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1265 }
[ 2830, 3393, 693, 40446, 457, 1925, 39473, 6727, 23949, 34656, 51918, 18990, 19357, 6262, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 693, 40446, 457, 1925, 39473, 6727, 23949, 34656, 51918, 18990, 19357, 6262, 8473, 330, 693, 40446, 457...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestPack(t *testing.T) { headerBuffer := make([]byte, 0) tests := []struct { name string buffer *[]byte }{ { name: "PackTest", buffer: &headerBuffer, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { h := &PackageHeader{} h.Pack(tt.buffer) }) } }
explode_data.jsonl/19767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 143 }
[ 2830, 3393, 30684, 1155, 353, 8840, 836, 8, 341, 20883, 4095, 1669, 1281, 10556, 3782, 11, 220, 15, 340, 78216, 1669, 3056, 1235, 341, 197, 11609, 256, 914, 198, 197, 31122, 353, 1294, 3782, 198, 197, 59403, 197, 197, 515, 298, 11609,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCCEventMgmt(t *testing.T) { cc1Def := &ChaincodeDefinition{Name: "cc1", Version: "v1", Hash: []byte("cc1")} cc1DBArtifactsTar := []byte("cc1DBArtifacts") cc2Def := &ChaincodeDefinition{Name: "cc2", Version: "v1", Hash: []byte("cc2")} cc2DBArtifactsTar := []byte("cc2DBArtifacts") cc3Def := &ChaincodeDefinition{Name: "cc3", Version: "v1", Hash: []byte("cc3")} cc3DBArtifactsTar := []byte("cc3DBArtifacts") // cc1 is deployed and installed. cc2 is deployed but not installed. cc3 is not deployed but installed mockProvider := newMockProvider() mockProvider.setChaincodeInstalled(cc1Def, cc1DBArtifactsTar) mockProvider.setChaincodeDeployed("channel1", cc1Def, true) mockProvider.setChaincodeDeployed("channel1", cc2Def, true) mockProvider.setChaincodeInstalled(cc3Def, cc3DBArtifactsTar) setEventMgrForTest(newMgr(mockProvider)) defer clearEventMgrForTest() handler1, handler2, handler3 := &mockHandler{}, &mockHandler{}, &mockHandler{} eventMgr := GetMgr() require.NotNil(t, eventMgr) eventMgr.Register("channel1", handler1) eventMgr.Register("channel2", handler2) eventMgr.Register("channel1", handler3) eventMgr.Register("channel2", handler3) cc2ExpectedEvent := &mockEvent{cc2Def, cc2DBArtifactsTar} _ = cc2ExpectedEvent cc3ExpectedEvent := &mockEvent{cc3Def, cc3DBArtifactsTar} // Deploy cc3 on chain1 - handler1 and handler3 should receive event because cc3 is being deployed only on chain1 require.NoError(t, eventMgr.HandleChaincodeDeploy("channel1", []*ChaincodeDefinition{cc3Def}), ) eventMgr.ChaincodeDeployDone("channel1") require.Contains(t, handler1.eventsRecieved, cc3ExpectedEvent) require.NotContains(t, handler2.eventsRecieved, cc3ExpectedEvent) require.Contains(t, handler3.eventsRecieved, cc3ExpectedEvent) require.Equal(t, 1, handler1.doneRecievedCount) require.Equal(t, 0, handler2.doneRecievedCount) require.Equal(t, 1, handler3.doneRecievedCount) // Deploy cc3 on chain2 as well and this time handler2 should also receive event require.NoError(t, 
eventMgr.HandleChaincodeDeploy("channel2", []*ChaincodeDefinition{cc3Def}), ) eventMgr.ChaincodeDeployDone("channel2") require.Contains(t, handler2.eventsRecieved, cc3ExpectedEvent) require.Equal(t, 1, handler1.doneRecievedCount) require.Equal(t, 1, handler2.doneRecievedCount) require.Equal(t, 2, handler3.doneRecievedCount) // Install CC2 - handler1 and handler 3 should receive event because cc2 is deployed only on chain1 and not on chain2 require.NoError(t, eventMgr.HandleChaincodeInstall(cc2Def, cc2DBArtifactsTar), ) eventMgr.ChaincodeInstallDone(true) require.Contains(t, handler1.eventsRecieved, cc2ExpectedEvent) require.NotContains(t, handler2.eventsRecieved, cc2ExpectedEvent) require.Contains(t, handler3.eventsRecieved, cc2ExpectedEvent) require.Equal(t, 2, handler1.doneRecievedCount) require.Equal(t, 1, handler2.doneRecievedCount) require.Equal(t, 3, handler3.doneRecievedCount) // setting cc2Def as a new lifecycle definition should cause install not to trigger event mockProvider.setChaincodeDeployed("channel1", cc2Def, false) handler1.eventsRecieved = []*mockEvent{} require.NoError(t, eventMgr.HandleChaincodeInstall(cc2Def, cc2DBArtifactsTar), ) eventMgr.ChaincodeInstallDone(true) require.NotContains(t, handler1.eventsRecieved, cc2ExpectedEvent) }
explode_data.jsonl/24684
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1169 }
[ 2830, 3393, 3706, 1556, 44, 46063, 1155, 353, 8840, 836, 8, 341, 63517, 16, 2620, 1669, 609, 18837, 1851, 10398, 63121, 25, 330, 638, 16, 497, 6079, 25, 330, 85, 16, 497, 6531, 25, 3056, 3782, 445, 638, 16, 42132, 63517, 16, 3506, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_isFourOfAKind(t *testing.T) { type args struct { hand deck.Hand } tests := []struct { name string args args want string want1 bool }{ { name: "Should be a 4 of a kind", args: args{deck.Hand{ deck.Card{Value: 8}, deck.Card{Value: 8}, deck.Card{Value: 8}, deck.Card{Value: 8}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")}, }}, want: "8", want1: true, }, { name: "Should be a 4 of a kind 2", args: args{deck.Hand{ deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")}, }}, want: "king", want1: true, }, { name: "Should not be a 4 of a kind", args: args{deck.Hand{ deck.Card{Value: 3}, deck.Card{Value: 8}, deck.Card{Value: 8}, deck.Card{Value: 8}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")}, }}, want: "", want1: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, got1 := isFourOfAKind(tt.args.hand) if got != tt.want { t.Errorf("isFourOfAKind() got = %v, want %v", got, tt.want) } if got1 != tt.want1 { t.Errorf("isFourOfAKind() got1 = %v, want %v", got1, tt.want1) } }) } }
explode_data.jsonl/7189
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 689 }
[ 2830, 3393, 6892, 26972, 2124, 32, 10629, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 9598, 437, 9530, 35308, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 220, 914, 198, 197, 31215, 220, 2827, 198, 197, 5078...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_PanicInCheckAllowed(t *testing.T) { executor.RegisterCheckRule(panicRule) defer executor.DeregisterCheckRule(panicRule) fs, err := filesystem.New() require.NoError(t, err) defer func() { _ = fs.Close() }() require.NoError(t, fs.WriteTextFile("project/main.tf", ` resource "problem" "this" { panic = true } `)) p := parser.New(parser.OptionStopOnHCLError(true)) err = p.ParseDirectory(fs.RealPath("/project")) require.NoError(t, err) modules, _, err := p.EvaluateAll() require.NoError(t, err) _, _, err = executor.New(executor.OptionStopOnErrors(false)).Execute(modules) assert.Error(t, err) }
explode_data.jsonl/26625
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 239 }
[ 2830, 3393, 1088, 31270, 641, 3973, 35382, 1155, 353, 8840, 836, 8, 1476, 67328, 4831, 19983, 3973, 11337, 7, 19079, 11337, 340, 16867, 31558, 909, 52633, 1571, 3973, 11337, 7, 19079, 11337, 692, 53584, 11, 1848, 1669, 38389, 7121, 741, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHandshakeReplayNone(t *testing.T) { sim := setupSimulator(t) for _, m := range modes { testHandshakeReplay(t, sim, numBlocks, m, false) } for _, m := range modes { testHandshakeReplay(t, sim, numBlocks, m, true) } }
explode_data.jsonl/6601
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 93 }
[ 2830, 3393, 2314, 29661, 693, 1363, 4064, 1155, 353, 8840, 836, 8, 341, 1903, 318, 1669, 6505, 14027, 10511, 1155, 692, 2023, 8358, 296, 1669, 2088, 19777, 341, 197, 18185, 2314, 29661, 693, 1363, 1155, 11, 1643, 11, 1629, 29804, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDaoUpTagState(t *testing.T) { var ( c = context.TODO() tid = int64(0) state = int32(0) ) convey.Convey("UpTagState", t, func(ctx convey.C) { affect, err := d.UpTagState(c, tid, state) ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) { ctx.So(err, convey.ShouldBeNil) ctx.So(affect, convey.ShouldNotBeNil) }) }) }
explode_data.jsonl/36693
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 176 }
[ 2830, 3393, 12197, 2324, 5668, 1397, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 1444, 257, 284, 2266, 90988, 741, 197, 3244, 307, 256, 284, 526, 21, 19, 7, 15, 340, 197, 24291, 284, 526, 18, 17, 7, 15, 340, 197, 340, 37203, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMatConvertScaleAbs(t *testing.T) { src := NewMatWithSize(100, 100, MatTypeCV32F) dst := NewMat() ConvertScaleAbs(src, &dst, 1, 0) if dst.Empty() { t.Error("TestConvertScaleAbs dst should not be empty.") } src.Close() dst.Close() }
explode_data.jsonl/81734
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 103 }
[ 2830, 3393, 11575, 12012, 6947, 27778, 1155, 353, 8840, 836, 8, 341, 41144, 1669, 1532, 11575, 2354, 1695, 7, 16, 15, 15, 11, 220, 16, 15, 15, 11, 6867, 929, 19589, 18, 17, 37, 340, 52051, 1669, 1532, 11575, 741, 197, 12012, 6947, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNewPeerHandlerOnRaftPrefix(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("test data")) }) ph := newPeerHandler(&fakeCluster{}, h, nil) srv := httptest.NewServer(ph) defer srv.Close() tests := []string{ rafthttp.RaftPrefix, rafthttp.RaftPrefix + "/hello", } for i, tt := range tests { resp, err := http.Get(srv.URL + tt) if err != nil { t.Fatalf("unexpected http.Get error: %v", err) } body, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("unexpected ioutil.ReadAll error: %v", err) } if w := "test data"; string(body) != w { t.Errorf("#%d: body = %s, want %s", i, body, w) } } }
explode_data.jsonl/565
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 309 }
[ 2830, 3393, 3564, 30888, 3050, 1925, 55535, 723, 14335, 1155, 353, 8840, 836, 8, 341, 9598, 1669, 1758, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 6692, 4073, 10556, 3782, 445, 1944, 821, 5455, 197, 3518, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStructureAssign(t *testing.T) { expect := &Structure{ Length: 2503, Checksum: "hey", Compression: compression.Gzip.String(), Depth: 11, ErrCount: 12, Encoding: "UTF-8", Entries: 3000000000, Format: "csv", Strict: true, } got := &Structure{ Length: 2000, Format: "json", } got.Assign(&Structure{ Length: 2503, Checksum: "hey", Compression: compression.Gzip.String(), Depth: 11, ErrCount: 12, Encoding: "UTF-8", Entries: 3000000000, Format: "csv", Strict: true, }) if err := CompareStructures(expect, got); err != nil { t.Error(err) } got.Assign(nil, nil) if err := CompareStructures(expect, got); err != nil { t.Error(err) } emptySt := &Structure{} emptySt.Assign(expect) if err := CompareStructures(expect, emptySt); err != nil { t.Error(err) } }
explode_data.jsonl/16439
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 426 }
[ 2830, 3393, 22952, 28933, 1155, 353, 8840, 836, 8, 341, 24952, 1669, 609, 22952, 515, 197, 197, 4373, 25, 414, 220, 17, 20, 15, 18, 345, 197, 69472, 1242, 25, 262, 330, 35561, 756, 197, 197, 81411, 25, 25111, 1224, 9964, 6431, 3148,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestAddNonReplStats(t *testing.T) { d := NewMongodbData( &StatLine{ StorageEngine: "", Time: time.Now(), UptimeNanos: 0, Insert: 0, Query: 0, Update: 0, UpdateCnt: 0, Delete: 0, GetMore: 0, Command: 0, Flushes: 0, FlushesCnt: 0, Virtual: 0, Resident: 0, QueuedReaders: 0, QueuedWriters: 0, ActiveReaders: 0, ActiveWriters: 0, AvailableReaders: 0, AvailableWriters: 0, TotalTicketsReaders: 0, TotalTicketsWriters: 0, NetIn: 0, NetOut: 0, NumConnections: 0, Passes: 0, DeletedDocuments: 0, TimedOutC: 0, NoTimeoutC: 0, PinnedC: 0, TotalC: 0, DeletedD: 0, InsertedD: 0, ReturnedD: 0, UpdatedD: 0, CurrentC: 0, AvailableC: 0, TotalCreatedC: 0, ScanAndOrderOp: 0, WriteConflictsOp: 0, TotalKeysScanned: 0, TotalObjectsScanned: 0, }, tags, ) var acc testutil.Accumulator d.AddDefaultStats() d.flush(&acc) for key := range defaultStats { require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } }
explode_data.jsonl/35773
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 892 }
[ 2830, 3393, 2212, 8121, 693, 500, 16635, 1155, 353, 8840, 836, 8, 341, 2698, 1669, 1532, 44, 21225, 1043, 1006, 197, 197, 5, 15878, 2460, 515, 298, 197, 5793, 4571, 25, 981, 8324, 298, 67567, 25, 394, 882, 13244, 3148, 298, 15980, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetState(t *testing.T) { fakeStore := &daprt.MockStateStore{} fakeStore.On("Get", mock.MatchedBy(func(req *state.GetRequest) bool { return req.Key == "fakeAPI||good-key" })).Return( &state.GetResponse{ Data: []byte("test-data"), ETag: "test-etag", }, nil) fakeStore.On("Get", mock.MatchedBy(func(req *state.GetRequest) bool { return req.Key == "fakeAPI||error-key" })).Return( nil, errors.New("failed to get state with error-key")) fakeAPI := &api{ id: "fakeAPI", stateStores: map[string]state.Store{"store1": fakeStore}, } port, _ := freeport.GetFreePort() server := startDaprAPIServer(port, fakeAPI, "") defer server.Stop() clientConn := createTestClient(port) defer clientConn.Close() client := runtimev1pb.NewDaprClient(clientConn) testCases := []struct { testName string storeName string key string errorExcepted bool expectedResponse *runtimev1pb.GetStateResponse expectedError codes.Code }{ { testName: "get state", storeName: "store1", key: "good-key", errorExcepted: false, expectedResponse: &runtimev1pb.GetStateResponse{ Data: []byte("test-data"), Etag: "test-etag", }, expectedError: codes.OK, }, { testName: "get store with non-existing store", storeName: "no-store", key: "good-key", errorExcepted: true, expectedResponse: &runtimev1pb.GetStateResponse{}, expectedError: codes.InvalidArgument, }, { testName: "get store with key but error occurs", storeName: "store1", key: "error-key", errorExcepted: true, expectedResponse: &runtimev1pb.GetStateResponse{}, expectedError: codes.Internal, }, } for _, tt := range testCases { t.Run(tt.testName, func(t *testing.T) { req := &runtimev1pb.GetStateRequest{ StoreName: tt.storeName, Key: tt.key, } resp, err := client.GetState(context.Background(), req) if !tt.errorExcepted { assert.NoError(t, err, "Expected no error") assert.Equal(t, resp.Data, tt.expectedResponse.Data, "Expected response Data to be same") assert.Equal(t, resp.Etag, tt.expectedResponse.Etag, "Expected response Etag to be same") } else { assert.Error(t, err, 
"Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) } }
explode_data.jsonl/21734
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1051 }
[ 2830, 3393, 1949, 1397, 1155, 353, 8840, 836, 8, 341, 1166, 726, 6093, 1669, 609, 91294, 3342, 24664, 1397, 6093, 16094, 1166, 726, 6093, 8071, 445, 1949, 497, 7860, 1321, 34244, 1359, 18552, 6881, 353, 2454, 2234, 1900, 8, 1807, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSpaceService_Gets(t *testing.T) { testCases := []struct { name string options *model.GetSpacesOptionScheme startAt, maxResults int mockFile string wantHTTPMethod string endpoint string context context.Context wantHTTPCodeReturn int wantErr bool }{ { name: "GetSpacesWhenTheParametersAreCorrect", options: &model.GetSpacesOptionScheme{ SpaceKeys: []string{"DUMMY", "TEST"}, SpaceIDs: []int{1111, 2222, 3333}, SpaceType: "global", Status: "archived", Labels: []string{"label-09", "label-02"}, Favorite: true, FavoriteUserKey: "DUMMY", Expand: []string{"operations"}, }, startAt: 0, maxResults: 50, mockFile: "./mocks/get-spaces.json", wantHTTPMethod: http.MethodGet, endpoint: "/wiki/rest/api/space?expand=operations&favorite=true&favouriteUserKey=DUMMY&label=label-09%2Clabel-02&limit=50&spaceId=1111&spaceId=2222&spaceId=3333&spaceKey=DUMMY&spaceKey=TEST&start=0&status=archived&type=global", context: context.Background(), wantHTTPCodeReturn: http.StatusOK, wantErr: false, }, { name: "GetSpacesWhenTheContextIsNotProvided", options: &model.GetSpacesOptionScheme{ SpaceKeys: []string{"DUMMY", "TEST"}, SpaceIDs: []int{1111, 2222, 3333}, SpaceType: "global", Status: "archived", Labels: []string{"label-09", "label-02"}, Favorite: true, FavoriteUserKey: "DUMMY", Expand: []string{"operations"}, }, startAt: 0, maxResults: 50, mockFile: "./mocks/get-spaces.json", wantHTTPMethod: http.MethodGet, endpoint: "/wiki/rest/api/space?expand=operations&favorite=true&favouriteUserKey=DUMMY&label=label-09%2Clabel-02&limit=50&spaceId=1111&spaceId=2222&spaceId=3333&spaceKey=DUMMY&spaceKey=TEST&start=0&status=archived&type=global", context: nil, wantHTTPCodeReturn: http.StatusOK, wantErr: true, }, { name: "GetSpacesWhenTheRequestMethodIsIncorrect", options: &model.GetSpacesOptionScheme{ SpaceKeys: []string{"DUMMY", "TEST"}, SpaceIDs: []int{1111, 2222, 3333}, SpaceType: "global", Status: "archived", Labels: []string{"label-09", "label-02"}, Favorite: true, FavoriteUserKey: "DUMMY", Expand: 
[]string{"operations"}, }, startAt: 0, maxResults: 50, mockFile: "./mocks/get-spaces.json", wantHTTPMethod: http.MethodPut, endpoint: "/wiki/rest/api/space?expand=operations&favorite=true&favouriteUserKey=DUMMY&label=label-09%2Clabel-02&limit=50&spaceId=1111&spaceId=2222&spaceId=3333&spaceKey=DUMMY&spaceKey=TEST&start=0&status=archived&type=global", context: context.Background(), wantHTTPCodeReturn: http.StatusOK, wantErr: true, }, { name: "GetSpacesWhenTheStatusCodeIsIncorrect", options: &model.GetSpacesOptionScheme{ SpaceKeys: []string{"DUMMY", "TEST"}, SpaceIDs: []int{1111, 2222, 3333}, SpaceType: "global", Status: "archived", Labels: []string{"label-09", "label-02"}, Favorite: true, FavoriteUserKey: "DUMMY", Expand: []string{"operations"}, }, startAt: 0, maxResults: 50, mockFile: "./mocks/get-spaces.json", wantHTTPMethod: http.MethodGet, endpoint: "/wiki/rest/api/space?expand=operations&favorite=true&favouriteUserKey=DUMMY&label=label-09%2Clabel-02&limit=50&spaceId=1111&spaceId=2222&spaceId=3333&spaceKey=DUMMY&spaceKey=TEST&start=0&status=archived&type=global", context: context.Background(), wantHTTPCodeReturn: http.StatusBadRequest, wantErr: true, }, { name: "GetSpacesWhenTheResponseBodyIsEmpty", options: &model.GetSpacesOptionScheme{ SpaceKeys: []string{"DUMMY", "TEST"}, SpaceIDs: []int{1111, 2222, 3333}, SpaceType: "global", Status: "archived", Labels: []string{"label-09", "label-02"}, Favorite: true, FavoriteUserKey: "DUMMY", Expand: []string{"operations"}, }, startAt: 0, maxResults: 50, mockFile: "./mocks/empty-json.json", wantHTTPMethod: http.MethodGet, endpoint: "/wiki/rest/api/space?expand=operations&favorite=true&favouriteUserKey=DUMMY&label=label-09%2Clabel-02&limit=50&spaceId=1111&spaceId=2222&spaceId=3333&spaceKey=DUMMY&spaceKey=TEST&start=0&status=archived&type=global", context: context.Background(), wantHTTPCodeReturn: http.StatusOK, wantErr: true, }, } for _, testCase := range testCases { testCase := testCase t.Run(testCase.name, func(t 
*testing.T) { t.Parallel() //Init a new HTTP mock server mockOptions := mockServerOptions{ Endpoint: testCase.endpoint, MockFilePath: testCase.mockFile, MethodAccepted: testCase.wantHTTPMethod, ResponseCodeWanted: testCase.wantHTTPCodeReturn, } mockServer, err := startMockServer(&mockOptions) if err != nil { t.Fatal(err) } defer mockServer.Close() //Init the library instance mockClient, err := startMockClient(mockServer.URL) if err != nil { t.Fatal(err) } service := &SpaceService{client: mockClient} gotResult, gotResponse, err := service.Gets( testCase.context, testCase.options, testCase.startAt, testCase.maxResults, ) if testCase.wantErr { if err != nil { t.Logf("error returned: %v", err.Error()) } assert.Error(t, err) } else { assert.NoError(t, err) assert.NotEqual(t, gotResponse, nil) assert.NotEqual(t, gotResult, nil) apiEndpoint, err := url.Parse(gotResponse.Endpoint) if err != nil { t.Fatal(err) } var endpointToAssert string if apiEndpoint.Query().Encode() != "" { endpointToAssert = fmt.Sprintf("%v?%v", apiEndpoint.Path, apiEndpoint.Query().Encode()) } else { endpointToAssert = apiEndpoint.Path } t.Logf("HTTP Endpoint Wanted: %v, HTTP Endpoint Returned: %v", testCase.endpoint, endpointToAssert) assert.Equal(t, testCase.endpoint, endpointToAssert) } }) } }
explode_data.jsonl/39243
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 3212 }
[ 2830, 3393, 9914, 1860, 2646, 1415, 1155, 353, 8840, 836, 8, 1476, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 394, 914, 198, 197, 35500, 1797, 353, 2528, 2234, 71324, 5341, 28906, 198, 197, 21375, 1655, 11, 1932, 9801, 526, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestIdnHostname(t *testing.T) { GenerateValuesAsYaml(t, "idnhostname.test.schema.json", func(console *tests.ConsoleWrapper, donec chan struct{}) { defer close(donec) // Test boolean type console.ExpectString("Enter a value for hostnameValue") console.SendLine("*****") console.ExpectString("Sorry, your reply was invalid: ***** is not a RFC 1034 hostname, " + "it should be like example.com") console.ExpectString("Enter a value for hostnameValue") console.SendLine("example.com") console.ExpectEOF() }) }
explode_data.jsonl/61769
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 199 }
[ 2830, 3393, 764, 77, 88839, 1155, 353, 8840, 836, 8, 341, 197, 31115, 6227, 2121, 56, 9467, 1155, 11, 330, 307, 77, 27806, 5958, 30892, 4323, 756, 197, 29244, 52818, 353, 23841, 46298, 11542, 11, 2814, 66, 26023, 2036, 28875, 341, 298...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCorsHandlerWithOrigins(t *testing.T) { tests := []struct { name string origins []string reqOrigin string expect string }{ { name: "allow all origins", expect: allOrigins, }, { name: "allow one origin", origins: []string{"http://local"}, reqOrigin: "http://local", expect: "http://local", }, { name: "allow many origins", origins: []string{"http://local", "http://remote"}, reqOrigin: "http://local", expect: "http://local", }, { name: "allow all origins", reqOrigin: "http://local", expect: "*", }, { name: "allow many origins with all mark", origins: []string{"http://local", "http://remote", "*"}, reqOrigin: "http://another", expect: "http://another", }, { name: "not allow origin", origins: []string{"http://local", "http://remote"}, reqOrigin: "http://another", }, } methods := []string{ http.MethodOptions, http.MethodGet, http.MethodPost, } for _, test := range tests { for _, method := range methods { test := test t.Run(test.name+"-handler", func(t *testing.T) { r := httptest.NewRequest(method, "http://localhost", nil) r.Header.Set(originHeader, test.reqOrigin) w := httptest.NewRecorder() handler := NotAllowedHandler(test.origins...) 
handler.ServeHTTP(w, r) if method == http.MethodOptions { assert.Equal(t, http.StatusNoContent, w.Result().StatusCode) } else { assert.Equal(t, http.StatusNotFound, w.Result().StatusCode) } assert.Equal(t, test.expect, w.Header().Get(allowOrigin)) }) } } for _, test := range tests { for _, method := range methods { test := test t.Run(test.name+"-middleware", func(t *testing.T) { r := httptest.NewRequest(method, "http://localhost", nil) r.Header.Set(originHeader, test.reqOrigin) w := httptest.NewRecorder() handler := Middleware(test.origins...)(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) handler.ServeHTTP(w, r) if method == http.MethodOptions { assert.Equal(t, http.StatusNoContent, w.Result().StatusCode) } else { assert.Equal(t, http.StatusOK, w.Result().StatusCode) } assert.Equal(t, test.expect, w.Header().Get(allowOrigin)) }) } } }
explode_data.jsonl/72804
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1007 }
[ 2830, 3393, 78063, 3050, 2354, 62726, 1330, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 414, 914, 198, 197, 197, 4670, 1330, 256, 3056, 917, 198, 197, 24395, 13298, 914, 198, 197, 24952, 262, 914, 198, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestKubeServiceGet(t *testing.T) { testCases := []struct { expectedName string data []byte err error }{ { expectedName: "kube-name-1234", data: []byte(`{"name":"kube-name-1234"}`), err: nil, }, { data: nil, err: errors.New("test err"), }, } prefix := DefaultStoragePrefix for _, testCase := range testCases { m := new(testutils.MockStorage) m.On("Get", context.Background(), prefix, "fake_id"). Return(testCase.data, testCase.err) service := NewService(prefix, m, nil) kube, err := service.Get(context.Background(), "fake_id") if testCase.err != errors.Cause(err) { t.Errorf("Wrong error expected %v actual %v", testCase.err, err) return } if testCase.err == nil && kube.Name != testCase.expectedName { t.Errorf("Wrong kube name expected %s actual %s", testCase.expectedName, kube.Name) } } }
explode_data.jsonl/1994
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 385 }
[ 2830, 3393, 42, 3760, 1860, 1949, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 42400, 675, 914, 198, 197, 8924, 260, 3056, 3782, 198, 197, 9859, 688, 1465, 198, 197, 59403, 197, 197, 515, 298, 42400, 675, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestClient_Stdin(t *testing.T) { // Overwrite stdin for this test with a temporary file tf, err := ioutil.TempFile("", "terraform") if err != nil { t.Fatalf("err: %s", err) } defer os.Remove(tf.Name()) defer tf.Close() if _, err = tf.WriteString("hello"); err != nil { t.Fatalf("error: %s", err) } if err = tf.Sync(); err != nil { t.Fatalf("error: %s", err) } if _, err = tf.Seek(0, 0); err != nil { t.Fatalf("error: %s", err) } oldStdin := os.Stdin defer func() { os.Stdin = oldStdin }() os.Stdin = tf process := helperProcess("stdin") c := NewClient(&ClientConfig{ Cmd: process, HandshakeConfig: testHandshake, Plugins: testPluginMap, }) defer c.Kill() _, err = c.Start() if err != nil { t.Fatalf("error: %s", err) } for { if c.Exited() { break } time.Sleep(50 * time.Millisecond) } if !process.ProcessState.Success() { t.Fatal("process didn't exit cleanly") } }
explode_data.jsonl/57850
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 419 }
[ 2830, 3393, 2959, 62, 22748, 258, 1155, 353, 8840, 836, 8, 341, 197, 322, 6065, 4934, 31369, 369, 419, 1273, 448, 264, 13340, 1034, 198, 3244, 69, 11, 1848, 1669, 43144, 65009, 1703, 19814, 330, 61385, 1138, 743, 1848, 961, 2092, 341,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPackUnpack(t *testing.T) { v := []string{"test-filename/with-subdir"} testOptsList := []options{ nil, options{ "tsize": "1234", "blksize": "22", }, } for _, filename := range v { for _, mode := range []string{"octet", "netascii"} { for _, opts := range testOptsList { packUnpack(t, filename, mode, opts) } } } }
explode_data.jsonl/17544
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 162 }
[ 2830, 3393, 30684, 1806, 4748, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 3056, 917, 4913, 1944, 2220, 4033, 14, 4197, 17967, 3741, 16707, 18185, 43451, 852, 1669, 3056, 2875, 515, 197, 84131, 345, 197, 35500, 515, 298, 197, 1, 83, 214...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDockerSaveToFileNoImages(t *testing.T) { file := "file" g := NewWithT(t) ctx := context.Background() mockCtrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(mockCtrl) executable.EXPECT().Execute(ctx, "save", "-o", file).Return(bytes.Buffer{}, nil) d := executables.NewDocker(executable) g.Expect(d.SaveToFile(ctx, file)).To(Succeed()) }
explode_data.jsonl/6808
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 149 }
[ 2830, 3393, 35, 13659, 8784, 41550, 2753, 14228, 1155, 353, 8840, 836, 8, 341, 17661, 1669, 330, 1192, 1837, 3174, 1669, 1532, 2354, 51, 1155, 340, 20985, 1669, 2266, 19047, 741, 77333, 15001, 1669, 342, 316, 1176, 7121, 2051, 1155, 692...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRSAPublicKeySKI(t *testing.T) { t.Parallel() provider, _, cleanup := currentTestConfig.Provider(t) defer cleanup() k, err := provider.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false}) if err != nil { t.Fatalf("Failed generating RSA key [%s]", err) } pk, err := k.PublicKey() if err != nil { t.Fatalf("Failed getting public key from private RSA key [%s]", err) } ski := pk.SKI() if len(ski) == 0 { t.Fatal("SKI not valid. Zero length.") } }
explode_data.jsonl/29287
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 190 }
[ 2830, 3393, 11451, 2537, 475, 1592, 81545, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 19979, 11, 8358, 21290, 1669, 1482, 2271, 2648, 36208, 1155, 340, 16867, 21290, 2822, 16463, 11, 1848, 1669, 9109, 9610, 9967, 2099, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestArrayOfGenericAlg(t *testing.T) { at1 := ArrayOf(5, TypeOf(string(""))) at := ArrayOf(6, at1) v1 := New(at).Elem() v2 := New(at).Elem() if v1.Interface() != v1.Interface() { t.Errorf("constructed array %v not equal to itself", v1.Interface()) } v1.Index(0).Index(0).Set(ValueOf("abc")) v2.Index(0).Index(0).Set(ValueOf("efg")) if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 { t.Errorf("constructed arrays %v and %v should not be equal", i1, i2) } v1.Index(0).Index(0).Set(ValueOf("abc")) v2.Index(0).Index(0).Set(ValueOf((v1.Index(0).Index(0).String() + " ")[:3])) if i1, i2 := v1.Interface(), v2.Interface(); i1 != i2 { t.Errorf("constructed arrays %v and %v should be equal", i1, i2) } // Test hash m := MakeMap(MapOf(at, TypeOf(int(0)))) m.SetMapIndex(v1, ValueOf(1)) if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() { t.Errorf("constructed arrays %v and %v have different hashes", i1, i2) } }
explode_data.jsonl/29596
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 421 }
[ 2830, 3393, 62656, 19964, 86895, 1155, 353, 8840, 836, 8, 341, 35447, 16, 1669, 2910, 2124, 7, 20, 11, 3990, 2124, 3609, 445, 29836, 35447, 1669, 2910, 2124, 7, 21, 11, 518, 16, 340, 5195, 16, 1669, 1532, 19356, 568, 25586, 741, 519...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestZeroValueStats(t *testing.T) { stats := &dtypes.StatsJSON{ Stats: dtypes.Stats{ BlkioStats: dtypes.BlkioStats{}, CPUStats: dtypes.CPUStats{}, MemoryStats: dtypes.MemoryStats{}, }, Networks: nil, } containers := containerJSON(t) config := &Config{} md, err := ContainerStatsToMetrics(stats, containers, config) assert.Nil(t, err) assert.NotNil(t, md) metrics := []Metric{ {name: "container.cpu.usage.system", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "ns", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.usage.total", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "ns", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.usage.kernelmode", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "ns", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.usage.usermode", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "ns", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.throttling_data.periods", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "1", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.throttling_data.throttled_periods", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "1", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.throttling_data.throttled_time", mtype: metricspb.MetricDescriptor_CUMULATIVE_INT64, unit: "ns", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.cpu.percent", mtype: metricspb.MetricDescriptor_GAUGE_DOUBLE, unit: "1", labelKeys: nil, values: []Value{{labelValues: nil, doubleValue: 0}}}, {name: "container.memory.usage.limit", mtype: metricspb.MetricDescriptor_GAUGE_INT64, unit: "By", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.memory.usage.total", mtype: metricspb.MetricDescriptor_GAUGE_INT64, 
unit: "By", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, {name: "container.memory.percent", mtype: metricspb.MetricDescriptor_GAUGE_DOUBLE, unit: "1", labelKeys: nil, values: []Value{{labelValues: nil, doubleValue: 0}}}, {name: "container.memory.usage.max", mtype: metricspb.MetricDescriptor_GAUGE_INT64, unit: "By", labelKeys: nil, values: []Value{{labelValues: nil, value: 0}}}, } assertMetricsDataEqual(t, metrics, nil, md) }
explode_data.jsonl/4108
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 923 }
[ 2830, 3393, 17999, 1130, 16635, 1155, 353, 8840, 836, 8, 341, 79659, 1669, 609, 8047, 1804, 7758, 1862, 5370, 515, 197, 197, 16635, 25, 7594, 1804, 7758, 1862, 515, 298, 197, 4923, 74, 815, 16635, 25, 220, 7594, 1804, 21569, 74, 815, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUseSSL(t *testing.T) { assert := assert.New(t) cluster := testCluster(t) assert.Equal(false, UseSSL(cluster)) cluster = testSecuredCluster(t) assert.Equal(true, UseSSL(cluster)) }
explode_data.jsonl/28888
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 78 }
[ 2830, 3393, 10253, 22594, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 197, 18855, 1669, 1273, 28678, 1155, 340, 6948, 12808, 3576, 11, 5443, 22594, 48030, 1171, 197, 18855, 284, 1273, 8430, 3073, 28678, 1155, 340, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestConfig_Listener(t *testing.T) { config := DefaultConfig() // Fails on invalid input if ln, err := config.Listener("tcp", "nope", 8080); err == nil { ln.Close() t.Fatalf("expected addr error") } if ln, err := config.Listener("nope", "127.0.0.1", 8080); err == nil { ln.Close() t.Fatalf("expected protocol err") } if ln, err := config.Listener("tcp", "127.0.0.1", -1); err == nil { ln.Close() t.Fatalf("expected port error") } // Works with valid inputs ln, err := config.Listener("tcp", "127.0.0.1", 24000) if err != nil { t.Fatalf("err: %s", err) } ln.Close() if net := ln.Addr().Network(); net != "tcp" { t.Fatalf("expected tcp, got: %q", net) } if addr := ln.Addr().String(); addr != "127.0.0.1:24000" { t.Fatalf("expected 127.0.0.1:4646, got: %q", addr) } // Falls back to default bind address if non provided config.BindAddr = "0.0.0.0" ln, err = config.Listener("tcp4", "", 24000) if err != nil { t.Fatalf("err: %s", err) } ln.Close() if addr := ln.Addr().String(); addr != "0.0.0.0:24000" { t.Fatalf("expected 0.0.0.0:24000, got: %q", addr) } }
explode_data.jsonl/76936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 483 }
[ 2830, 3393, 2648, 62, 2743, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 7899, 2648, 2822, 197, 322, 434, 6209, 389, 8318, 1946, 198, 743, 29390, 11, 1848, 1669, 2193, 64091, 445, 27161, 497, 330, 2152, 375, 497, 220, 23, 15, 23, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestMarkAsSuccess(t *testing.T) { t.Run("nil", func(t *testing.T) { assert.Nil(t, circuitbreaker.MarkAsSuccess(nil)) }) t.Run("MarkAsSuccess", func(t *testing.T) { originalErr := errors.New("logic error") err := circuitbreaker.MarkAsSuccess(originalErr) assert.Equal(t, err.Error(), "circuitbreaker mark this error as a success: logic error") nfe, ok := err.(*circuitbreaker.SuccessMarkableError) assert.True(t, ok) assert.Equal(t, nfe.Unwrap(), originalErr) }) }
explode_data.jsonl/8219
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 186 }
[ 2830, 3393, 8949, 2121, 7188, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 8385, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 6948, 59678, 1155, 11, 16224, 64121, 75888, 2121, 7188, 27907, 1171, 197, 3518, 3244, 16708, 445, 8949, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIncrementAndGet(t *testing.T) { seq := newSequence() id := seq.incrementAndGet() if id != 0 { t.Error("incrementAndGet() should increment 1 and return the value.") } id = seq.incrementAndGet() if id != 1 { t.Error("incrementAndGet() should increment 1 and return the value.") } }
explode_data.jsonl/62906
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 38311, 97726, 1155, 353, 8840, 836, 8, 341, 78561, 1669, 501, 14076, 741, 15710, 1669, 12981, 56936, 97726, 741, 743, 877, 961, 220, 15, 341, 197, 3244, 6141, 445, 35744, 97726, 368, 1265, 16252, 220, 16, 323, 470, 279, 89...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPathToSubject(t *testing.T) { type io struct { p string s string } a := []io{ {p: "/", s: ""}, {p: "/foo", s: "foo"}, {p: "/foo", s: "foo"}, {p: "/foo/", s: "foo"}, {p: "/foo/foo", s: "foo.foo"}, } for _, tc := range a { v := PathToSubject(tc.p) if v != tc.s { t.Errorf("expected '%s' to become '%s' but got '%s'", tc.p, tc.s, v) } } }
explode_data.jsonl/80471
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 194 }
[ 2830, 3393, 1820, 1249, 13019, 1155, 353, 8840, 836, 8, 341, 13158, 6399, 2036, 341, 197, 3223, 914, 198, 197, 1903, 914, 198, 197, 532, 11323, 1669, 3056, 815, 515, 197, 197, 90, 79, 25, 64657, 274, 25, 77496, 197, 197, 90, 79, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUsersCountsBatchOtherString(t *testing.T) { param := apiParam.UsersCountsBatchOtherParam{} apiHelper.SetDefaultValues(&param) param.Uids = "5225532117" body, err := UsersCountsBatchOtherString(param, "2.00ZrQ6BDnYTUdC7164d3f05bY5kDBEsdfs") if err != nil { t.Errorf("UsersCountsBatchOtherString error : %v\n", err) return } com := []resp.UsersCountsBatchOtherResp{} error := json.Unmarshal([]byte(body), &com) if error != nil { t.Errorf("to json error : %v\n", error.Error()) } else { t.Errorf("get body mid => %v\n", com[0].ID) } }
explode_data.jsonl/46943
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 237 }
[ 2830, 3393, 7137, 63731, 21074, 11409, 703, 1155, 353, 8840, 836, 8, 341, 36037, 1669, 6330, 2001, 36782, 63731, 21074, 11409, 2001, 31483, 54299, 5511, 4202, 3675, 6227, 2099, 903, 692, 36037, 5255, 3365, 284, 330, 20, 17, 17, 20, 20, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestIntersect(t *testing.T) { a := newListPostings(1, 2, 3) b := newListPostings(2, 3, 4) var cases = []struct { in []Postings res Postings }{ { in: []Postings{}, res: EmptyPostings(), }, { in: []Postings{a, b, EmptyPostings()}, res: EmptyPostings(), }, { in: []Postings{b, a, EmptyPostings()}, res: EmptyPostings(), }, { in: []Postings{EmptyPostings(), b, a}, res: EmptyPostings(), }, { in: []Postings{EmptyPostings(), a, b}, res: EmptyPostings(), }, { in: []Postings{a, EmptyPostings(), b}, res: EmptyPostings(), }, { in: []Postings{b, EmptyPostings(), a}, res: EmptyPostings(), }, { in: []Postings{b, EmptyPostings(), a, a, b, a, a, a}, res: EmptyPostings(), }, { in: []Postings{ newListPostings(1, 2, 3, 4, 5), newListPostings(6, 7, 8, 9, 10), }, res: newListPostings(), }, { in: []Postings{ newListPostings(1, 2, 3, 4, 5), newListPostings(4, 5, 6, 7, 8), }, res: newListPostings(4, 5), }, { in: []Postings{ newListPostings(1, 2, 3, 4, 9, 10), newListPostings(1, 4, 5, 6, 7, 8, 10, 11), }, res: newListPostings(1, 4, 10), }, { in: []Postings{ newListPostings(1), newListPostings(0, 1), }, res: newListPostings(1), }, { in: []Postings{ newListPostings(1), }, res: newListPostings(1), }, { in: []Postings{ newListPostings(1), newListPostings(), }, res: newListPostings(), }, { in: []Postings{ newListPostings(), newListPostings(), }, res: newListPostings(), }, } for _, c := range cases { t.Run("", func(t *testing.T) { if c.res == nil { t.Fatal("intersect result expectancy cannot be nil") } expected, err := ExpandPostings(c.res) testutil.Ok(t, err) i := Intersect(c.in...) if c.res == EmptyPostings() { testutil.Equals(t, EmptyPostings(), i) return } if i == EmptyPostings() { t.Fatal("intersect unexpected result: EmptyPostings sentinel") } res, err := ExpandPostings(i) testutil.Ok(t, err) testutil.Equals(t, expected, res) }) } }
explode_data.jsonl/13123
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1067 }
[ 2830, 3393, 3306, 9687, 1155, 353, 8840, 836, 8, 341, 11323, 1669, 67418, 4133, 819, 7, 16, 11, 220, 17, 11, 220, 18, 340, 2233, 1669, 67418, 4133, 819, 7, 17, 11, 220, 18, 11, 220, 19, 692, 2405, 5048, 284, 3056, 1235, 341, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestErrorOptionNotRecognized(t *testing.T) { type config struct { Var string `env:"VAR,not_supported!"` } cfg := &config{} assert.EqualError(t, Parse(cfg), "env: tag option \"not_supported!\" not supported") }
explode_data.jsonl/78795
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 80 }
[ 2830, 3393, 1454, 5341, 2623, 17915, 1506, 1155, 353, 8840, 836, 8, 341, 13158, 2193, 2036, 341, 197, 197, 3962, 914, 1565, 3160, 2974, 33040, 11, 1921, 57885, 0, 8805, 197, 630, 50286, 1669, 609, 1676, 16094, 6948, 12808, 1454, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestUptimeMonitorMonitorToBaseMonitorMapper(t *testing.T) { uptimeMonitorObject := UptimeMonitorMonitor{Name: "Test Monitor", PK: 124, MspAddress: "https://stakater.com", MspInterval: 5, CheckType: "HTTP"} monitorObject := UptimeMonitorMonitorToBaseMonitorMapper(uptimeMonitorObject) if monitorObject.ID != strconv.Itoa(uptimeMonitorObject.PK) || monitorObject.Name != uptimeMonitorObject.Name || monitorObject.URL != uptimeMonitorObject.MspAddress || "5" != monitorObject.Annotations["uptime.monitor.stakater.com/interval"] || "HTTP" != monitorObject.Annotations["uptime.monitor.stakater.com/check_type"] { t.Error("Correct: \n", uptimeMonitorObject.Name, uptimeMonitorObject.PK, uptimeMonitorObject.MspAddress, uptimeMonitorObject.MspInterval, uptimeMonitorObject.CheckType) t.Error("Parsed: \n", monitorObject.Name, monitorObject.ID, monitorObject.URL, monitorObject.Annotations["uptime.monitor.stakater.com/interval"], monitorObject.Annotations["uptime.monitor.stakater.com/check_type"], ) t.Error("Mapper did not map the values correctly") } }
explode_data.jsonl/56709
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 409 }
[ 2830, 3393, 52, 28941, 30098, 30098, 1249, 3978, 30098, 10989, 1155, 353, 8840, 836, 8, 341, 197, 74659, 30098, 1190, 1669, 547, 28941, 30098, 30098, 63121, 25, 330, 2271, 23519, 756, 197, 10025, 42, 25, 688, 220, 16, 17, 19, 345, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestDingInstance(t *testing.T) { var ( err error obj *Client config string res string ) c.Convey("Define configuration", t, func() { config = configTpl c.Convey("Parse configuration", func() { err = beemod.Register(DefaultBuild).SetCfg([]byte(config), "toml").Run() c.So(err, c.ShouldBeNil) c.Convey("Set configuration group (initialization)", func() { obj = Invoker("myding") c.So(obj, c.ShouldNotBeNil) c.Convey("testing method", func() { res, err = obj.SendMsg("TESTa") c.So(err, c.ShouldBeNil) t.Log(res) }) }) }) }) }
explode_data.jsonl/29795
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 273 }
[ 2830, 3393, 35, 287, 2523, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 9859, 262, 1465, 198, 197, 22671, 262, 353, 2959, 198, 197, 25873, 914, 198, 197, 10202, 262, 914, 198, 197, 340, 1444, 4801, 5617, 445, 35338, 6546, 497, 259...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSetUuidIfNeeded(t *testing.T) { val := &a{} _, _, _, err := handleNodeState(nil, "UUID") require.NotNil(t, err) v := reflect.ValueOf(val) isNew, _, _, err := handleNodeState(&v, "UUID") require.Nil(t, err) require.True(t, isNew) val.UUID = "dasdfasd" v = reflect.ValueOf(val) isNew, _, _, err = handleNodeState(&v, "UUID") require.Nil(t, err) require.True(t, isNew) val.UUID = "dasdfasd" val.LoadMap = map[string]*RelationConfig{} v = reflect.ValueOf(val) isNew, _, _, err = handleNodeState(&v, "UUID") require.Nil(t, err) require.True(t, isNew) val.UUID = "dasdfasd" val.LoadMap = nil v = reflect.ValueOf(val) isNew, _, _, err = handleNodeState(&v, "UUID") require.Nil(t, err) require.True(t, isNew) val.UUID = "dasdfasd" val.LoadMap = map[string]*RelationConfig{ "dasdfasd": { Ids: []int64{69}, RelationType: Single, }, } v = reflect.ValueOf(val) isNew, _, _, err = handleNodeState(&v, "UUID") require.Nil(t, err) require.False(t, isNew) }
explode_data.jsonl/71803
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 476 }
[ 2830, 3393, 1649, 38431, 95803, 1155, 353, 8840, 836, 8, 341, 19302, 1669, 609, 64, 31483, 197, 6878, 8358, 8358, 1848, 1669, 3705, 1955, 1397, 27907, 11, 330, 24754, 1138, 17957, 93882, 1155, 11, 1848, 692, 5195, 1669, 8708, 6167, 2124...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDefaults(t *testing.T) { b := MoreDefaultsB{} aa := test.A{} a := &MoreDefaultsA{} b2 := a.GetB2() a2 := a.GetA2() if a.GetField1() != 1234 { t.Fatalf("Field1 wrong") } if a.GetField2() != 0 { t.Fatalf("Field2 wrong") } if a.GetB1() != nil { t.Fatalf("B1 wrong") } if b2.GetField1() != b.GetField1() { t.Fatalf("B2 wrong") } if a.GetA1() != nil { t.Fatalf("A1 wrong") } if a2.GetNumber() != aa.GetNumber() { t.Fatalf("A2 wrong") } }
explode_data.jsonl/15312
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 264 }
[ 2830, 3393, 16273, 1155, 353, 8840, 836, 8, 972, 2233, 1669, 4398, 16273, 33, 90, 1771, 197, 5305, 1669, 1273, 875, 90, 1771, 11323, 1669, 609, 7661, 16273, 32, 90, 1771, 2233, 17, 1669, 264, 2234, 33, 17, 3568, 11323, 17, 1669, 264...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestServerlessJSON(t *testing.T) { cfgFn := func(cfg *Config) { cfg.ServerlessMode.Enabled = true } app := testApp(nil, cfgFn, t) txn := app.StartTransaction("hello") txn.Private.(internal.AddAgentAttributer).AddAgentAttribute(AttributeAWSLambdaARN, "thearn", nil) txn.End() buf := &bytes.Buffer{} internal.ServerlessWrite(app.Application.Private, "lambda-test-arn", buf) metadata, data, err := parseServerlessPayload(buf.Bytes()) if err != nil { t.Fatal(err) } // Data should contain txn event and metrics. Timestamps make exact // JSON comparison tough. if v := data["metric_data"]; nil == v { t.Fatal(data) } if v := data["analytic_event_data"]; nil == v { t.Fatal(data) } if v := string(metadata["arn"]); v != `"lambda-test-arn"` { t.Fatal(v) } if v := string(metadata["agent_version"]); v != `"`+Version+`"` { t.Fatal(v) } }
explode_data.jsonl/52869
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 345 }
[ 2830, 3393, 5475, 1717, 5370, 1155, 353, 8840, 836, 8, 341, 50286, 24911, 1669, 2915, 28272, 353, 2648, 8, 341, 197, 50286, 22997, 1717, 3636, 13690, 284, 830, 198, 197, 532, 28236, 1669, 1273, 2164, 27907, 11, 13286, 24911, 11, 259, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSnapshotWithLabel(t *testing.T) { if test.UpdateGoldenGlobal { t.Skip() } a := struct { A string B int C bool D *string }{ A: "foo", B: 1, C: true, D: swag.String("bar"), } b := "Hello World!" test.Snapshoter.Label("_A").Save(t, a) test.Snapshoter.Label("_B").Save(t, b) }
explode_data.jsonl/17631
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 148 }
[ 2830, 3393, 15009, 2354, 2476, 1155, 353, 8840, 836, 8, 341, 743, 1273, 16689, 59790, 11646, 341, 197, 3244, 57776, 741, 197, 532, 11323, 1669, 2036, 341, 197, 22985, 914, 198, 197, 12791, 526, 198, 197, 6258, 1807, 198, 197, 10957, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestServer_Response_NoData_Header_FooBar(t *testing.T) { testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { w.Header().Set("Foo-Bar", "some-value") return nil }, func(st *serverTester) { getSlash(st) hf := st.wantHeaders() if !hf.StreamEnded() { t.Fatal("want END_STREAM flag") } if !hf.HeadersEnded() { t.Fatal("want END_HEADERS flag") } goth := st.decodeHeader(hf.HeaderBlockFragment()) wanth := [][2]string{ {":status", "200"}, {"foo-bar", "some-value"}, {"content-type", "text/plain; charset=utf-8"}, {"content-length", "0"}, } if !reflect.DeepEqual(goth, wanth) { t.Errorf("Got headers %v; want %v", goth, wanth) } }) }
explode_data.jsonl/71662
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 315 }
[ 2830, 3393, 5475, 65873, 36989, 1043, 71353, 1400, 2624, 3428, 1155, 353, 8840, 836, 8, 341, 18185, 5475, 2582, 1155, 11, 2915, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 1465, 341, 197, 6692, 15753, 1005, 1649, 445, 40923, 93320, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMetricsAndRateLimitAndRulesAndBookinfo(t *testing.T) { t.Skip("https://github.com/istio/istio/issues/6309") if err := replaceRouteRule(routeReviewsV3Rule); err != nil { fatalf(t, "Could not create replace reviews routing rule: %v", err) } defer func() { if err := deleteRoutingConfig(routeReviewsV3Rule); err != nil { t.Fatalf("Could not delete reviews routing rule: %v", err) } }() // the rate limit rule applies a max rate limit of 1 rps to the ratings service. if err := applyMixerRule(rateLimitRule); err != nil { fatalf(t, "could not create required mixer rule: %v", err) } defer func() { if err := deleteMixerRule(rateLimitRule); err != nil { t.Logf("could not clear rule: %v", err) } }() allowRuleSync() // setup prometheus API promAPI, err := promAPI() if err != nil { fatalf(t, "Could not build prometheus API client: %v", err) } // establish baseline initPrior429s, initPrior200s := fetchRequestCount(t, promAPI, "ratings", "", 0) _ = sendTraffic(t, "Warming traffic...", 150) allowPrometheusSync() prior429s, prior200s := fetchRequestCount(t, promAPI, "ratings", "", initPrior429s+initPrior200s+150) // check if at least one more prior429 was reported if prior429s-initPrior429s < 1 { fatalf(t, "no 429 is allotted time: prior429s:%v", prior429s) } res := sendTraffic(t, "Sending traffic...", 300) allowPrometheusSync() totalReqs := res.DurationHistogram.Count succReqs := float64(res.RetCodes[http.StatusOK]) badReqs := res.RetCodes[http.StatusBadRequest] actualDuration := res.ActualDuration.Seconds() // can be a bit more than requested log.Info("Successfully sent request(s) to /productpage; checking metrics...") t.Logf("Fortio Summary: %d reqs (%f rps, %f 200s (%f rps), %d 400s - %+v)", totalReqs, res.ActualQPS, succReqs, succReqs/actualDuration, badReqs, res.RetCodes) // consider only successful requests (as recorded at productpage service) callsToRatings := succReqs // the rate-limit is 1 rps want200s := 1. 
* actualDuration // everything in excess of 200s should be 429s (ideally) want429s := callsToRatings - want200s t.Logf("Expected Totals: 200s: %f (%f rps), 429s: %f (%f rps)", want200s, want200s/actualDuration, want429s, want429s/actualDuration) // if we received less traffic than the expected enforced limit to ratings // then there is no way to determine if the rate limit was applied at all // and for how much traffic. log all metrics and abort test. if callsToRatings < want200s { t.Logf("full set of prometheus metrics:\n%s", promDump(promAPI, "istio_requests_total")) fatalf(t, "Not enough traffic generated to exercise rate limit: ratings_reqs=%f, want200s=%f", callsToRatings, want200s) } got200s, got429s := fetchRequestCount(t, promAPI, "ratings", "destination_version=\"v1\"", prior429s+prior200s+300) if got429s == 0 { t.Logf("prometheus values for istio_requests_total:\n%s", promDump(promAPI, "istio_requests_total")) errorf(t, "Could not find 429s: %v", err) } // Lenient calculation TODO: tighten/simplify want429s = math.Floor(want429s * .25) got429s = got429s - prior429s t.Logf("Actual 429s: %f (%f rps)", got429s, got429s/actualDuration) // check resource exhausted if got429s < want429s { t.Logf("prometheus values for istio_requests_total:\n%s", promDump(promAPI, "istio_requests_total")) errorf(t, "Bad metric value for rate-limited requests (429s): got %f, want at least %f", got429s, want429s) } if got200s == 0 { t.Logf("prometheus values for istio_requests_total:\n%s", promDump(promAPI, "istio_requests_total")) errorf(t, "Could not find successes value: %v", err) } got200s = got200s - prior200s t.Logf("Actual 200s: %f (%f rps), expecting ~1 rps", got200s, got200s/actualDuration) // establish some baseline to protect against flakiness due to randomness in routing // and to allow for leniency in actual ceiling of enforcement (if 10 is the limit, but we allow slightly // less than 10, don't fail this test). 
want := math.Floor(want200s * .25) // check successes if got200s < want { t.Logf("prometheus values for istio_requests_total:\n%s", promDump(promAPI, "istio_requests_total")) errorf(t, "Bad metric value for successful requests (200s): got %f, want at least %f", got200s, want) } // TODO: until https://github.com/istio/istio/issues/3028 is fixed, use 25% - should be only 5% or so want200s = math.Ceil(want200s * 1.5) if got200s > want { t.Logf("prometheus values for istio_requests_total:\n%s", promDump(promAPI, "istio_requests_total")) errorf(t, "Bad metric value for successful requests (200s): got %f, want at most %f", got200s, want200s) } }
explode_data.jsonl/81442
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1666 }
[ 2830, 3393, 27328, 3036, 11564, 16527, 3036, 26008, 3036, 7134, 2733, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 445, 2428, 1110, 5204, 905, 14, 380, 815, 14, 380, 815, 38745, 14, 21, 18, 15, 24, 5130, 743, 1848, 1669, 8290, 4899, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTimeouts(t *testing.T) { if testutils.RunningOnCircleCI() { t.Skip("Skipping as not supported on CIRCLE CI kernel") } defer testutils.SetupTestOSContext(t)() i, err := New("") assert.NilError(t, err) _, err = i.GetConfig() assert.NilError(t, err) cfg := Config{66 * time.Second, 66 * time.Second, 66 * time.Second} err = i.SetConfig(&cfg) assert.NilError(t, err) c2, err := i.GetConfig() assert.NilError(t, err) assert.DeepEqual(t, cfg, *c2) // A timeout value 0 means that the current timeout value of the corresponding entry is preserved cfg = Config{77 * time.Second, 0 * time.Second, 77 * time.Second} err = i.SetConfig(&cfg) assert.NilError(t, err) c3, err := i.GetConfig() assert.NilError(t, err) assert.DeepEqual(t, *c3, Config{77 * time.Second, 66 * time.Second, 77 * time.Second}) }
explode_data.jsonl/47024
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 322 }
[ 2830, 3393, 7636, 82, 1155, 353, 8840, 836, 8, 341, 743, 1273, 6031, 2013, 11216, 1925, 25199, 11237, 368, 341, 197, 3244, 57776, 445, 85945, 438, 537, 7248, 389, 356, 64012, 20694, 10001, 1138, 197, 532, 16867, 1273, 6031, 39820, 2271,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2