text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestLDSIngressRouteTCPForward(t *testing.T) { rh, cc, done := setup(t) defer done() // s1 is a tls secret s1 := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "secret", Namespace: "default", }, Type: "kubernetes.io/tls", Data: map[string][]byte{ v1.TLSCertKey: []byte("certificate"), v1.TLSPrivateKeyKey: []byte("key"), }, } i1 := &ingressroutev1.IngressRoute{ ObjectMeta: metav1.ObjectMeta{ Name: "simple", Namespace: "default", }, Spec: ingressroutev1.IngressRouteSpec{ VirtualHost: &ingressroutev1.VirtualHost{ Fqdn: "kuard-tcp.example.com", TLS: &ingressroutev1.TLS{ SecretName: "secret", }, }, Routes: []ingressroutev1.Route{{ Match: "/", Services: []ingressroutev1.Service{{ Name: "wrong-backend", Port: 80, }}, }}, TCPProxy: &ingressroutev1.TCPProxy{ Services: []ingressroutev1.Service{{ Name: "correct-backend", Port: 80, }}, }, }, } rh.OnAdd(s1) svc := service("default", "correct-backend", v1.ServicePort{ Protocol: "TCP", Port: 80, TargetPort: intstr.FromInt(8080), }) rh.OnAdd(svc) rh.OnAdd(i1) ingressHTTPS := &v2.Listener{ Name: "ingress_https", Address: *envoy.SocketAddress("0.0.0.0", 8443), FilterChains: filterchaintls("kuard-tcp.example.com", s1, tcpproxy(t, "ingress_https", "default/correct-backend/80/da39a3ee5e")), ListenerFilters: []listener.ListenerFilter{ envoy.TLSInspector(), }, } assertEqual(t, &v2.DiscoveryResponse{ VersionInfo: "3", Resources: []types.Any{ any(t, ingressHTTPS), any(t, staticListener()), }, TypeUrl: listenerType, Nonce: "3", }, streamLDS(t, cc)) }
explode_data.jsonl/22831
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 833 }
[ 2830, 3393, 43, 5936, 641, 2483, 4899, 49896, 25925, 1155, 353, 8840, 836, 8, 341, 7000, 71, 11, 12527, 11, 2814, 1669, 6505, 1155, 340, 16867, 2814, 2822, 197, 322, 274, 16, 374, 264, 55026, 6234, 198, 1903, 16, 1669, 609, 85, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReconcileServiceInstanceDeleteBlockedByCredentials(t *testing.T) { fakeKubeClient, fakeCatalogClient, fakeBrokerClient, testController, sharedInformers := newTestController(t, fakeosb.FakeClientConfiguration{ DeprovisionReaction: &fakeosb.DeprovisionReaction{ Response: &osb.DeprovisionResponse{}, }, }) sharedInformers.ClusterServiceBrokers().Informer().GetStore().Add(getTestClusterServiceBroker()) sharedInformers.ClusterServiceClasses().Informer().GetStore().Add(getTestClusterServiceClass()) sharedInformers.ClusterServicePlans().Informer().GetStore().Add(getTestClusterServicePlan()) credentials := getTestServiceBinding() sharedInformers.ServiceBindings().Informer().GetStore().Add(credentials) instance := getTestServiceInstanceWithClusterRefs() instance.ObjectMeta.DeletionTimestamp = &metav1.Time{} instance.ObjectMeta.Finalizers = []string{v1beta1.FinalizerServiceCatalog} // we only invoke the broker client to deprovision if we have a reconciled generation set // as that implies a previous success. 
instance.Generation = 2 instance.Status.ReconciledGeneration = 1 instance.Status.ObservedGeneration = 1 instance.Status.ProvisionStatus = v1beta1.ServiceInstanceProvisionStatusProvisioned instance.Status.ExternalProperties = &v1beta1.ServiceInstancePropertiesState{ ClusterServicePlanExternalName: testClusterServicePlanName, ClusterServicePlanExternalID: testClusterServicePlanGUID, } instance.Status.DeprovisionStatus = v1beta1.ServiceInstanceDeprovisionStatusRequired fakeCatalogClient.AddReactor("get", "serviceinstances", func(action clientgotesting.Action) (bool, runtime.Object, error) { return true, instance, nil }) if err := reconcileServiceInstance(t, testController, instance); err == nil { t.Fatalf("expected reconcileServiceInstance to return an error, but there was none") } brokerActions := fakeBrokerClient.Actions() assertNumberOfBrokerActions(t, brokerActions, 0) // Verify no core kube actions occurred kubeActions := fakeKubeClient.Actions() assertNumberOfActions(t, kubeActions, 0) actions := fakeCatalogClient.Actions() assertNumberOfActions(t, actions, 1) updateObject := assertUpdateStatus(t, actions[0], instance) assertServiceInstanceErrorBeforeRequest(t, updateObject, errorDeprovisionBlockedByCredentialsReason, instance) events := getRecordedEvents(testController) expectedEvent := warningEventBuilder(errorDeprovisionBlockedByCredentialsReason).msg( "All associated ServiceBindings must be removed before this ServiceInstance can be deleted", ) if err := checkEvents(events, expectedEvent.stringArr()); err != nil { t.Fatal(err) } // delete credentials sharedInformers.ServiceBindings().Informer().GetStore().Delete(credentials) fakeCatalogClient.ClearActions() fakeKubeClient.ClearActions() // credentials were removed, verify the next reconcilation removes // the instance instance = updateObject.(*v1beta1.ServiceInstance) if err := reconcileServiceInstance(t, testController, instance); err != nil { t.Fatalf("unexpected error: %v", err) } instance = 
assertServiceInstanceDeprovisionInProgressIsTheOnlyCatalogClientAction(t, fakeCatalogClient, instance) fakeCatalogClient.ClearActions() fakeKubeClient.ClearActions() if err := reconcileServiceInstance(t, testController, instance); err != nil { t.Fatalf("This should not fail : %v", err) } brokerActions = fakeBrokerClient.Actions() assertNumberOfBrokerActions(t, brokerActions, 1) assertDeprovision(t, brokerActions[0], &osb.DeprovisionRequest{ AcceptsIncomplete: true, InstanceID: testServiceInstanceGUID, ServiceID: testClusterServiceClassGUID, PlanID: testClusterServicePlanGUID, }) // Verify no core kube actions occurred kubeActions = fakeKubeClient.Actions() assertNumberOfActions(t, kubeActions, 0) actions = fakeCatalogClient.Actions() // The actions should be: // 0. Updating the current operation // 1. Updating the ready condition assertNumberOfActions(t, actions, 1) updateObject = assertUpdateStatus(t, actions[0], instance) assertServiceInstanceOperationSuccess(t, updateObject, v1beta1.ServiceInstanceOperationDeprovision, testClusterServicePlanName, testClusterServicePlanGUID, instance) events = getRecordedEvents(testController) expectedEvent = normalEventBuilder(successDeprovisionReason).msg("The instance was deprovisioned successfully") if err := checkEvents(events, expectedEvent.stringArr()); err != nil { t.Fatal(err) } }
explode_data.jsonl/58149
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1361 }
[ 2830, 3393, 693, 40446, 457, 1860, 2523, 6435, 95847, 1359, 27025, 1155, 353, 8840, 836, 8, 341, 1166, 726, 42, 3760, 2959, 11, 12418, 41606, 2959, 11, 12418, 65545, 2959, 11, 1273, 2051, 11, 6094, 37891, 388, 1669, 501, 2271, 2051, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFlush(t *testing.T) { client := setupTestClientAndCreateIndex(t) // Flush all indices res, err := client.Flush().Do() if err != nil { t.Fatal(err) } if res == nil { t.Errorf("expected res to be != nil; got: %v", res) } }
explode_data.jsonl/74232
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 99 }
[ 2830, 3393, 46874, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 6505, 2271, 2959, 3036, 4021, 1552, 1155, 692, 197, 322, 57626, 678, 14937, 198, 10202, 11, 1848, 1669, 2943, 61559, 1005, 5404, 741, 743, 1848, 961, 2092, 341, 197, 3244, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCreateRouteTableIfNotExists_Exists(t *testing.T) { fake := newFakeRouteTablesClient() cloud := &Cloud{ RouteTablesClient: fake, Config: Config{ RouteTableResourceGroup: "foo", RouteTableName: "bar", Location: "location", }, } cache, _ := cloud.newRouteTableCache() cloud.rtCache = cache expectedTable := network.RouteTable{ Name: &cloud.RouteTableName, Location: &cloud.Location, } fake.FakeStore = map[string]map[string]network.RouteTable{} fake.FakeStore[cloud.RouteTableResourceGroup] = map[string]network.RouteTable{ cloud.RouteTableName: expectedTable, } err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"}) if err != nil { t.Errorf("unexpected error create if not exists route table: %v", err) t.FailNow() } if len(fake.Calls) != 1 || fake.Calls[0] != "Get" { t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls) } }
explode_data.jsonl/70931
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 383 }
[ 2830, 3393, 4021, 4899, 2556, 2679, 2623, 15575, 62, 15575, 1155, 353, 8840, 836, 8, 341, 1166, 726, 1669, 501, 52317, 4899, 21670, 2959, 741, 197, 12361, 1669, 609, 16055, 515, 197, 47501, 21670, 2959, 25, 12418, 345, 197, 66156, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDateGenerator(t *testing.T) { date1 := "2012-01-01 00:00:00" date2 := "2016-02-29 00:00:00" format := "2006-01-02 15:04:05" //"yyyy-MM-dd hh:mm:ss" generator, err := NewDateGenerator(date1, date2, format) CheckTrue(t, err == nil, "Valid inputs should not cause constructor to fail") min, err := time.Parse(format, date1) CheckTrue(t, err == nil, "Should not fail to parse valid date") max, err := time.Parse(format, date2) CheckTrue(t, err == nil, "Should not fail to parse valid date") // generate some dates data c := generator.Generate() actual := []string{string(<-c), string(<-c), string(<-c), string(<-c), string(<-c), string(<-c), string(<-c), string(<-c)} // check all generated dates are between two input dates for _, d := range actual { date, err := time.Parse(format, d) if err != nil { t.Error("Should not fail to parse valid date") } if date.Before(min) { t.Error("Generated date", date, "should not be before", min) } if date.After(max) { t.Error("Generated date", date, "should not be after", max) } } }
explode_data.jsonl/12226
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 407 }
[ 2830, 3393, 1916, 12561, 1155, 353, 8840, 836, 8, 341, 44086, 16, 1669, 330, 17, 15, 16, 17, 12, 15, 16, 12, 15, 16, 220, 15, 15, 25, 15, 15, 25, 15, 15, 698, 44086, 17, 1669, 330, 17, 15, 16, 21, 12, 15, 17, 12, 17, 24, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestDataUri(t *testing.T) { p := UGCPolicy() p.AllowURLSchemeWithCustomPolicy( "data", func(url *url.URL) (allowUrl bool) { // Allows PNG images only const prefix = "image/png;base64," if !strings.HasPrefix(url.Opaque, prefix) { return false } if _, err := base64.StdEncoding.DecodeString(url.Opaque[len(prefix):]); err != nil { return false } if url.RawQuery != "" || url.Fragment != "" { return false } return true }, ) tests := []test{ { in: `<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==">`, expected: `<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==">`, }, { in: `<img src="data:text/javascript;charset=utf-8,alert('hi');">`, expected: ``, }, { in: `<img src="data:image/png;base64,charset=utf-8,alert('hi');">`, expected: ``, }, { in: `<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4-_8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==">`, expected: ``, }, } for ii, test := range tests { out := p.Sanitize(test.in) if out != test.expected { t.Errorf( "test %d failed;\ninput : %s\noutput : %s\nexpected: %s", ii, test.in, out, test.expected, ) } } }
explode_data.jsonl/28795
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 770 }
[ 2830, 93200, 13899, 1155, 353, 8840, 836, 8, 1476, 3223, 1669, 547, 38, 7123, 8018, 741, 3223, 29081, 3144, 28906, 2354, 10268, 13825, 1006, 197, 197, 97115, 756, 197, 29244, 6522, 353, 1085, 20893, 8, 320, 7183, 2864, 1807, 8, 341, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestVersionRange(t *testing.T) { a,_ := model.ParseVersion("0.1.2") b,_ := model.ParseVersion("0.2.1") c,_ := model.ParseVersion("1.0.1") assert.True(t, b.MinorNewer(a), "B is newer then A") assert.False(t, b.MajorNewer(a), "B is not major newer then A") assert.False(t, b.MajorNewer(c), "B is not major newer then A") assert.True(t, c.MajorNewer(b), "C is major newer then b") }
explode_data.jsonl/58524
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 5637, 6046, 1155, 353, 8840, 836, 8, 341, 11323, 20399, 1669, 1614, 8937, 5637, 445, 15, 13, 16, 13, 17, 1138, 2233, 20399, 1669, 1614, 8937, 5637, 445, 15, 13, 17, 13, 16, 1138, 1444, 20399, 1669, 1614, 8937, 5637, 445,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseWordsStopEarly(t *testing.T) { t.Parallel() p := NewParser() r := strings.NewReader("a\nb\nc\n") parsed := 0 err := p.Words(r, func(w *Word) bool { parsed++ return w.Lit() != "b" }) if err != nil { t.Fatalf("Expected no error: %v", err) } if want := 2; parsed != want { t.Fatalf("wanted %d words parsed, got %d", want, parsed) } }
explode_data.jsonl/55131
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 14463, 23489, 10674, 41198, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 3223, 1669, 1532, 6570, 741, 7000, 1669, 9069, 68587, 445, 64, 1699, 65, 59, 1016, 1699, 1138, 3223, 18112, 1669, 220, 15, 198, 9859, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSkipsConfigMapWhenNotServiceNamespace(t *testing.T) { t.Parallel() tc := testCase{ ns: &core_v1.Namespace{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ Name: namespaceName, // no labels means no service }, }, test: func(t *testing.T, cntrlr *Controller, ctx *ctrl.ProcessContext, tc *testCase) { _, err := cntrlr.Process(ctx) require.NoError(t, err) actions := tc.mainFake.Actions() _, createsFound := findCreatedConfigMap(actions, namespaceName, apisynchronization.DefaultServiceMetadataConfigMapName) assert.False(t, createsFound) _, updatesFound := findUpdatedConfigMap(actions, namespaceName, apisynchronization.DefaultServiceMetadataConfigMapName) assert.False(t, updatesFound) _, relCreatesFound := findCreatedConfigMap(actions, namespaceName, releases.DefaultReleaseMetadataConfigMapName) assert.False(t, relCreatesFound) _, relUpdatesFound := findUpdatedConfigMap(actions, namespaceName, releases.DefaultReleaseMetadataConfigMapName) assert.False(t, relUpdatesFound) }, } tc.run(t) }
explode_data.jsonl/4265
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 385 }
[ 2830, 3393, 19290, 3077, 2648, 2227, 4498, 2623, 1860, 22699, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78255, 1669, 54452, 515, 197, 84041, 25, 609, 2153, 2273, 16, 46011, 515, 298, 27725, 12175, 25, 8823, 2273, 16, 10184,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRequestManualStopWhileStopped(t *testing.T) { // Requesting a manual stop while stopped shouldn't cause problems (issue #2138). withTestProcess("issue2138", t, func(p *proc.Target, fixture protest.Fixture) { resumed := make(chan struct{}) setFileBreakpoint(p, t, fixture.Source, 8) assertNoError(p.Continue(), t, "Continue() 1") p.ResumeNotify(resumed) go func() { <-resumed time.Sleep(1 * time.Second) p.RequestManualStop() }() t.Logf("at time.Sleep call") assertNoError(p.Continue(), t, "Continue() 2") t.Logf("manually stopped") p.RequestManualStop() p.RequestManualStop() p.RequestManualStop() resumed = make(chan struct{}) p.ResumeNotify(resumed) go func() { <-resumed time.Sleep(1 * time.Second) p.RequestManualStop() }() t.Logf("resuming sleep") assertNoError(p.Continue(), t, "Continue() 3") t.Logf("done") }) }
explode_data.jsonl/56350
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 359 }
[ 2830, 3393, 1900, 52092, 10674, 7983, 59803, 1155, 353, 8840, 836, 8, 341, 197, 322, 6145, 287, 264, 11376, 2936, 1393, 10497, 13133, 944, 5240, 5322, 320, 11159, 671, 17, 16, 18, 23, 4292, 46948, 2271, 7423, 445, 11159, 17, 16, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLinkBasePrefix(t *testing.T) { luteEngine := lute.New() luteEngine.RenderOptions.LinkBase = "http://domain.com/path/" luteEngine.RenderOptions.LinkPrefix = "prefix:" for _, test := range linkBasePrefixTests { html := luteEngine.MarkdownStr(test.name, test.from) if test.to != html { t.Fatalf("test case [%s] failed\nexpected\n\t%q\ngot\n\t%q\noriginal markdown text\n\t%q", test.name, test.to, html, test.from) } } }
explode_data.jsonl/73360
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 177 }
[ 2830, 3393, 3939, 3978, 14335, 1155, 353, 8840, 836, 8, 341, 8810, 1070, 4571, 1669, 326, 1070, 7121, 741, 8810, 1070, 4571, 27386, 3798, 22534, 3978, 284, 330, 1254, 1110, 12204, 905, 50976, 29555, 8810, 1070, 4571, 27386, 3798, 22534, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMaxAge(t *testing.T) { resetTest() now := time.Now() respHeaders := http.Header{} respHeaders.Set("date", now.Format(time.RFC1123)) respHeaders.Set("cache-control", "max-age=2") reqHeaders := http.Header{} if getFreshness(respHeaders, reqHeaders) != fresh { t.Fatal("freshness isn't fresh") } clock = &fakeClock{elapsed: 3 * time.Second} if getFreshness(respHeaders, reqHeaders) != stale { t.Fatal("freshness isn't stale") } }
explode_data.jsonl/77633
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 175 }
[ 2830, 3393, 5974, 16749, 1155, 353, 8840, 836, 8, 341, 70343, 2271, 741, 80922, 1669, 882, 13244, 741, 34653, 10574, 1669, 1758, 15753, 16094, 34653, 10574, 4202, 445, 1028, 497, 1431, 9978, 9730, 2013, 6754, 16, 16, 17, 18, 1171, 34653...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFileHelper_GetPathFromPathFileName_08(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".") expectedDir := fh.AdjustPathSlash(".") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' "+ "Error='%v'", commonDir, err.Error()) } if false != isEmpty { t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid "+ "path/file name, instead got: %v", expectedDir, result) } }
explode_data.jsonl/14466
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 262 }
[ 2830, 3393, 1703, 5511, 13614, 1820, 3830, 1820, 10903, 62, 15, 23, 1155, 353, 8840, 836, 8, 341, 220, 36075, 1669, 2887, 5511, 31483, 220, 4185, 6184, 1669, 36075, 17865, 4250, 1820, 88004, 445, 31225, 220, 3601, 6184, 1669, 36075, 178...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestOperatorSync_NoOp(t *testing.T) { cases := []struct { platform v1.PlatformType expectedNoop bool }{ { platform: v1.AWSPlatformType, expectedNoop: false, }, { platform: v1.LibvirtPlatformType, expectedNoop: false, }, { platform: v1.OpenStackPlatformType, expectedNoop: false, }, { platform: v1.AzurePlatformType, expectedNoop: false, }, { platform: v1.BareMetalPlatformType, expectedNoop: false, }, { platform: kubemarkPlatform, expectedNoop: false, }, { platform: v1.VSpherePlatformType, expectedNoop: true, }, { platform: v1.NonePlatformType, expectedNoop: true, }, { platform: "bad-platform", expectedNoop: true, }, } tempDir, err := ioutil.TempDir("", "TestOperatorSync") if err != nil { t.Fatalf("could not create the temp dir: %v", err) } defer os.RemoveAll(tempDir) images := Images{ MachineAPIOperator: "test-mao", } imagesAsJSON, err := json.Marshal(images) if err != nil { t.Fatalf("failed to marshal images: %v", err) } imagesFilePath := filepath.Join(tempDir, "test-images.json") if err := ioutil.WriteFile(imagesFilePath, imagesAsJSON, 0666); err != nil { t.Fatalf("could not write the images file: %v", err) } for _, tc := range cases { t.Run(string(tc.platform), func(t *testing.T) { infra := &v1.Infrastructure{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, Status: v1.InfrastructureStatus{ Platform: tc.platform, }, } stopCh := make(<-chan struct{}) optr := newFakeOperator(nil, []runtime.Object{infra}, stopCh) optr.imagesFile = imagesFilePath err = optr.sync("test-key") if !assert.NoError(t, err, "unexpected sync failure") { t.Fatal() } err = wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { _, err := optr.deployLister.Deployments(targetNamespace).Get(deploymentName) if err != nil { t.Logf("Failed to get %q deployment: %v", deploymentName, err) return false, nil } return true, nil }) if tc.expectedNoop != (err != nil) { t.Errorf("Failed to verify deployment %q with platform %s", deploymentName, tc.platform) } o, err := 
optr.osClient.ConfigV1().ClusterOperators().Get(clusterOperatorName, metav1.GetOptions{}) if !assert.NoError(t, err, "failed to get clusteroperator") { t.Fatal() } expectedConditions := map[v1.ClusterStatusConditionType]v1.ConditionStatus{ v1.OperatorAvailable: v1.ConditionTrue, v1.OperatorProgressing: v1.ConditionFalse, v1.OperatorDegraded: v1.ConditionFalse, } for _, c := range o.Status.Conditions { assert.Equal(t, expectedConditions[c.Type], c.Status, fmt.Sprintf("unexpected clusteroperator condition %s status", c.Type)) } }) } }
explode_data.jsonl/16339
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1214 }
[ 2830, 3393, 18461, 12154, 36989, 7125, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 197, 15734, 257, 348, 16, 51227, 929, 198, 197, 42400, 2753, 453, 1807, 198, 197, 59403, 197, 197, 515, 298, 197, 15734, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestStringSliceContain(t *testing.T) { var testCases = []struct { givenSlice []string given string expect bool }{ { givenSlice: []string{ "Jimmy", "Gucci", "Kobe", "Jack", }, given: "Frank", expect: false, }, { givenSlice: []string{ "Jimmy", "Gucci", "Kobe", "Jack", }, given: "Kobe", expect: true, }, } for i, tc := range testCases { var ret = StringSliceContain(tc.givenSlice, tc.given) if ret != tc.expect { t.Errorf("case %v: expected %s, got %s", i+1, spew.Sprintf("%#v", tc.expect), spew.Sprintf("%#v", ret)) } } }
explode_data.jsonl/82186
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 318 }
[ 2830, 3393, 703, 33236, 46522, 1155, 353, 8840, 836, 8, 341, 2405, 1273, 37302, 284, 3056, 1235, 341, 197, 3174, 2071, 33236, 3056, 917, 198, 197, 3174, 2071, 414, 914, 198, 197, 24952, 257, 1807, 198, 197, 59403, 197, 197, 515, 298, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDial(t *testing.T) { if testing.Short() { t.Skip("skipping syslog test during -short") } f, err := Dial("", "", (LOG_LOCAL7|LOG_DEBUG)+1, "syslog_test") if f != nil { t.Fatalf("Should have trapped bad priority") } f, err = Dial("", "", -1, "syslog_test") if f != nil { t.Fatalf("Should have trapped bad priority") } l, err := Dial("", "", LOG_USER|LOG_ERR, "syslog_test") if err != nil { if err.Error() == "Unix syslog delivery error" { t.Skip("skipping: syslogd not running") } t.Fatalf("Dial() failed: %s", err) } l.Close() }
explode_data.jsonl/17087
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 235 }
[ 2830, 3393, 35, 530, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 4886, 5654, 74487, 1273, 2337, 481, 8676, 1138, 197, 532, 1166, 11, 1848, 1669, 66155, 19814, 7342, 320, 7243, 28399, 22, 91, 7243, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestAccAWSENI_computedIPs(t *testing.T) { var conf ec2.NetworkInterface resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, IDRefreshName: "aws_network_interface.bar", Providers: testAccProviders, CheckDestroy: testAccCheckAWSENIDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAWSENIConfigWithNoPrivateIPs, Check: resource.ComposeTestCheckFunc( testAccCheckAWSENIExists("aws_network_interface.bar", &conf), resource.TestCheckResourceAttr( "aws_network_interface.bar", "private_ips.#", "1"), ), }, }, }) }
explode_data.jsonl/30827
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 256 }
[ 2830, 3393, 14603, 14419, 925, 14912, 2965, 19292, 3298, 82, 1155, 353, 8840, 836, 8, 341, 2405, 2335, 11942, 17, 30149, 5051, 271, 50346, 8787, 1155, 11, 5101, 31363, 515, 197, 197, 4703, 3973, 25, 414, 2915, 368, 314, 1273, 14603, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLogFileCreation(t *testing.T) { if LogDir == "" || LogFile == "" { t.Error("The log file and directory should be set correctly") } _, err := os.Stat(LogFile) if err != nil { t.Error("The log file and directory should be created successfully") } }
explode_data.jsonl/54717
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 89 }
[ 2830, 3393, 98857, 32701, 1155, 353, 8840, 836, 8, 341, 743, 2835, 6184, 621, 1591, 1369, 2835, 1703, 621, 1591, 341, 197, 3244, 6141, 445, 785, 1487, 1034, 323, 6220, 1265, 387, 738, 12440, 1138, 197, 630, 197, 6878, 1848, 1669, 2643...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestInitRegistry(t *testing.T) { r := InitRegistry(Config{ AuthFilters: []*HandlerConfig{{Name: "DefaultAuth"}}, Decorators: []*HandlerConfig{{Name: "DefaultDecorator"}}, }) assert.NotNil(t, r) authHandlers := r.Lookup(Auth) assert.NotNil(t, authHandlers) filters, isAuthFilters := authHandlers.([]auth.Filter) assert.True(t, isAuthFilters) assert.Len(t, filters, 1) decorationHandlers := r.Lookup(Decoration) assert.NotNil(t, decorationHandlers) decorators, isDecorators := decorationHandlers.([]decoration.Decorator) assert.True(t, isDecorators) assert.Len(t, decorators, 1) }
explode_data.jsonl/48086
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 230 }
[ 2830, 3393, 3803, 15603, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 15690, 15603, 33687, 515, 197, 197, 5087, 28351, 25, 29838, 3050, 2648, 2979, 675, 25, 330, 3675, 5087, 48085, 197, 197, 35227, 2973, 25, 220, 29838, 3050, 2648, 2979, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTempRand3(t *testing.T) { seed := time.Now().UnixNano() r := rand.New(rand.NewSource(seed)) n := 1000 m := 100000 edges := make([][]int, m) for i := range edges { s := r.Intn(999) + 1 t := r.Intn(999) + 1 c := r.Intn(1000000000) edges[i] = []int{s, t, c} } temp(n, edges) t.Log("n", n) t.Log(seed) }
explode_data.jsonl/52290
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 163 }
[ 2830, 3393, 12151, 56124, 18, 1155, 353, 8840, 836, 8, 341, 197, 22602, 1669, 882, 13244, 1005, 55832, 83819, 741, 7000, 1669, 10382, 7121, 37595, 7121, 3608, 44163, 1171, 9038, 1669, 220, 16, 15, 15, 15, 198, 2109, 1669, 220, 16, 15,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUpdateGenColInTxn(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec(`create table t(a bigint, b bigint as (a+1));`) tk.MustExec(`begin;`) tk.MustExec(`insert into t(a) values(1);`) err := tk.ExecToErr(`update t set b=6 where b=2;`) require.Equal(t, "[planner:3105]The value specified for generated column 'b' in table 't' is not allowed.", err.Error()) tk.MustExec(`commit;`) tk.MustQuery(`select * from t;`).Check(testkit.Rows( `1 2`)) }
explode_data.jsonl/76259
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 231 }
[ 2830, 3393, 4289, 9967, 6127, 641, 31584, 77, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1273, 8226, 7251, 11571, 6093, 1155, 340, 16867, 4240, 741, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3553, 340, 3244, 74...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestShutdown verifies that shutting down the chain router propagates to the
// chain's handler: the engine's Shutdown hook must run within 20ms and the
// handler must close its closing channel.
func TestShutdown(t *testing.T) {
	vdrs := validators.NewSet()
	benchlist := benchlist.NewNoBenchlist()
	tm := timeout.Manager{}
	err := tm.Initialize(&timer.AdaptiveTimeoutConfig{
		InitialTimeout:     time.Millisecond,
		MinimumTimeout:     time.Millisecond,
		MaximumTimeout:     10 * time.Second,
		TimeoutCoefficient: 1.25,
		TimeoutHalflife:    5 * time.Minute,
		MetricsNamespace:   "",
		Registerer:         prometheus.NewRegistry(),
	}, benchlist)
	if err != nil {
		t.Fatal(err)
	}
	go tm.Dispatch()

	chainRouter := ChainRouter{}
	err = chainRouter.Initialize(ids.ShortEmpty, logging.NoLog{}, &tm, time.Hour, time.Second, ids.Set{}, nil, HealthConfig{}, "", prometheus.NewRegistry())
	assert.NoError(t, err)

	engine := common.EngineTest{T: t}
	engine.Default(false)

	// Buffered so the Shutdown hook never blocks even if the receiver is
	// not yet waiting.
	shutdownCalled := make(chan struct{}, 1)

	engine.ContextF = snow.DefaultContextTest
	engine.ShutdownF = func() error { shutdownCalled <- struct{}{}; return nil }

	handler := &Handler{}
	err = handler.Initialize(
		&engine,
		vdrs,
		nil,
		1,
		DefaultMaxNonStakerPendingMsgs,
		DefaultStakerPortion,
		DefaultStakerPortion,
		"",
		prometheus.NewRegistry(),
	)
	assert.NoError(t, err)
	go handler.Dispatch()

	chainRouter.AddChain(handler)
	chainRouter.Shutdown()

	// Give the shutdown up to 20ms to reach the engine before failing.
	ticker := time.NewTicker(20 * time.Millisecond)
	select {
	case <-ticker.C:
		t.Fatalf("Handler shutdown was not called or timed out after 20ms during chainRouter shutdown")
	case <-shutdownCalled:
	}

	// The handler must also have closed its closing channel by now.
	select {
	case <-handler.closed:
	default:
		t.Fatal("handler shutdown but never closed its closing channel")
	}
}
explode_data.jsonl/3077
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 587 }
[ 2830, 3393, 62004, 1155, 353, 8840, 836, 8, 341, 5195, 93246, 1669, 38588, 7121, 1649, 741, 2233, 19762, 1607, 1669, 13425, 1607, 7121, 2753, 33, 19762, 1607, 741, 3244, 76, 1669, 9632, 58298, 16094, 9859, 1669, 17333, 45829, 2099, 19278,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestJsonSamples(t *testing.T) { bidder, buildErr := Builder(openrtb_ext.BidderAMX, config.Adapter{ Endpoint: amxTestEndpoint}) if buildErr != nil { t.Fatalf("Builder returned unexpected error %v", buildErr) } adapterstest.RunJSONBidderTest(t, "amxtest", bidder) }
explode_data.jsonl/7169
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 109 }
[ 2830, 3393, 5014, 39571, 1155, 353, 8840, 836, 8, 341, 2233, 307, 1107, 11, 1936, 7747, 1669, 20626, 30981, 3342, 65, 9927, 1785, 307, 1107, 1402, 55, 11, 2193, 34190, 515, 197, 197, 27380, 25, 1079, 87, 2271, 27380, 8824, 743, 1936, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestFindPSPDirectoryTable covers locating the PSP directory table in a
// firmware image: an image without the table cookie must fail with a nil
// table, and an image with the table appended after a prefix must report the
// prefix length as the offset and the table data length as the size.
func TestFindPSPDirectoryTable(t *testing.T) {
	firmwareChunk := []byte{
		0x12, 0x00, 0x15, 0x00, 0x15, // some prefix
	}

	t.Run("no_psp_table_cookie", func(t *testing.T) {
		table, _, err := FindPSPDirectoryTable(firmwareChunk)
		if err == nil {
			t.Errorf("Expected an error when finding psp directory table in a broken firmware")
		}
		if table != nil {
			t.Errorf("Returned PSP Directory table is not nil")
		}
	})
	t.Run("psp_table_cookie_found", func(t *testing.T) {
		// The table data follows the prefix, so it must be found at offset
		// len(firmwareChunk).
		table, r, err := FindPSPDirectoryTable(append(firmwareChunk, pspDirectoryTableDataChunk...))
		if err != nil {
			t.Fatalf("Unexecpted error when finding PSP Directory table")
		}
		if r.Offset != uint64(len(firmwareChunk)) {
			t.Fatalf("PSP Directory Table address is incorrect: %d, expected: %d", r.Offset, uint64(len(firmwareChunk)))
		}
		if r.Length != uint64(len(pspDirectoryTableDataChunk)) {
			t.Errorf("PSP Directory Table size is incorrect: %d, expected: %d", r.Length, uint64(len(pspDirectoryTableDataChunk)))
		}
		if table == nil {
			t.Fatal("Returned PSP Directory table is nil")
		}
	})
}
explode_data.jsonl/22536
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 427 }
[ 2830, 3393, 9885, 47, 4592, 9310, 2556, 1155, 353, 8840, 836, 8, 341, 1166, 53260, 28304, 1669, 3056, 3782, 515, 197, 197, 15, 87, 16, 17, 11, 220, 15, 87, 15, 15, 11, 220, 15, 87, 16, 20, 11, 220, 15, 87, 15, 15, 11, 220, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestGenerateNameReactor checks that PrependGenerateNameReactor assigns a
// concrete metadata.name derived from generateName on created objects, leaves
// an explicit name alone, and does not swallow the action — the fake client's
// own reaction chain must still be reached.
func TestGenerateNameReactor(t *testing.T) {
	tests := []struct {
		name         string
		deployment   *appsv1.Deployment
		expectedName string
	}{{
		name:         "resource with name",
		expectedName: "basic",
		deployment: &appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name: "basic",
			},
		},
	}, {
		name:         "resource with generatedName",
		expectedName: "fancy-00001",
		deployment: &appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "fancy-",
			},
		},
	}, {
		// An explicit name wins over generateName.
		name:         "resource with name and generatedName",
		expectedName: "fancy-00002",
		deployment: &appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name:         "fancy-00002",
				GenerateName: "fancy-",
			},
		},
	}, {
		name:         "broken resource with no names",
		expectedName: "",
		deployment:   &appsv1.Deployment{},
	}}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			lastHandlerInvoked := false
			fake := &clientgotesting.Fake{}
			// Catch-all reactor appended after the generate-name reactor:
			// records that the chain was reached.
			fake.AddReactor("*", "*", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
				lastHandlerInvoked = true
				return false, nil, nil
			})
			PrependGenerateNameReactor(fake)

			// Mutate a copy so the shared fixture stays pristine.
			mutated := tc.deployment.DeepCopy()
			action := clientgotesting.NewCreateAction(deploymentsResource, "namespace", mutated)
			fake.Invokes(action, &appsv1.Deployment{})

			if diff := cmp.Diff(tc.expectedName, mutated.GetName()); diff != "" {
				t.Error(diff)
			}
			if !lastHandlerInvoked {
				t.Error("GenreateNameReactor should not interfere with the fake's ReactionChain")
			}
		})
	}
}
explode_data.jsonl/31221
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 673 }
[ 2830, 3393, 31115, 675, 693, 5621, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 260, 914, 198, 197, 197, 82213, 256, 353, 676, 3492, 16, 34848, 39130, 198, 197, 42400, 675, 914, 198, 197, 15170, 515, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTx_ForEach_NoError(t *testing.T) { db := MustOpenDB() defer db.MustClose() if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } if err := b.Put([]byte("foo"), []byte("bar")); err != nil { t.Fatal(err) } if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { return nil }); err != nil { t.Fatal(err) } return nil }); err != nil { t.Fatal(err) } }
explode_data.jsonl/1698
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 217 }
[ 2830, 3393, 31584, 84368, 4854, 36989, 1454, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 15465, 5002, 3506, 741, 16867, 2927, 50463, 7925, 741, 743, 1848, 1669, 2927, 16689, 18552, 27301, 353, 52433, 81362, 8, 1465, 341, 197, 2233, 11, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMagic exercises the archive magic header round trip: WriteMagic emits
// the 4-byte 'S','A','R',version marker, and ReadMagic accepts the current or
// an older version while rejecting bad prefixes, newer versions, and
// truncated input.
func TestMagic(t *testing.T) {
	t.Parallel()

	Convey("Magic", t, func() {
		Convey("write", func() {
			buf := &bytes.Buffer{}
			So(WriteMagic(buf), ShouldBeNil)
			// Current on-disk marker is "SAR" followed by version byte 1.
			So(buf.Bytes(), ShouldResemble, []byte{'S', 'A', 'R', 1})
		})

		Convey("read", func() {
			Convey("good", func() {
				Convey("matching version", func() {
					buf := bytes.NewReader([]byte{'S', 'A', 'R', 1})
					v, err := ReadMagic(buf)
					So(err, ShouldBeNil)
					So(v, ShouldEqual, 1)
				})
				Convey("older version", func() {
					// Older versions are readable for backward compatibility.
					buf := bytes.NewReader([]byte{'S', 'A', 'R', 0})
					v, err := ReadMagic(buf)
					So(err, ShouldBeNil)
					So(v, ShouldEqual, 0)
				})
			})
			Convey("bad", func() {
				Convey("bad prefix", func() {
					// A ZIP local-file header is not a SAR archive.
					buf := bytes.NewReader([]byte{'P', 'K', 3, 4})
					_, err := ReadMagic(buf)
					So(err, ShouldErrLike, `bad magic: "PK\x03"`)
				})
				Convey("newer version", func() {
					// Versions above the current one are rejected.
					buf := bytes.NewReader([]byte{'S', 'A', 'R', 4})
					_, err := ReadMagic(buf)
					So(err, ShouldErrLike, `bad version: 4 > 1`)
				})
				Convey("short read", func() {
					buf := bytes.NewReader([]byte{'S', 'A'})
					_, err := ReadMagic(buf)
					So(err, ShouldErrLike, io.ErrUnexpectedEOF)
				})
			})
		})
	})
}
explode_data.jsonl/54308
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 600 }
[ 2830, 3393, 43538, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 93070, 5617, 445, 43538, 497, 259, 11, 2915, 368, 341, 197, 93070, 5617, 445, 4934, 497, 2915, 368, 341, 298, 26398, 1669, 609, 9651, 22622, 16094, 298, 76912, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestNewHTTPSigner covers the zcapld HTTP signer: end-to-end signing and
// verification of a request, plus propagation of action errors and of
// signature (KMS) errors from the sign closure.
func TestNewHTTPSigner(t *testing.T) {
	t.Run("E2E signature computation and verification", func(t *testing.T) {
		agent := newAgent(t)
		verMethod := newVerMethod(t, agent.KMS())
		sign := zcapld.NewHTTPSigner(
			verMethod,
			"mockZCAP",
			action("test", nil),
			&zcapld.DIDSecrets{
				Secrets: map[string]httpsignatures.Secrets{
					"key": &zcapld2.AriesDIDKeySecrets{},
				},
			},
			&zcapld.DIDSignatureHashAlgorithms{
				KMS:       agent.KMS(),
				Crypto:    agent.Crypto(),
				Resolvers: []zcapld.DIDResolver{key.New()},
			},
		)
		request := httptest.NewRequest(http.MethodGet, "/some/path", nil)
		headers, err := sign(request)
		require.NoError(t, err)
		// Signing must produce both the capability-invocation and signature
		// headers.
		require.NotEmpty(t, headers.Get("capability-invocation"))
		require.NotEmpty(t, headers.Get("signature"))
		request.Header = *headers
		// Verify the signed request with an independently constructed
		// verifier using the same secrets and hash algorithms.
		hs := httpsignatures.NewHTTPSignatures(&zcapld.DIDSecrets{
			Secrets: map[string]httpsignatures.Secrets{
				"key": &zcapld2.AriesDIDKeySecrets{},
			},
		})
		hs.SetSignatureHashAlgorithm(&zcapld.DIDSignatureHashAlgorithms{
			KMS:       agent.KMS(),
			Crypto:    agent.Crypto(),
			Resolvers: []zcapld.DIDResolver{key.New()},
		})
		err = hs.Verify(request)
		require.NoError(t, err)
	})

	t.Run("wraps action error", func(t *testing.T) {
		expected := errors.New("test error")
		agent := newAgent(t)
		verMethod := newVerMethod(t, agent.KMS())
		// The action callback fails; sign must surface that error.
		sign := zcapld.NewHTTPSigner(
			verMethod,
			"mockZCAP",
			action("", expected),
			&zcapld.DIDSecrets{},
			&zcapld.DIDSignatureHashAlgorithms{},
		)
		request := httptest.NewRequest(http.MethodGet, "/some/path", nil)
		_, err := sign(request)
		require.Error(t, err)
		require.Contains(t, err.Error(), expected.Error())
	})

	t.Run("wraps signature error", func(t *testing.T) {
		expected := errors.New("test error")
		agent := newAgent(t)
		verMethod := newVerMethod(t, agent.KMS())
		// The KMS fails to return the key; sign must surface that error.
		sign := zcapld.NewHTTPSigner(
			verMethod,
			"mockZCAP",
			action("test", nil),
			&zcapld.DIDSecrets{
				Secrets: map[string]httpsignatures.Secrets{
					"key": &zcapld2.AriesDIDKeySecrets{},
				},
			},
			&zcapld.DIDSignatureHashAlgorithms{
				KMS:       &mockkms.KeyManager{GetKeyErr: expected},
				Crypto:    agent.Crypto(),
				Resolvers: []zcapld.DIDResolver{key.New()},
			},
		)
		request := httptest.NewRequest(http.MethodGet, "/some/path", nil)
		_, err := sign(request)
		require.Error(t, err)
		require.Contains(t, err.Error(), expected.Error())
	})
}
explode_data.jsonl/71474
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1081 }
[ 2830, 3393, 3564, 9230, 7264, 261, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 36, 17, 36, 11957, 34447, 323, 22901, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 197, 8092, 1669, 501, 16810, 1155, 340, 197, 197, 423, 3523, 1669...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestInvalidFlagCombinations ensures NewEngine rejects script flag
// combinations that are invalid on their own (ScriptVerifyCleanStack without
// its prerequisite flags) with ErrInvalidFlags.
func TestInvalidFlagCombinations(t *testing.T) {
	t.Parallel()

	tests := []ScriptFlags{
		ScriptVerifyCleanStack,
	}

	// tx with almost empty scripts.
	tx := &wire.MsgTx{
		Version: 1,
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: wire.OutPoint{
					Hash: chainhash.Hash([32]byte{
						0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02,
						0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06,
						0x60, 0xa2, 0x0b, 0x2d, 0x9c, 0x35, 0x24, 0x23,
						0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04,
					}),
					Index: 0,
				},
				SignatureScript: []uint8{OP_NOP},
				Sequence:        4294967295,
			},
		},
		TxOut: []*wire.TxOut{
			{
				Value:    1000000000,
				PkScript: nil,
			},
		},
		LockTime: 0,
	}
	pkScript := []byte{OP_NOP}

	for i, test := range tests {
		// Engine construction itself must fail; the -1 input amount is
		// irrelevant since validation never starts.
		_, err := NewEngine(pkScript, tx, 0, test, nil, nil, -1)
		if !IsErrorCode(err, ErrInvalidFlags) {
			t.Fatalf("TestInvalidFlagCombinations #%d unexpected "+
				"error: %v", i, err)
		}
	}
}
explode_data.jsonl/45211
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 551 }
[ 2830, 3393, 7928, 12135, 1092, 73629, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78216, 1669, 3056, 5910, 9195, 515, 197, 197, 5910, 32627, 27529, 4336, 345, 197, 630, 197, 322, 9854, 448, 4558, 4287, 19502, 624, 46237, 1669...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMultistoreRestore_Errors(t *testing.T) { store := newMultiStoreWithMixedMounts(dbm.NewMemDB()) testcases := map[string]struct { height uint64 format uint32 expectType error }{ "0 height": {0, snapshottypes.CurrentFormat, nil}, "0 format": {1, 0, snapshottypes.ErrUnknownFormat}, "unknown format": {1, 9, snapshottypes.ErrUnknownFormat}, } for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { err := store.Restore(tc.height, tc.format, nil, nil) require.Error(t, err) if tc.expectType != nil { assert.True(t, errors.Is(err, tc.expectType)) } }) } }
explode_data.jsonl/44876
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 272 }
[ 2830, 3393, 40404, 380, 460, 56284, 93623, 1087, 1155, 353, 8840, 836, 8, 341, 57279, 1669, 501, 20358, 6093, 2354, 86433, 16284, 82, 9791, 76, 7121, 18816, 3506, 12367, 18185, 23910, 1669, 2415, 14032, 60, 1235, 341, 197, 30500, 257, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestZip64EdgeCase(t *testing.T) { if testing.Short() { t.Skip("slow test; skipping") } // Test a zip file with uncompressed size 0xFFFFFFFF. // That's the magic marker for a 64-bit file, so even though // it fits in a 32-bit field we must use the 64-bit field. // Go 1.5 and earlier got this wrong, // writing an invalid zip file. const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part buf := testZip64(t, size) testZip64DirectoryRecordLength(buf, t) }
explode_data.jsonl/18863
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 169 }
[ 2830, 3393, 31047, 21, 19, 11656, 4207, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 35211, 1273, 26, 42659, 1138, 197, 532, 197, 322, 3393, 264, 10308, 1034, 448, 92382, 1379, 220, 15, 22620, 624, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMuxPlain(t *testing.T) { r := NewRouter() r.Get("/hi", HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte("bye")) })) r.NotFound(HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Response.SetStatusCode(404) rc.Write([]byte("nothing here")) })) ts := NewTestServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/hi", nil); body != "bye" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/nothing-here", nil); body != "nothing here" { t.Fatalf(body) } }
explode_data.jsonl/47941
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 235 }
[ 2830, 3393, 44, 2200, 26982, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 9523, 741, 7000, 2234, 4283, 6023, 497, 19954, 9626, 18552, 7502, 2266, 9328, 11, 10192, 353, 9349, 1254, 9659, 23684, 8, 341, 197, 30295, 4073, 10556, 3782, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestReconcile_DeletionFailed verifies that reconciling a LogicalVolumeGroup
// marked for deletion fails when the volume group still contains logical
// volumes: the finalizer must not be removed and the error must say so.
func TestReconcile_DeletionFailed(t *testing.T) {
	var (
		e   = &mocks.GoMockExecutor{}
		req = ctrl.Request{NamespacedName: types.NamespacedName{Namespace: ns, Name: lvgCR1.Name}}
	)
	// Mark the CR as being deleted and carry the LVG finalizer so the
	// deletion path of Reconcile is exercised.
	lvgToDell := lvgCR1
	lvgToDell.ObjectMeta.DeletionTimestamp = &v1.Time{Time: time.Now()}
	lvgToDell.ObjectMeta.Finalizers = []string{lvgFinalizer}
	c := setup(t, node1ID, lvgToDell)
	c.lvmOps = lvm.NewLVM(e, testLogger)
	// expect that LogicalVolumeGroup still contains LV
	e.OnCommand(fmt.Sprintf(lvm.LVsInVGCmdTmpl, lvgCR1.Name)).Return("lv1", "", nil)

	res, err := c.Reconcile(req)
	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "there are LVs in LogicalVolumeGroup")
	assert.Equal(t, res, ctrl.Result{})
}
explode_data.jsonl/51721
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 306 }
[ 2830, 3393, 693, 40446, 457, 24597, 52625, 9408, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 7727, 256, 284, 609, 16712, 82, 67131, 11571, 25255, 16094, 197, 24395, 284, 23743, 9659, 90, 7980, 68552, 675, 25, 4494, 98932, 68552, 675,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestTraceKVStoreSet checks the trace output emitted by a tracing KV store
// on Set: one JSON line per write, with base64-encoded key/value and the
// block-height metadata.
func TestTraceKVStoreSet(t *testing.T) {
	testCases := []struct {
		key         []byte
		value       []byte
		expectedOut string
	}{
		{
			// Empty key and nil value still produce a trace line with
			// empty encoded fields.
			key:         []byte{},
			value:       nil,
			expectedOut: "{\"operation\":\"write\",\"key\":\"\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
		},
		{
			key:         kvPairs[0].Key,
			value:       kvPairs[0].Value,
			expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
		},
	}

	for _, tc := range testCases {
		var buf bytes.Buffer
		store := newEmptyTraceKVStore(&buf)
		buf.Reset()
		store.Set(tc.key, tc.value)

		require.Equal(t, tc.expectedOut, buf.String())
	}
}
explode_data.jsonl/52004
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 336 }
[ 2830, 3393, 6550, 82707, 6093, 1649, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 23634, 260, 3056, 3782, 198, 197, 16309, 981, 3056, 3782, 198, 197, 42400, 2662, 914, 198, 197, 59403, 197, 197, 515, 298, 23...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestPhaseCluster runs the lifecycle-phases integration fixture restricted
// to the cluster phase. Currently skipped — see the TODO below.
func TestPhaseCluster(t *testing.T) {
	// TODO fix tf for phase, and allow override on validation
	t.Skip("unable to test w/o allowing failed validation")
	runTestPhase(t, "lifecyclephases.example.com", "lifecycle_phases", "v1alpha2", true, 1, cloudup.PhaseCluster)
}
explode_data.jsonl/17510
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 91 }
[ 2830, 3393, 30733, 28678, 1155, 353, 8840, 836, 8, 341, 197, 322, 5343, 5046, 6409, 369, 10262, 11, 323, 2138, 2812, 389, 10519, 198, 3244, 57776, 445, 45928, 311, 1273, 289, 20271, 10693, 4641, 10519, 1138, 56742, 2271, 30733, 1155, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestAddDevice drives the AddDevice HTTP controller through a table of
// request payloads: a valid request (201 within a 207 multi-status batch),
// unknown service/profile references (404), and a series of
// validation failures (400). The DB client is fully mocked.
func TestAddDevice(t *testing.T) {
	testDevice := buildTestDeviceRequest()
	deviceModel := requests.AddDeviceReqToDeviceModels([]requests.AddDeviceRequest{testDevice})[0]
	expectedRequestId := ExampleUUID
	dic := mockDic()
	dbClientMock := &dbMock.DBClient{}

	// Happy path: service and profile exist, the device is persisted, and the
	// owning device service can be looked up for callback notification.
	valid := testDevice
	dbClientMock.On("DeviceServiceNameExists", deviceModel.ServiceName).Return(true, nil)
	dbClientMock.On("DeviceProfileNameExists", deviceModel.ProfileName).Return(true, nil)
	dbClientMock.On("AddDevice", deviceModel).Return(deviceModel, nil)
	dbClientMock.On("DeviceServiceByName", deviceModel.ServiceName).Return(models.DeviceService{BaseAddress: testBaseAddress}, nil)

	// Referenced device service does not exist.
	notFoundService := testDevice
	notFoundService.Device.ServiceName = "notFoundService"
	dbClientMock.On("DeviceServiceNameExists", notFoundService.Device.ServiceName).Return(false, nil)

	// Referenced device profile does not exist.
	notFoundProfile := testDevice
	notFoundProfile.Device.ProfileName = "notFoundProfile"
	dbClientMock.On("DeviceProfileNameExists", notFoundProfile.Device.ProfileName).Return(false, nil)

	// Variants that each violate one required field and must fail validation.
	noName := testDevice
	noName.Device.Name = ""
	noAdminState := testDevice
	noAdminState.Device.AdminState = ""
	noOperatingState := testDevice
	noOperatingState.Device.OperatingState = ""
	invalidAdminState := testDevice
	invalidAdminState.Device.AdminState = "invalidAdminState"
	invalidOperatingState := testDevice
	invalidOperatingState.Device.OperatingState = "invalidOperatingState"
	noServiceName := testDevice
	noServiceName.Device.ServiceName = ""
	noProfileName := testDevice
	noProfileName.Device.ProfileName = ""
	noProtocols := testDevice
	noProtocols.Device.Protocols = nil
	emptyProtocols := testDevice
	emptyProtocols.Device.Protocols = map[string]dtos.ProtocolProperties{}

	dic.Update(di.ServiceConstructorMap{
		container.DBClientInterfaceName: func(get di.Get) interface{} {
			return dbClientMock
		},
	})
	controller := NewDeviceController(dic)
	assert.NotNil(t, controller)

	tests := []struct {
		name               string
		request            []requests.AddDeviceRequest
		expectedStatusCode int
	}{
		{"Valid", []requests.AddDeviceRequest{valid}, http.StatusCreated},
		{"Invalid - not found service", []requests.AddDeviceRequest{notFoundService}, http.StatusNotFound},
		{"Invalid - not found profile", []requests.AddDeviceRequest{notFoundProfile}, http.StatusNotFound},
		{"Invalid - no name", []requests.AddDeviceRequest{noName}, http.StatusBadRequest},
		{"Invalid - no adminState", []requests.AddDeviceRequest{noAdminState}, http.StatusBadRequest},
		{"Invalid - no operatingState", []requests.AddDeviceRequest{noOperatingState}, http.StatusBadRequest},
		{"Invalid - invalid adminState", []requests.AddDeviceRequest{invalidAdminState}, http.StatusBadRequest},
		{"Invalid - invalid operatingState", []requests.AddDeviceRequest{invalidOperatingState}, http.StatusBadRequest},
		{"Invalid - no service name", []requests.AddDeviceRequest{noServiceName}, http.StatusBadRequest},
		{"Invalid - no profile name", []requests.AddDeviceRequest{noProfileName}, http.StatusBadRequest},
		{"Invalid - no protocols", []requests.AddDeviceRequest{noProtocols}, http.StatusBadRequest},
		{"Invalid - empty protocols", []requests.AddDeviceRequest{emptyProtocols}, http.StatusBadRequest},
	}
	for _, testCase := range tests {
		t.Run(testCase.name, func(t *testing.T) {
			jsonData, err := json.Marshal(testCase.request)
			require.NoError(t, err)

			reader := strings.NewReader(string(jsonData))
			req, err := http.NewRequest(http.MethodPost, common.ApiDeviceRoute, reader)
			require.NoError(t, err)

			// Act
			recorder := httptest.NewRecorder()
			handler := http.HandlerFunc(controller.AddDevice)
			handler.ServeHTTP(recorder, req)

			if testCase.expectedStatusCode == http.StatusBadRequest {
				// Validation failures return a single BaseResponse with the
				// HTTP status mirrored in both the response and the body.
				var res commonDTO.BaseResponse
				err = json.Unmarshal(recorder.Body.Bytes(), &res)
				require.NoError(t, err)

				// Assert
				assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
				assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
				assert.Equal(t, testCase.expectedStatusCode, res.StatusCode, "BaseResponse status code not as expected")
				assert.NotEmpty(t, res.Message, "Message is empty")
			} else {
				// Batch requests respond 207 Multi-Status with a per-item
				// BaseResponse carrying the individual status code.
				var res []commonDTO.BaseResponse
				err = json.Unmarshal(recorder.Body.Bytes(), &res)
				require.NoError(t, err)

				// Assert
				assert.Equal(t, http.StatusMultiStatus, recorder.Result().StatusCode, "HTTP status code not as expected")
				assert.Equal(t, common.ApiVersion, res[0].ApiVersion, "API Version not as expected")
				if res[0].RequestId != "" {
					assert.Equal(t, expectedRequestId, res[0].RequestId, "RequestID not as expected")
				}
				assert.Equal(t, testCase.expectedStatusCode, res[0].StatusCode, "BaseResponse status code not as expected")
			}
		})
	}
}
explode_data.jsonl/9301
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1597 }
[ 2830, 3393, 2212, 6985, 1155, 353, 8840, 836, 8, 341, 18185, 6985, 1669, 1936, 2271, 6985, 1900, 741, 54719, 1712, 1669, 7388, 1904, 6985, 27234, 94517, 16969, 10556, 36242, 1904, 6985, 1900, 90, 1944, 6985, 5410, 58, 15, 921, 42400, 61...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLastLine(t *testing.T) { filename, err := fs.TempFilenameWithText(text) assert.Nil(t, err) defer os.Remove(filename) val, err := LastLine(filename) assert.Nil(t, err) assert.Equal(t, longLine, val) }
explode_data.jsonl/37835
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 86 }
[ 2830, 3393, 5842, 2460, 1155, 353, 8840, 836, 8, 341, 66434, 11, 1848, 1669, 8619, 65009, 20759, 2354, 1178, 7235, 340, 6948, 59678, 1155, 11, 1848, 340, 16867, 2643, 13270, 10961, 692, 19302, 11, 1848, 1669, 7996, 2460, 10961, 340, 694...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestExecutorsendFrameworkMessage checks that sendFrameworkMessage forwards
// the message to the driver exactly once, waiting for the mocked call to
// land before asserting expectations (avoiding a data race in the mock).
func TestExecutorsendFrameworkMessage(t *testing.T) {
	mockDriver := &MockExecutorDriver{}
	executor := NewTestKubernetesExecutor()
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	called := make(chan struct{})
	mockDriver.On(
		"SendFrameworkMessage",
		"foo bar baz",
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()
	executor.sendFrameworkMessage(mockDriver, "foo bar baz")

	// guard against data race in mock driver between AssertExpectations and Called
	select {
	case <-called: // expected
	case <-time.After(5 * time.Second):
		t.Fatalf("expected call to SendFrameworkMessage")
	}
	mockDriver.AssertExpectations(t)
}
explode_data.jsonl/59746
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 247 }
[ 2830, 3393, 10216, 9475, 408, 14837, 2052, 1155, 353, 8840, 836, 8, 341, 77333, 11349, 1669, 609, 11571, 25255, 11349, 16094, 67328, 4831, 1669, 1532, 2271, 42, 29827, 25255, 2822, 67328, 4831, 26849, 30389, 11349, 340, 67328, 4831, 19983, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQuery(t *testing.T) { query := query("//:skaffold_example.tar") expectedQuery := `kind('source file', deps('//:skaffold_example.tar')) union buildfiles('//:skaffold_example.tar')` if query != expectedQuery { t.Errorf("Expected [%s]. Got [%s]", expectedQuery, query) } }
explode_data.jsonl/61217
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 105 }
[ 2830, 3393, 2859, 1155, 353, 8840, 836, 8, 341, 27274, 1669, 3239, 29076, 25, 4886, 2649, 813, 39304, 28048, 5130, 42400, 2859, 1669, 1565, 15314, 492, 2427, 1034, 516, 48178, 53279, 25, 4886, 2649, 813, 39304, 28048, 3789, 11300, 1936, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPNCounter_DecrementAndGet(t *testing.T) { it.PNCounterTester(t, func(t *testing.T, pn *hz.PNCounter) { v, err := pn.DecrementAndGet(context.Background()) if err != nil { t.Fatal(err) } assert.Equal(t, int64(-1), v) v, err = pn.DecrementAndGet(context.Background()) if err != nil { t.Fatal(err) } assert.Equal(t, int64(-2), v) }) }
explode_data.jsonl/48302
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 171 }
[ 2830, 3393, 17896, 14099, 78668, 54655, 97726, 1155, 353, 8840, 836, 8, 341, 23374, 1069, 45, 14099, 58699, 1155, 11, 2915, 1155, 353, 8840, 836, 11, 43050, 353, 37259, 1069, 45, 14099, 8, 341, 197, 5195, 11, 1848, 1669, 43050, 22442, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestMatch_Int64 drives Match through int64 comparisons for every operator
// (=, >, >=, <) at, above, and below the boundary value, checking the
// boolean result of each raw YQL expression.
func TestMatch_Int64(t *testing.T) {
	var testData = []struct {
		rawYql string
		data   map[string]interface{}
		out    bool
	}{
		{
			rawYql: `a=10`,
			data: map[string]interface{}{
				"a": int64(9),
			},
			out: false,
		},
		{
			rawYql: `a=10`,
			data: map[string]interface{}{
				"a": int64(10),
			},
			out: true,
		},
		{
			rawYql: `a>10`,
			data: map[string]interface{}{
				"a": int64(10),
			},
			out: false,
		},
		{
			rawYql: `a>10`,
			data: map[string]interface{}{
				"a": int64(11),
			},
			out: true,
		},
		{
			rawYql: `a>=10`,
			data: map[string]interface{}{
				"a": int64(10),
			},
			out: true,
		},
		{
			rawYql: `a>=10`,
			data: map[string]interface{}{
				"a": int64(11),
			},
			out: true,
		},
		{
			rawYql: `a>=10`,
			data: map[string]interface{}{
				"a": int64(1),
			},
			out: false,
		},
		{
			rawYql: `a<10`,
			data: map[string]interface{}{
				"a": int64(1),
			},
			out: true,
		},
		{
			rawYql: `a<10`,
			data: map[string]interface{}{
				"a": int64(10),
			},
			out: false,
		},
		{
			rawYql: `a<10`,
			data: map[string]interface{}{
				"a": int64(11),
			},
			out: false,
		},
	}
	ass := assert.New(t)
	for _, tc := range testData {
		ok, err := Match(tc.rawYql, tc.data)
		ass.NoError(err)
		// Include the expression and input in the failure message so a
		// failing case is identifiable.
		ass.Equal(tc.out, ok, "rawYql=%s||data=%+v", tc.rawYql, tc.data)
	}
}
explode_data.jsonl/65936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 753 }
[ 2830, 3393, 8331, 32054, 21, 19, 1155, 353, 8840, 836, 8, 341, 2405, 67348, 284, 3056, 1235, 341, 197, 76559, 56, 1470, 914, 198, 197, 8924, 256, 2415, 14032, 31344, 16094, 197, 13967, 262, 1807, 198, 197, 59403, 197, 197, 515, 298, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestRewrittenVarsInErrors checks that type-checker errors report refs in
// terms of the original (pre-rewrite) variable names: rewritten locals
// __local0__/__local1__ must surface as foo[0][bar] in the error detail.
func TestRewrittenVarsInErrors(t *testing.T) {
	_, errs := newTypeChecker().WithVarRewriter(rewriteVarsInRef(map[Var]Var{
		"__local0__": "foo",
		"__local1__": "bar",
	})).CheckBody(nil, MustParseBody(`__local0__ = [[1]]; __local1__ = "bar"; __local0__[0][__local1__]`))
	if len(errs) != 1 {
		t.Fatal("expected exactly one error but got:", len(errs))
	}
	detail, ok := errs[0].Details.(*RefErrInvalidDetail)
	if !ok {
		t.Fatal("expected invalid ref detail but got:", errs[0].Details)
	}
	if !detail.Ref.Equal(MustParseRef("foo[0][bar]")) {
		t.Fatal("expected ref to be foo[0][bar] but got:", detail.Ref)
	}
}
explode_data.jsonl/14573
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 261 }
[ 2830, 3393, 58465, 12153, 28305, 641, 13877, 1155, 353, 8840, 836, 8, 1476, 197, 6878, 70817, 1669, 501, 929, 35188, 1005, 2354, 3962, 58465, 2542, 7, 52473, 28305, 641, 3945, 9147, 58, 3962, 60, 3962, 515, 197, 197, 1, 563, 2438, 15,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func Test_DeleteCycle(t *testing.T) { if movieFail || cycleFail || testMovie != nil || testCycle == nil || testCycle.Id < 1 { t.Skip("Skipping due to previous failure") } err := conn.DeleteCycle(testCycle.Id) if err != nil { t.Fatal(err) } testCycle = nil }
explode_data.jsonl/22025
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 105 }
[ 2830, 3393, 57418, 44820, 1155, 353, 8840, 836, 8, 341, 743, 5700, 19524, 1369, 10775, 19524, 1369, 1273, 19668, 961, 2092, 1369, 1273, 44820, 621, 2092, 1369, 1273, 44820, 6444, 366, 220, 16, 341, 197, 3244, 57776, 445, 85945, 4152, 31...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestWithAcquire(t *testing.T) { t.Parallel() migration := sqltest.New(t, sqltest.Options{ Force: *force, Path: "../../migrations", }) pool := migration.Setup(context.Background(), "") db := &DB{ Postgres: pool, } // Reuse the same connection for executing SQL commands. dbCtx, err := db.WithAcquire(context.Background()) if err != nil { t.Fatalf("unexpected DB.WithAcquire() error = %v", err) } defer db.Release(dbCtx) // Check if we can acquire a connection only for a given context. defer func() { want := "context already has a connection acquired" if r := recover(); r != want { t.Errorf("expected panic %v, got %v instead", want, r) } }() db.WithAcquire(dbCtx) }
explode_data.jsonl/25447
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 264 }
[ 2830, 3393, 2354, 11654, 984, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 2109, 5033, 1669, 5704, 1944, 7121, 1155, 11, 5704, 1944, 22179, 515, 197, 197, 18573, 25, 353, 8833, 345, 197, 69640, 25, 220, 10208, 76, 17824, 756,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDb_ClosedIsClosed(t *testing.T) { h := newDbHarness(t) db := h.db var iter, iter2 iterator.Iterator var snap *Snapshot func() { defer h.close() h.put("k", "v") h.getVal("k", "v") iter = db.NewIterator(nil, h.ro) iter.Seek([]byte("k")) testKeyVal(t, iter, "k->v") var err error snap, err = db.GetSnapshot() if err != nil { t.Fatal("GetSnapshot: got error: ", err) } h.getValr(snap, "k", "v") iter2 = snap.NewIterator(nil, h.ro) iter2.Seek([]byte("k")) testKeyVal(t, iter2, "k->v") h.put("foo", "v2") h.delete("foo") // closing DB iter.Release() iter2.Release() }() assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) _, err := db.Get([]byte("k"), h.ro) assertErr(t, err, true) if iter.Valid() { t.Errorf("iter.Valid should false") } assertErr(t, iter.Error(), false) testKeyVal(t, iter, "->") if iter.Seek([]byte("k")) { t.Errorf("iter.Seek should false") } assertErr(t, iter.Error(), true) assertErr(t, iter2.Error(), false) _, err = snap.Get([]byte("k"), h.ro) assertErr(t, err, true) _, err = db.GetSnapshot() assertErr(t, err, true) iter3 := db.NewIterator(nil, h.ro) assertErr(t, iter3.Error(), true) iter3 = snap.NewIterator(nil, h.ro) assertErr(t, iter3.Error(), true) assertErr(t, db.Delete([]byte("k"), h.wo), true) _, err = db.GetProperty("leveldb.stats") assertErr(t, err, true) _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) assertErr(t, err, true) assertErr(t, db.CompactRange(util.Range{}), true) assertErr(t, db.Close(), true) }
explode_data.jsonl/6034
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 718 }
[ 2830, 3393, 7994, 920, 9259, 3872, 26884, 1155, 353, 8840, 836, 8, 341, 9598, 1669, 501, 7994, 74248, 1155, 340, 20939, 1669, 305, 7076, 271, 2405, 5367, 11, 5367, 17, 15091, 40846, 198, 2405, 10658, 353, 15009, 198, 29244, 368, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPermissionTarget(t *testing.T) { initArtifactoryTest(t) params := services.NewPermissionTargetParams() params.Name = fmt.Sprintf("%s-%s", PermissionTargetNamePrefix, getRunId()) params.Repo = &services.PermissionTargetSection{} params.Repo.Repositories = []string{"ANY"} params.Repo.ExcludePatterns = []string{"dir/*"} params.Repo.Actions = &services.Actions{} params.Repo.Actions.Users = map[string][]string{ "anonymous": {"read"}, } params.Build = &services.PermissionTargetSection{} params.Build.Repositories = []string{"artifactory-build-info"} params.Build.Actions = &services.Actions{} params.Build.Actions.Users = map[string][]string{ "anonymous": {"annotate"}, } err := testsPermissionTargetService.Create(params) assert.NoError(t, err) // Fill in default values before validation params.Repo.IncludePatterns = []string{"**"} params.Build.Repositories = []string{"artifactory-build-info"} params.Build.IncludePatterns = []string{"**"} params.Build.ExcludePatterns = []string{} validatePermissionTarget(t, params) params.Repo.Actions.Users = nil params.Repo.Repositories = []string{"ANY REMOTE"} err = testsPermissionTargetService.Update(params) validatePermissionTarget(t, params) assert.NoError(t, err) err = testsPermissionTargetService.Delete(params.Name) assert.NoError(t, err) targetParams, err := getPermissionTarget(params.Name) assert.NoError(t, err) assert.Nil(t, targetParams) }
explode_data.jsonl/7215
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 496 }
[ 2830, 3393, 14966, 6397, 1155, 353, 8840, 836, 8, 341, 28248, 9286, 333, 2919, 2271, 1155, 340, 25856, 1669, 3516, 7121, 14966, 6397, 4870, 741, 25856, 2967, 284, 8879, 17305, 4430, 82, 11069, 82, 497, 18135, 6397, 675, 14335, 11, 633, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestComputedStyle(t *testing.T) { t.Parallel() tests := []struct { sel string by QueryOption }{ {`//*[@id="input1"]`, BySearch}, {`body > input[type="number"]:nth-child(1)`, ByQueryAll}, {`body > input[type="number"]:nth-child(1)`, ByQuery}, {`#input1`, ByID}, {`document.querySelector('#input1')`, ByJSPath}, } for i, test := range tests { test := test t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { t.Parallel() ctx, cancel := testAllocate(t, "js.html") defer cancel() var styles []*css.ComputedProperty if err := Run(ctx, ComputedStyle(test.sel, &styles, test.by)); err != nil { t.Fatalf("got error: %v", err) } for _, style := range styles { if style.Name == "background-color" { if style.Value != "rgb(255, 0, 0)" { t.Logf("expected style 'rgb(255, 0, 0)' got: %s", style.Value) } } } if err := Run(ctx, Click("#input1", ByID), ComputedStyle(test.sel, &styles, test.by), ); err != nil { t.Fatalf("got error: %v", err) } for _, style := range styles { if style.Name == "background-color" { if style.Value != "rgb(255, 255, 0)" { t.Fatalf("expected style 'rgb(255, 255, 0)' got: %s", style.Value) } } } }) } }
explode_data.jsonl/59487
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 581 }
[ 2830, 3393, 89381, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78216, 1669, 3056, 1235, 341, 197, 1903, 301, 914, 198, 197, 197, 1694, 220, 11361, 5341, 198, 197, 59403, 197, 197, 90, 63, 37318, 307, 428, 1355, 16, 1341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func Test_info_View(t *testing.T) { t.Parallel() type fields struct { name string fullname string info metrics.Int64Measure kvs map[metrics.Key]string } type want struct { want []*metrics.View } type test struct { name string fields fields want want checkFunc func(want, []*metrics.View) error beforeFunc func() afterFunc func() } defaultCheckFunc := func(w want, got []*metrics.View) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } tests := []test{ func() test { m := metrics.Int64(metrics.ValdOrg+"/test", "test", metrics.UnitDimensionless) kvs := map[metrics.Key]string{ metrics.MustNewKey("a"): "", } return test{ name: "always returns view", fields: fields{ name: "name", fullname: "fullname", info: *m, kvs: kvs, }, want: want{ want: []*metrics.View{ { Name: "fullname", Description: m.Description(), TagKeys: []metrics.Key{ metrics.MustNewKey("a"), }, Measure: m, Aggregation: metrics.LastValue(), }, }, }, checkFunc: func(w want, got []*metrics.View) error { if !reflect.DeepEqual(got[0].Name, w.want[0].Name) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } if !reflect.DeepEqual(got[0].Description, w.want[0].Description) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } if !reflect.DeepEqual(got[0].TagKeys, w.want[0].TagKeys) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } if !reflect.DeepEqual(got[0].Measure, w.want[0].Measure) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } if !reflect.DeepEqual(got[0].Aggregation.Type.String(), w.want[0].Aggregation.Type.String()) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil }, } }(), } for _, tc := range tests { test := tc t.Run(test.name, func(tt *testing.T) { tt.Parallel() defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) if test.beforeFunc != nil { test.beforeFunc() } if test.afterFunc 
!= nil { defer test.afterFunc() } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } i := &info{ name: test.fields.name, fullname: test.fields.fullname, info: test.fields.info, kvs: test.fields.kvs, } got := i.View() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } }) } }
explode_data.jsonl/70580
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1378 }
[ 2830, 3393, 3109, 50711, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 13158, 5043, 2036, 341, 197, 11609, 257, 914, 198, 197, 94042, 606, 914, 198, 197, 27043, 257, 16734, 7371, 21, 19, 32236, 198, 197, 16463, 11562, 414, 241...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDeleteByQueryCollectionNull(t *testing.T) { k, _ := kuzzle.NewKuzzle(&internal.MockedConnection{}, nil) d := document.NewDocument(k) _, err := d.DeleteByQuery("index", "", json.RawMessage(`{"foo": "bar"}`), nil) assert.NotNil(t, err) }
explode_data.jsonl/75165
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 94 }
[ 2830, 3393, 6435, 1359, 2859, 6482, 3280, 1155, 353, 8840, 836, 8, 341, 16463, 11, 716, 1669, 595, 14945, 7121, 42, 14945, 2099, 10481, 24664, 291, 4526, 22655, 2092, 340, 2698, 1669, 2197, 7121, 7524, 5969, 692, 197, 6878, 1848, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestScript(t *testing.T) { tests := []struct { loc, scr string conf Confidence }{ {"und", "Latn", Low}, {"en-Latn", "Latn", Exact}, {"en", "Latn", High}, {"sr", "Cyrl", Low}, {"kk", "Cyrl", High}, {"kk-CN", "Arab", Low}, {"cmn", "Hans", Low}, {"ru", "Cyrl", High}, {"ru-RU", "Cyrl", High}, {"yue", "Hant", Low}, {"x-abc", "Zzzz", Low}, {"und-zyyy", "Zyyy", Exact}, } for i, tt := range tests { loc, _ := Parse(tt.loc) sc, conf := loc.Script() if sc.String() != tt.scr { t.Errorf("%d:%s: script was %s; want %s", i, tt.loc, sc, tt.scr) } if conf != tt.conf { t.Errorf("%d:%s: confidence was %d; want %d", i, tt.loc, conf, tt.conf) } } }
explode_data.jsonl/15834
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 359 }
[ 2830, 3393, 5910, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 71128, 11, 9224, 914, 198, 197, 67850, 257, 86324, 198, 197, 59403, 197, 197, 4913, 1241, 497, 330, 23140, 77, 497, 12041, 1583, 197, 197, 4913, 268, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestDocsCommand(t *testing.T) { dir, err := ioutil.TempDir("", "docs") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) table := rifftesting.CommandTable{ { Name: "generate docs", Args: []string{cli.DirectoryFlagName, dir}, Prepare: func(t *testing.T, ctx context.Context, c *cli.Config) (context.Context, error) { // ensure the directory is empty os.RemoveAll(dir) return ctx, nil }, CleanUp: func(t *testing.T, ctx context.Context, c *cli.Config) error { files, err := ioutil.ReadDir(dir) if err != nil { t.Error(err) } // expect a single file because the docs command is currently the root command if expected, actual := 1, len(files); expected != actual { t.Errorf("expected %d file, found %d files", expected, actual) } else if expected, actual := "docs.md", files[0].Name(); expected != actual { t.Errorf("expected file name %q, found %q", expected, actual) } return os.RemoveAll(dir) }, }, } table.Run(t, commands.NewDocsCommand) }
explode_data.jsonl/78287
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 417 }
[ 2830, 3393, 63107, 4062, 1155, 353, 8840, 836, 8, 341, 48532, 11, 1848, 1669, 43144, 65009, 6184, 19814, 330, 14120, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 16867, 2643, 84427, 14161, 692, 26481, 1669, 36...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInterlevedExporters(t *testing.T) { // Disabling this test as it fails intermittently. // Refer to https://github.com/knative/pkg/issues/406 t.Skip() // First create a stackdriver exporter _, err := newMetricsExporter(&metricsConfig{ domain: servingDomain, component: testComponent, backendDestination: Stackdriver, stackdriverProjectID: testProj}, TestLogger(t)) if err != nil { t.Error(err) } expectNoPromSrv(t) // Then switch to prometheus exporter _, err = newMetricsExporter(&metricsConfig{ domain: servingDomain, component: testComponent, backendDestination: Prometheus, prometheusPort: 9090}, TestLogger(t)) if err != nil { t.Error(err) } expectPromSrv(t, ":9090") // Finally switch to stackdriver exporter _, err = newMetricsExporter(&metricsConfig{ domain: servingDomain, component: testComponent, backendDestination: Stackdriver, stackdriverProjectID: testProj}, TestLogger(t)) if err != nil { t.Error(err) } }
explode_data.jsonl/29323
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 430 }
[ 2830, 3393, 3306, 273, 2066, 16894, 388, 1155, 353, 8840, 836, 8, 341, 197, 322, 4093, 18768, 419, 1273, 438, 432, 14525, 59669, 4402, 624, 197, 322, 28634, 311, 3703, 1110, 5204, 905, 14109, 29738, 22523, 38745, 14, 19, 15, 21, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestNewTodo(t *testing.T) { createDto := TodoCreateDTO{} todo := NewTodo(createDto) assert.NotNil(t, todo) assert.Equal(t, 11, len(todo.ID)) assert.Equal(t, "", todo.Title) assert.Equal(t, "", todo.Description) assert.False(t, todo.Completed) }
explode_data.jsonl/61581
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 3564, 24176, 1155, 353, 8840, 836, 8, 341, 39263, 14797, 1669, 25404, 4021, 14923, 16094, 3244, 6004, 1669, 1532, 24176, 32602, 14797, 692, 6948, 93882, 1155, 11, 11804, 340, 6948, 12808, 1155, 11, 220, 16, 16, 11, 2422, 539...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParser(t *testing.T) { r := require.New(t) p := &provider{ apply: simpleComponentApplyForTest, } act := &mock.Action{} v, err := value.NewValue("", nil, "") r.NoError(err) err = p.ApplyComponent(nil, v, act) r.Equal(err.Error(), "var(path=value) not exist") v.FillObject(map[string]interface{}{}, "value") err = p.ApplyComponent(nil, v, act) r.NoError(err) output, err := v.LookupValue("output") r.NoError(err) outStr, err := output.String() r.NoError(err) r.Equal(outStr, `apiVersion: "v1" kind: "Pod" metadata: { name: "rss-site" labels: { app: "web" } } `) outputs, err := v.LookupValue("outputs") r.NoError(err) outsStr, err := outputs.String() r.NoError(err) r.Equal(outsStr, `service: { apiVersion: "v1" kind: "Service" metadata: { name: "service" labels: { "trait.oam.dev/resource": "service" } } } `) r.Equal(act.Phase, "Wait") testHealthy = true act = &mock.Action{} _, err = value.NewValue("", nil, "") r.NoError(err) r.Equal(act.Phase, "") }
explode_data.jsonl/8854
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 468 }
[ 2830, 3393, 6570, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1373, 7121, 1155, 340, 3223, 1669, 609, 19979, 515, 197, 197, 10280, 25, 4285, 2189, 28497, 2461, 2271, 345, 197, 532, 92699, 1669, 609, 16712, 11360, 16094, 5195, 11, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateOrganization(t *testing.T) { // successful creation of org assert.NoError(t, PrepareTestDatabase()) owner := AssertExistsAndLoadBean(t, &User{ID: 2}).(*User) const newOrgName = "neworg" org := &User{ Name: newOrgName, } AssertNotExistsBean(t, &User{Name: newOrgName, Type: UserTypeOrganization}) assert.NoError(t, CreateOrganization(org, owner)) org = AssertExistsAndLoadBean(t, &User{Name: newOrgName, Type: UserTypeOrganization}).(*User) ownerTeam := AssertExistsAndLoadBean(t, &Team{Name: ownerTeamName, OrgID: org.ID}).(*Team) AssertExistsAndLoadBean(t, &TeamUser{UID: owner.ID, TeamID: ownerTeam.ID}) CheckConsistencyFor(t, &User{}, &Team{}) }
explode_data.jsonl/71054
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 263 }
[ 2830, 3393, 4021, 41574, 1155, 353, 8840, 836, 8, 341, 197, 322, 6849, 9688, 315, 1240, 198, 6948, 35699, 1155, 11, 31166, 2271, 5988, 12367, 197, 8118, 1669, 5319, 15575, 3036, 5879, 10437, 1155, 11, 609, 1474, 90, 915, 25, 220, 17, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClearRegistry(t *testing.T) { r := new(MeterRegistry) m1 := r.Get("first") m2 := r.Get("second") m1.Mark(10) m2.Mark(30) time.Sleep(2 * time.Second) r.Clear() r.ForEach(func(n string, _m *Meter) { t.Errorf("expected no meters at all, found a meter %s", n) }) if total := r.Get("first").Snapshot().Total; total != 0 { t.Errorf("expected first total to be 0, got %d", total) } if total := r.Get("second").Snapshot().Total; total != 0 { t.Errorf("expected second total to be 0, got %d", total) } }
explode_data.jsonl/21696
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 14008, 15603, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 501, 3189, 1404, 15603, 340, 2109, 16, 1669, 435, 2234, 445, 3896, 1138, 2109, 17, 1669, 435, 2234, 445, 5569, 5130, 2109, 16, 75888, 7, 16, 15, 340, 2109, 17, 7588...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDockerPSSuccess(t *testing.T) { composeMock, docker, _ := getComposeMocks() composeMock.On("Ps", mock.Anything, mock.Anything, mock.Anything).Return([]api.ContainerSummary{{Name: "test", State: "running", Publishers: api.PortPublishers{api.PortPublisher{PublishedPort: 8888}}}}, nil) err := docker.PS() assert.NoError(t, err) }
explode_data.jsonl/50546
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 115 }
[ 2830, 3393, 35, 13659, 47, 1220, 2557, 1155, 353, 8840, 836, 8, 341, 32810, 2900, 11571, 11, 26588, 11, 716, 1669, 633, 70492, 72577, 741, 32810, 2900, 11571, 8071, 445, 20420, 497, 7860, 13311, 1596, 11, 7860, 13311, 1596, 11, 7860, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInt8(t *testing.T) { if Int8(int8(0), int8(1)) { t.Fatalf("desc.TestInt8 failed: %v\n", greaterThanErr) } }
explode_data.jsonl/78907
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 60 }
[ 2830, 3393, 1072, 23, 1155, 353, 8840, 836, 8, 341, 743, 1333, 23, 1548, 23, 7, 15, 701, 526, 23, 7, 16, 593, 341, 197, 3244, 30762, 445, 8614, 8787, 1072, 23, 4641, 25, 1018, 85, 1699, 497, 7046, 26067, 7747, 340, 197, 532, 92 ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestCreatePassiveSellOffer(t *testing.T) { kp0 := newKeypair0() kp1 := newKeypair1() sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100)) createPassiveOffer := CreatePassiveSellOffer{ Selling: NativeAsset{}, Buying: CreditAsset{"ABCD", kp0.Address()}, Amount: "10", Price: "1.0", } received, err := newSignedTransaction( TransactionParams{ SourceAccount: &sourceAccount, IncrementSequenceNum: true, Operations: []Operation{&createPassiveOffer}, BaseFee: MinBaseFee, Timebounds: NewInfiniteTimeout(), }, network.TestNetworkPassphrase, kp1, ) assert.NoError(t, err) expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABAAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAF9eEAAAAAAQAAAAEAAAAAAAAAAdKHZH4AAABAIFA+zNVC+8dptptusks3Eh8SJ3jk+/6/rPxy7IFg4+gpqUotRma5b7QR/gjbnoAsL1tPU0WSYae2y8sJGhQqCg==" assert.Equal(t, expected, received, "Base 64 XDR should match") }
explode_data.jsonl/20681
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 505 }
[ 2830, 3393, 4021, 12187, 533, 68533, 39462, 1155, 353, 8840, 836, 8, 341, 16463, 79, 15, 1669, 501, 6608, 1082, 1310, 15, 741, 16463, 79, 16, 1669, 501, 6608, 1082, 1310, 16, 741, 47418, 7365, 1669, 1532, 16374, 7365, 5969, 79, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQueryPendingSendToEth(t *testing.T) { input := CreateTestEnv(t) ctx := input.Context var ( now = time.Now().UTC() mySender, _ = sdk.AccAddressFromBech32("cosmos1ahx7f8wyertuus9r20284ej0asrs085case3kn") myReceiver = "0xd041c41EA1bf0F006ADBb6d2c9ef9D425dE5eaD7" myTokenContractAddr = "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5" // Pickle allVouchers = sdk.NewCoins( types.NewERC20Token(99999, myTokenContractAddr).GravityCoin(), ) ) // mint some voucher first require.NoError(t, input.BankKeeper.MintCoins(ctx, types.ModuleName, allVouchers)) // set senders balance input.AccountKeeper.NewAccountWithAddress(ctx, mySender) require.NoError(t, input.BankKeeper.SetBalances(ctx, mySender, allVouchers)) // CREATE FIRST BATCH // ================== // add some TX to the pool for i, v := range []uint64{2, 3, 2, 1} { amount := types.NewERC20Token(uint64(i+100), myTokenContractAddr).GravityCoin() fee := types.NewERC20Token(v, myTokenContractAddr).GravityCoin() _, err := input.GravityKeeper.AddToOutgoingPool(ctx, mySender, myReceiver, amount, fee) require.NoError(t, err) // Should create: // 1: amount 100, fee 2 // 2: amount 101, fee 3 // 3: amount 102, fee 2 // 4: amount 104, fee 1 } // when ctx = ctx.WithBlockTime(now) // tx batch size is 2, so that some of them stay behind // Should contain 2 and 3 from above _, err := input.GravityKeeper.BuildOutgoingTXBatch(ctx, myTokenContractAddr, 2) require.NoError(t, err) // Should receive 1 and 4 unbatched, 2 and 3 batched in response response, err := queryPendingSendToEth(ctx, mySender.String(), input.GravityKeeper) require.NoError(t, err) expectedJSON := []byte(`{ "transfers_in_batches": [ { "id": "2", "sender": "cosmos1ahx7f8wyertuus9r20284ej0asrs085case3kn", "dest_address": "0xd041c41EA1bf0F006ADBb6d2c9ef9D425dE5eaD7", "erc20_token": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "101" }, "erc20_fee": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "3" } }, { "id": "3", "sender": 
"cosmos1ahx7f8wyertuus9r20284ej0asrs085case3kn", "dest_address": "0xd041c41EA1bf0F006ADBb6d2c9ef9D425dE5eaD7", "erc20_token": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "102" }, "erc20_fee": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "2" } } ], "unbatched_transfers": [ { "id": "1", "sender": "cosmos1ahx7f8wyertuus9r20284ej0asrs085case3kn", "dest_address": "0xd041c41EA1bf0F006ADBb6d2c9ef9D425dE5eaD7", "erc20_token": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "100" }, "erc20_fee": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "2" } }, { "id": "4", "sender": "cosmos1ahx7f8wyertuus9r20284ej0asrs085case3kn", "dest_address": "0xd041c41EA1bf0F006ADBb6d2c9ef9D425dE5eaD7", "erc20_token": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "103" }, "erc20_fee": { "contract": "0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5", "amount": "1" } } ]} `) assert.JSONEq(t, string(expectedJSON), string(response), "json is equal") }
explode_data.jsonl/8806
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1794 }
[ 2830, 3393, 2859, 32027, 11505, 1249, 65390, 1155, 353, 8840, 836, 8, 341, 22427, 1669, 4230, 2271, 14359, 1155, 340, 20985, 1669, 1946, 9328, 198, 2405, 2399, 197, 80922, 338, 284, 882, 13244, 1005, 21183, 741, 197, 13624, 20381, 11, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestURLsAreEqual(t *testing.T) { check := func(t *testing.T, u1Str, u2Str string, expectedSame bool) { t.Helper() u1, err := url.Parse(u1Str) if err != nil { t.Fatalf("Error parsing url %q: %v", u1Str, err) } u2, err := url.Parse(u2Str) if err != nil { t.Fatalf("Error parsing url %q: %v", u2Str, err) } same := urlsAreEqual(u1, u2) if expectedSame && !same { t.Fatalf("Expected %v and %v to be the same, they were not", u1, u2) } else if !expectedSame && same { t.Fatalf("Expected %v and %v to be different, they were not", u1, u2) } } check(t, "nats://localhost:4222", "nats://localhost:4222", true) check(t, "nats://ivan:pwd@localhost:4222", "nats://ivan:pwd@localhost:4222", true) check(t, "nats://ivan@localhost:4222", "nats://ivan@localhost:4222", true) check(t, "nats://ivan:@localhost:4222", "nats://ivan:@localhost:4222", true) check(t, "nats://host1:4222", "nats://host2:4222", false) }
explode_data.jsonl/17006
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 438 }
[ 2830, 3393, 3144, 82, 11526, 2993, 1155, 353, 8840, 836, 8, 972, 25157, 1669, 2915, 1155, 353, 8840, 836, 11, 575, 16, 2580, 11, 575, 17, 2580, 914, 11, 3601, 19198, 1807, 8, 972, 197, 3244, 69282, 3568, 197, 10676, 16, 11, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestProcess_SetPrefix(t *testing.T) { path := pathToPhpFpm process := phpfpm.NewProcess(path) process.SetDatadir(basepath + "/var") if want, have := basepath+"/var/phpfpm.pid", process.PidFile; want != have { t.Errorf("expected %#v, got %#v", want, have) } if want, have := basepath+"/var/phpfpm.error_log", process.ErrorLog; want != have { t.Errorf("expected %#v, got %#v", want, have) } if want, have := basepath+"/var/phpfpm.sock", process.Listen; want != have { t.Errorf("expected %#v, got %#v", want, have) } }
explode_data.jsonl/49280
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 224 }
[ 2830, 3393, 7423, 14812, 14335, 1155, 353, 8840, 836, 8, 341, 26781, 1669, 1815, 1249, 50144, 37, 5187, 198, 53314, 1669, 1319, 15897, 5187, 7121, 7423, 5581, 340, 53314, 4202, 45696, 41173, 12663, 2343, 488, 3521, 947, 1138, 743, 1366, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func Test_provider_doTaskReporter(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() logger := NewMockLogger(ctrl) logger.EXPECT().Infof("begin do task report, taskID: %d", uint64(1)).Return() var ( bdl *bundle.Bundle dbClient *dbclient.Client ) monkey.PatchInstanceMethod(reflect.TypeOf(bdl), "PipelineCallback", func(_ *bundle.Bundle, _ apistructs.PipelineCallbackRequest, openapiAddr, token string) error { return nil }) monkey.PatchInstanceMethod(reflect.TypeOf(dbClient), "GetPipelineTask", func(_ *dbclient.Client, _ interface{}) (spec.PipelineTask, error) { return spec.PipelineTask{ID: 1}, nil }) defer monkey.UnpatchAll() type fields struct { bdl *bundle.Bundle dbClient *db.Client Log logs.Logger config *config } type args struct { ctx context.Context taskID uint64 } tests := []struct { name string fields fields args args wantErr bool }{ { name: "test doTaskReporter", fields: fields{ bdl: bdl, dbClient: &db.Client{Client: dbClient}, Log: logger, config: &config{}, }, args: args{ ctx: context.Background(), taskID: 1, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &provider{ bdl: tt.fields.bdl, dbClient: tt.fields.dbClient, Log: tt.fields.Log, Cfg: tt.fields.config, EdgeRegister: &edgepipeline_register.MockEdgeRegister{}, } if err := p.doTaskReporter(tt.args.ctx, tt.args.taskID); (err != nil) != tt.wantErr { t.Errorf("doTaskReporter() error = %v, wantErr %v", err, tt.wantErr) } }) } }
explode_data.jsonl/34301
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 776 }
[ 2830, 3393, 29518, 26309, 6262, 52766, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 741, 17060, 1669, 1532, 11571, 7395, 62100, 692, 17060, 22402, 7285, 1005, 1731, 69, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStrangeAssignmentSameLine(t *testing.T) { _, errs := ParseString(`x = 7 y = 6`) bvmUtils.AssertNow(t, len(errs) == 1, errs.Format()) }
explode_data.jsonl/49777
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 64 }
[ 2830, 3393, 91334, 41613, 19198, 2460, 1155, 353, 8840, 836, 8, 341, 197, 6878, 70817, 1669, 14775, 703, 5809, 87, 284, 220, 22, 220, 379, 284, 220, 21, 24183, 2233, 7338, 4209, 11711, 7039, 1155, 11, 2422, 3964, 82, 8, 621, 220, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestCallGetBasicInfo(t *testing.T) { client, err := testClient() if err != nil { t.Fatalf("Should have instantiated a new client with valid config and http client, but it threw error: %s", err.Error()) } _, err = client.CallGetBasicInfo() if err != nil { t.Fatalf("Could not call get basic info: %s", err.Error()) } }
explode_data.jsonl/26391
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 114 }
[ 2830, 3393, 7220, 1949, 15944, 1731, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 1273, 2959, 741, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 14996, 614, 54586, 264, 501, 2943, 448, 2697, 2193, 323, 1758, 2943, 11, 714,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestAppAdvsettings(t *testing.T) { dbfile := tests.Tempfile() defer os.Remove(dbfile) os.Setenv("HEKETI_EXECUTOR", "mock") defer os.Unsetenv("HEKETI_EXECUTOR") os.Setenv("HEKETI_DB_PATH", dbfile) defer os.Unsetenv("HEKETI_DB_PATH") conf := &GlusterFSConfig{ Executor: "crazyexec", Allocator: "simple", DBfile: "/path/to/nonexistent/heketi.db", BrickMaxSize: 1024, BrickMinSize: 4, BrickMaxNum: 33, } bmax, bmin, bnum := BrickMaxSize, BrickMinSize, BrickMaxNum defer func() { BrickMaxSize, BrickMinSize, BrickMaxNum = bmax, bmin, bnum }() app := NewApp(conf) defer app.Close() tests.Assert(t, app != nil) tests.Assert(t, app.conf.Executor == "mock") tests.Assert(t, app.conf.DBfile == dbfile) tests.Assert(t, BrickMaxNum == 33) tests.Assert(t, BrickMaxSize == 1*TB) tests.Assert(t, BrickMinSize == 4*GB) }
explode_data.jsonl/51869
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 388 }
[ 2830, 3393, 2164, 23453, 6511, 1155, 353, 8840, 836, 8, 1476, 20939, 1192, 1669, 7032, 65009, 1192, 741, 16867, 2643, 13270, 9791, 1192, 340, 25078, 4202, 3160, 445, 1799, 42, 1348, 40, 38235, 83321, 497, 330, 16712, 1138, 16867, 2643, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetDescSuccess(t *testing.T) { commandMock := new(commandDescMock) commandMock.On("GetCurrentBranch").Return("test-branch", nil) commandMock.On("GetConfigValue").Return("test-value", nil) d := &DescriptionImpl{ Command: commandMock, } result, err := d.GetDesc() assert := assert.New(t) assert.Equal(result, "test-value") assert.NoError(err) }
explode_data.jsonl/50026
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 134 }
[ 2830, 3393, 1949, 11065, 7188, 1155, 353, 8840, 836, 8, 341, 45566, 11571, 1669, 501, 15143, 11065, 11571, 340, 45566, 11571, 8071, 445, 62981, 18197, 1827, 5598, 445, 1944, 12, 17940, 497, 2092, 340, 45566, 11571, 8071, 445, 1949, 2648, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLocalhostMcrouter(t *testing.T) { if !setup(t, testMcrouterServer) { return } c := New(testMcrouterServer) c.GetMultiSupported = false checkErr := func(err error, format string, args ...interface{}) { if err != nil { t.Fatalf(format, args...) } } t.Run("GetMulti", func(t *testing.T) { _ = c.Add(&Item{Key: "foo", Value: []byte("fooval")}) _ = c.Add(&Item{Key: "bar", Value: []byte("barval")}) m, err := c.GetMulti([]string{"foo", "bar"}) checkErr(err, "GetMulti: %v", err) if g, e := len(m), 2; g != e { t.Errorf("GetMulti: got len(map) = %d, want = %d", g, e) } if _, ok := m["foo"]; !ok { t.Fatalf("GetMulti: didn't get key 'foo'") } if _, ok := m["bar"]; !ok { t.Fatalf("GetMulti: didn't get key 'bar'") } if g, e := string(m["foo"].Value), "fooval"; g != e { t.Errorf("GetMulti: foo: got %q, want %q", g, e) } if g, e := string(m["bar"].Value), "barval"; g != e { t.Errorf("GetMulti: bar: got %q, want %q", g, e) } }) }
explode_data.jsonl/81192
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 460 }
[ 2830, 3393, 7319, 3790, 44, 5082, 2676, 1155, 353, 8840, 836, 8, 341, 743, 753, 15188, 1155, 11, 1273, 44, 5082, 2676, 5475, 8, 341, 197, 853, 198, 197, 532, 1444, 1669, 1532, 8623, 44, 5082, 2676, 5475, 340, 1444, 2234, 20358, 3463...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestApplicationContext_RegisterMethodBean(t *testing.T) { t.Run("method bean", func(t *testing.T) { c, ch := container() c.Property("server.version", "1.0.0") parent := c.Object(new(Server)) bd := c.Provide((*Server).Consumer, parent.ID()) err := c.Refresh() assert.Nil(t, err) assert.Equal(t, bd.BeanName(), "Consumer") p := <-ch var s *Server err = p.BeanRegistry().Get(&s) assert.Nil(t, err) assert.Equal(t, s.Version, "1.0.0") s.Version = "2.0.0" var consumer *Consumer err = p.BeanRegistry().Get(&consumer) assert.Nil(t, err) assert.Equal(t, consumer.s.Version, "2.0.0") }) t.Run("method bean arg", func(t *testing.T) { c, ch := container() c.Property("server.version", "1.0.0") parent := c.Object(new(Server)) // c.Bean((*Server).ConsumerArg, "", "${i:=9}") c.Provide((*Server).ConsumerArg, parent.ID(), "${i:=9}") err := c.Refresh() assert.Nil(t, err) p := <-ch var s *Server err = p.BeanRegistry().Get(&s) assert.Nil(t, err) assert.Equal(t, s.Version, "1.0.0") s.Version = "2.0.0" var consumer *Consumer err = p.BeanRegistry().Get(&consumer) assert.Nil(t, err) assert.Equal(t, consumer.s.Version, "2.0.0") }) t.Run("method bean wire to other bean", func(t *testing.T) { c, ch := container() c.Property("server.version", "1.0.0") parent := c.Provide(NewServerInterface) // c.Provide(ServerInterface.Consumer, "").DependsOn("ServerInterface") c.Provide(ServerInterface.Consumer, parent.ID()).DependsOn("ServerInterface") c.Object(new(Service)) err := c.Refresh() assert.Nil(t, err) p := <-ch var si ServerInterface err = p.BeanRegistry().Get(&si) assert.Nil(t, err) s := si.(*Server) assert.Equal(t, s.Version, "1.0.0") s.Version = "2.0.0" var consumer *Consumer err = p.BeanRegistry().Get(&consumer) assert.Nil(t, err) assert.Equal(t, consumer.s.Version, "2.0.0") }) t.Run("circle autowire", func(t *testing.T) { okCount := 0 errCount := 0 for i := 0; i < 20; i++ { // 不要排序 func() { defer func() { if err := recover(); err != nil { errCount++ var v string switch e := err.(type) { case 
error: v = e.Error() case string: v = e } if !strings.Contains(v, "found circle autowire") { panic(errors.New("test error")) } } else { okCount++ } }() c := gs.New() c.Property("server.version", "1.0.0") parent := c.Object(new(Server)).DependsOn("Service") c.Provide((*Server).Consumer, parent.ID()).DependsOn("Server") c.Object(new(Service)) err := c.Refresh() util.Panic(err).When(err != nil) }() } fmt.Printf("ok:%d err:%d\n", okCount, errCount) }) t.Run("method bean autowire", func(t *testing.T) { c, ch := container() c.Property("server.version", "1.0.0") c.Object(new(Server)) err := c.Refresh() assert.Nil(t, err) p := <-ch var s *Server err = p.BeanRegistry().Get(&s) assert.Nil(t, err) assert.Equal(t, s.Version, "1.0.0") }) t.Run("method bean selector type", func(t *testing.T) { c, ch := container() c.Property("server.version", "1.0.0") c.Object(new(Server)) c.Provide(func(s *Server) *Consumer { return s.Consumer() }, (*Server)(nil)) err := c.Refresh() assert.Nil(t, err) p := <-ch var s *Server err = p.BeanRegistry().Get(&s) assert.Nil(t, err) assert.Equal(t, s.Version, "1.0.0") s.Version = "2.0.0" var consumer *Consumer err = p.BeanRegistry().Get(&consumer) assert.Nil(t, err) assert.Equal(t, consumer.s.Version, "2.0.0") }) t.Run("method bean selector type error", func(t *testing.T) { c := gs.New() c.Property("server.version", "1.0.0") c.Object(new(Server)) c.Provide(func(s *Server) *Consumer { return s.Consumer() }, (*int)(nil)) err := c.Refresh() assert.Error(t, err, "can't find bean, bean:\"int:\" type:\"\\*gs_test.Server\"") }) t.Run("method bean selector beanId", func(t *testing.T) { c, ch := container() c.Property("server.version", "1.0.0") c.Object(new(Server)) c.Provide(func(s *Server) *Consumer { return s.Consumer() }, "Server") err := c.Refresh() assert.Nil(t, err) p := <-ch var s *Server err = p.BeanRegistry().Get(&s) assert.Nil(t, err) assert.Equal(t, s.Version, "1.0.0") s.Version = "2.0.0" var consumer *Consumer err = p.BeanRegistry().Get(&consumer) 
assert.Nil(t, err) assert.Equal(t, consumer.s.Version, "2.0.0") }) t.Run("method bean selector beanId error", func(t *testing.T) { c := gs.New() c.Property("server.version", "1.0.0") c.Object(new(Server)) c.Provide(func(s *Server) *Consumer { return s.Consumer() }, "NULL") err := c.Refresh() assert.Error(t, err, "can't find bean, bean:\"NULL\" type:\"\\*gs_test.Server\"") }) }
explode_data.jsonl/17416
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2168 }
[ 2830, 3393, 19736, 73124, 3523, 10437, 1155, 353, 8840, 836, 8, 1476, 3244, 16708, 445, 4393, 20427, 497, 2915, 1155, 353, 8840, 836, 8, 1476, 197, 1444, 11, 521, 1669, 5476, 741, 197, 1444, 15727, 445, 4030, 19484, 497, 330, 16, 13, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContainsKey(t *testing.T) { s := []string{"a", "b", "c", "z"} assert.False(t, containsKey(s, "h"), "unexpected result") assert.True(t, containsKey(s, "b"), "unexpected result") }
explode_data.jsonl/9031
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 75 }
[ 2830, 3393, 23805, 1592, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 3056, 917, 4913, 64, 497, 330, 65, 497, 330, 66, 497, 330, 89, 16707, 6948, 50757, 1155, 11, 5610, 1592, 1141, 11, 330, 71, 3975, 330, 53859, 1102, 1138, 6948, 32443...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestFormatResourceName(t *testing.T) { tests := []struct { kind schema.GroupKind name string want string }{ {schema.GroupKind{}, "", ""}, {schema.GroupKind{}, "name", "name"}, {schema.GroupKind{Kind: "Kind"}, "", "kind/"}, // should not happen in practice {schema.GroupKind{Kind: "Kind"}, "name", "kind/name"}, {schema.GroupKind{Group: "group", Kind: "Kind"}, "name", "kind.group/name"}, } for _, tt := range tests { if got := formatResourceName(tt.kind, tt.name, true); got != tt.want { t.Errorf("formatResourceName(%q, %q) = %q, want %q", tt.kind, tt.name, got, tt.want) } } }
explode_data.jsonl/21580
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 248 }
[ 2830, 3393, 4061, 4783, 675, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 197, 15314, 10802, 5407, 10629, 198, 197, 11609, 914, 198, 197, 50780, 914, 198, 197, 59403, 197, 197, 90, 17349, 5407, 10629, 22655, 7342, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestParseVpsSpsPps(t *testing.T) { s := "a=fmtp:96 sprop-vps=QAEMAf//AWAAAAMAkAAAAwAAAwA/ugJA; sprop-sps=QgEBAWAAAAMAkAAAAwAAAwA/oAUCAXHy5bpKTC8BAQAAAwABAAADAA8I; sprop-pps=RAHAc8GJ" f, err := ParseAFmtPBase(s) assert.Equal(t, nil, err) vps, sps, pps, err := ParseVpsSpsPps(&f) assert.Equal(t, nil, err) nazalog.Debugf("%s", hex.Dump(vps)) nazalog.Debugf("%s", hex.Dump(sps)) nazalog.Debugf("%s", hex.Dump(pps)) }
explode_data.jsonl/55596
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 233 }
[ 2830, 3393, 14463, 53, 1690, 50, 1690, 47, 1690, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 330, 64, 18111, 76, 790, 25, 24, 21, 274, 2674, 8273, 1690, 28, 47522, 48610, 69, 322, 14419, 50107, 38100, 74, 25699, 86, 6029, 22600, 32, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGenerateSnapshotErrors(t *testing.T) { conf, cleanup := testConfig(t) defer cleanup() provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{}) defer func() { provider.Close() }() // create a ledger _, genesisBlk := testutil.NewBlockGenerator(t, "testLedgerid", false) lgr, err := provider.Create(genesisBlk) require.NoError(t, err) kvlgr := lgr.(*kvLedger) closeAndReopenLedgerProvider := func() { provider.Close() provider = testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{}) lgr, err = provider.Open("testLedgerid") require.NoError(t, err) kvlgr = lgr.(*kvLedger) } t.Run("snapshot tmp dir creation returns error", func(t *testing.T) { closeAndReopenLedgerProvider() require.NoError(t, os.RemoveAll( // remove the base tempdir so that the snapshot tempdir creation fails InProgressSnapshotsPath(conf.SnapshotsConfig.RootDir), )) err := kvlgr.generateSnapshot() require.Error(t, err) require.Contains(t, err.Error(), "error while creating temp dir") }) t.Run("block store returns error", func(t *testing.T) { closeAndReopenLedgerProvider() provider.blkStoreProvider.Close() // close the blockstore provider to trigger the error err := kvlgr.generateSnapshot() require.Error(t, err) errStackTrace := fmt.Sprintf("%+v", err) require.Contains(t, errStackTrace, "internal leveldb error while obtaining db iterator") require.Contains(t, errStackTrace, "fabric/common/ledger/blkstorage/blockindex.go") }) t.Run("config history mgr returns error", func(t *testing.T) { closeAndReopenLedgerProvider() provider.configHistoryMgr.Close() // close the configHistoryMgr to trigger the error err := kvlgr.generateSnapshot() require.Error(t, err) errStackTrace := fmt.Sprintf("%+v", err) require.Contains(t, errStackTrace, "internal leveldb error while obtaining db iterator") require.Contains(t, errStackTrace, "fabric/core/ledger/confighistory/mgr.go") }) t.Run("statedb returns error", func(t *testing.T) { closeAndReopenLedgerProvider() 
provider.dbProvider.Close() // close the dbProvider to trigger the error err := kvlgr.generateSnapshot() require.Error(t, err) errStackTrace := fmt.Sprintf("%+v", err) require.Contains(t, errStackTrace, "internal leveldb error while obtaining db iterator") require.Contains(t, errStackTrace, "statedb/stateleveldb/stateleveldb.go") }) t.Run("renaming to the final snapshot dir returns error", func(t *testing.T) { closeAndReopenLedgerProvider() snapshotFinalDir := SnapshotDirForLedgerHeight(conf.SnapshotsConfig.RootDir, "testLedgerid", 1) require.NoError(t, os.MkdirAll(snapshotFinalDir, 0744)) defer os.RemoveAll(snapshotFinalDir) require.NoError(t, ioutil.WriteFile( // make a non-empty snapshotFinalDir to trigger failure on rename filepath.Join(snapshotFinalDir, "dummyFile"), []byte("dummy file"), 0444), ) err := kvlgr.generateSnapshot() require.Contains(t, err.Error(), "error while renaming dir") }) }
explode_data.jsonl/74143
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1081 }
[ 2830, 3393, 31115, 15009, 13877, 1155, 353, 8840, 836, 8, 341, 67850, 11, 21290, 1669, 1273, 2648, 1155, 340, 16867, 21290, 741, 197, 19979, 1669, 1273, 1314, 3564, 5179, 29879, 11, 259, 11, 609, 16712, 34848, 1989, 291, 18837, 1851, 17...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRegenerateTeamInviteId(t *testing.T) { th := Setup(t) defer th.TearDown() Client := th.Client team := &model.Team{DisplayName: "Name", Description: "Some description", CompanyName: "Some company name", AllowOpenInvite: false, InviteId: "inviteid0", Name: "z-z-" + model.NewRandomTeamName() + "a", Email: "success+" + model.NewId() + "@simulator.amazonses.com", Type: model.TEAM_OPEN} team, _ = Client.CreateTeam(team) assert.NotEqual(t, team.InviteId, "") assert.NotEqual(t, team.InviteId, "inviteid0") rteam, resp := Client.RegenerateTeamInviteId(team.Id) CheckNoError(t, resp) assert.NotEqual(t, team.InviteId, rteam.InviteId) assert.NotEqual(t, team.InviteId, "") }
explode_data.jsonl/70710
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 267 }
[ 2830, 3393, 3477, 13220, 14597, 93540, 764, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 340, 16867, 270, 836, 682, 4454, 741, 71724, 1669, 270, 11716, 271, 197, 9196, 1669, 609, 2528, 65842, 90, 26456, 25, 330, 675, 497, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGarbageCollectorConstruction(t *testing.T) { config := &restclient.Config{} config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()} tweakableRM := meta.NewDefaultRESTMapper(nil) rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, legacyscheme.Registry.RESTMapper()}} metaOnlyClientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc) config.ContentConfig.NegotiatedSerializer = nil clientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc) podResource := map[schema.GroupVersionResource]struct{}{ {Version: "v1", Resource: "pods"}: {}, } twoResources := map[schema.GroupVersionResource]struct{}{ {Version: "v1", Resource: "pods"}: {}, {Group: "tpr.io", Version: "v1", Resource: "unknown"}: {}, } client := fake.NewSimpleClientset() sharedInformers := informers.NewSharedInformerFactory(client, 0) // No monitor will be constructed for the non-core resource, but the GC // construction will not fail. alwaysStarted := make(chan struct{}) close(alwaysStarted) gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted) if err != nil { t.Fatal(err) } assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors)) // Make sure resource monitor syncing creates and stops resource monitors. 
tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil) err = gc.resyncMonitors(twoResources) if err != nil { t.Errorf("Failed adding a monitor: %v", err) } assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors)) err = gc.resyncMonitors(podResource) if err != nil { t.Errorf("Failed removing a monitor: %v", err) } assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors)) // Make sure the syncing mechanism also works after Run() has been called stopCh := make(chan struct{}) defer close(stopCh) go gc.Run(1, stopCh) err = gc.resyncMonitors(twoResources) if err != nil { t.Errorf("Failed adding a monitor: %v", err) } assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors)) err = gc.resyncMonitors(podResource) if err != nil { t.Errorf("Failed removing a monitor: %v", err) } assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors)) }
explode_data.jsonl/61998
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 853 }
[ 2830, 3393, 43930, 20652, 53694, 50818, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 609, 3927, 2972, 10753, 16094, 25873, 12614, 2648, 2067, 65978, 10029, 13909, 284, 21759, 89592, 36913, 4153, 90, 36913, 4153, 25, 8823, 3243, 7121, 14610, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestSetOptionsSetFlags(t *testing.T) { kp0 := newKeypair0() sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484318)) setOptions := SetOptions{ SetFlags: []AccountFlag{AuthRequired, AuthRevocable}, } received, err := newSignedTransaction( TransactionParams{ SourceAccount: &sourceAccount, IncrementSequenceNum: true, Operations: []Operation{&setOptions}, BaseFee: MinBaseFee, Timebounds: NewInfiniteTimeout(), }, network.TestNetworkPassphrase, kp0, ) assert.NoError(t, err) expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAHwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAQAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeoucsUAAABAn2E6acbadQNs0m2+lc5DpMpPQ/+8Y2l0cUfmSKoHSt5VpB0EZI8lQY9smiOtSd7a3aewrMCJqbY5Iy6a7dFiDg==" assert.Equal(t, expected, received, "Base 64 XDR should match") }
explode_data.jsonl/20665
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 415 }
[ 2830, 3393, 1649, 3798, 1649, 9195, 1155, 353, 8840, 836, 8, 341, 16463, 79, 15, 1669, 501, 6608, 1082, 1310, 15, 741, 47418, 7365, 1669, 1532, 16374, 7365, 5969, 79, 15, 26979, 1507, 526, 21, 19, 7, 19, 15, 18, 23, 20, 20, 22, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewResolverWithReadWriteCache(t *testing.T) { r := httptransport.NewResolver(httptransport.Config{ CacheResolutions: true, }) ar, ok := r.(resolver.AddressResolver) if !ok { t.Fatal("not the resolver we expected") } ewr, ok := ar.Resolver.(resolver.ErrorWrapperResolver) if !ok { t.Fatal("not the resolver we expected") } cr, ok := ewr.Resolver.(*resolver.CacheResolver) if !ok { t.Fatal("not the resolver we expected") } if cr.ReadOnly != false { t.Fatal("expected readwrite cache here") } _, ok = cr.Resolver.(resolver.SystemResolver) if !ok { t.Fatal("not the resolver we expected") } }
explode_data.jsonl/78379
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 248 }
[ 2830, 3393, 3564, 18190, 2354, 58610, 8233, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1758, 26445, 7121, 18190, 19886, 26445, 10753, 515, 197, 6258, 1777, 1061, 20201, 25, 830, 345, 197, 3518, 69340, 11, 5394, 1669, 435, 12832, 48943, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestSleep(t *testing.T) { testcases := map[string]struct { Arg time.Duration CancelAfter time.Duration Expected time.Duration Hook func() }{ "negative": {Arg: -1 * time.Hour, Expected: 0}, "zero": {Arg: 0, Expected: 0}, "canceled": {Arg: 1 * time.Hour, CancelAfter: 1 * time.Second, Expected: 1 * time.Second}, "normal": {Arg: 1 * time.Second, Expected: 1 * time.Second}, "late-cancel": {Arg: 1 * time.Second, CancelAfter: 1 * time.Hour, Expected: 1 * time.Second}, "race": {Arg: 11 * (time.Second / 10), CancelAfter: 1 * time.Second, Hook: func() { time.Sleep(time.Second / 2) }, Expected: 3 * (time.Second / 2)}, } for tcname, tcinfo := range testcases { t.Run(tcname, func(t *testing.T) { ctx := dlog.NewTestContext(t, false) if tcinfo.CancelAfter > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, tcinfo.CancelAfter) defer cancel() } sleepTestHook = tcinfo.Hook start := time.Now() SleepWithContext(ctx, tcinfo.Arg) actual := time.Since(start) slop := 10 * time.Millisecond switch runtime.GOOS { case "darwin": slop *= 15 // Perhaps just CircleCI being bad, not darwin in general? case "windows" : slop *= 10 // Be forgiving of running in a VM } assertDurationEq(t, tcinfo.Expected, actual, slop) }) } }
explode_data.jsonl/37539
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 582 }
[ 2830, 3393, 41745, 1155, 353, 8840, 836, 8, 1476, 18185, 23910, 1669, 2415, 14032, 60, 1235, 341, 197, 197, 2735, 260, 882, 33795, 198, 197, 6258, 4949, 6025, 882, 33795, 198, 197, 197, 18896, 262, 882, 33795, 198, 197, 13292, 1941, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestExtar(t *testing.T) { switch GOOS { case "windows": t.Skip("skipping signal test on Windows") } defer func() { os.Remove("libgo4.a") os.Remove("libgo4.h") os.Remove("testar") os.Remove("testar.ran") os.RemoveAll("pkg") }() os.Remove("testar") dir, err := os.Getwd() if err != nil { t.Fatal(err) } s := strings.Replace(testar, "PWD", dir, 1) if err := ioutil.WriteFile("testar", []byte(s), 0777); err != nil { t.Fatal(err) } cmd := exec.Command("go", "build", "-buildmode=c-archive", "-ldflags=-extar="+filepath.Join(dir, "testar"), "-o", "libgo4.a", "libgo4") cmd.Env = gopathEnv if out, err := cmd.CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) } if _, err := os.Stat("testar.ran"); err != nil { if os.IsNotExist(err) { t.Error("testar does not exist after go build") } else { t.Errorf("error checking testar: %v", err) } } }
explode_data.jsonl/50863
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 404 }
[ 2830, 3393, 6756, 277, 1155, 353, 8840, 836, 8, 341, 8961, 12604, 3126, 341, 2722, 330, 27077, 4660, 197, 3244, 57776, 445, 4886, 5654, 8286, 1273, 389, 5515, 1138, 197, 630, 16867, 2915, 368, 341, 197, 25078, 13270, 445, 2740, 3346, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestApiTest_MatchesTextResponseBody(t *testing.T) { handler := http.NewServeMux() handler.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "text/plain") _, err := w.Write([]byte(`hello`)) if err != nil { panic(err) } }) apitest.New(). Handler(handler). Get("/hello"). Expect(t). Body(`hello`). Status(http.StatusOK). End() }
explode_data.jsonl/54802
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 186 }
[ 2830, 3393, 6563, 2271, 1245, 9118, 1178, 29637, 1155, 353, 8840, 836, 8, 341, 53326, 1669, 1758, 7121, 60421, 44, 2200, 741, 53326, 63623, 4283, 14990, 497, 2915, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 6692, 69794, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTransactionType(t *testing.T) { tracer, apmtracer, recorder := newTestTracer() defer apmtracer.Close() type test struct { Tag opentracing.Tag Type string } tests := []test{ {Tag: opentracing.Tag{Key: "component", Value: "foo"}, Type: "foo"}, {Tag: opentracing.Tag{Key: "http.url", Value: "http://host/path"}, Type: "request"}, {Tag: opentracing.Tag{Key: "foo", Value: "bar"}, Type: "custom"}, // default {Tag: opentracing.Tag{Key: "type", Value: "baz"}, Type: "baz"}, } for _, test := range tests { span := tracer.StartSpan("name", test.Tag) span.Finish() } apmtracer.Flush(nil) payloads := recorder.Payloads() transactions := payloads.Transactions require.Len(t, transactions, len(tests)) for i, test := range tests { assert.Equal(t, test.Type, transactions[i].Type) } }
explode_data.jsonl/28589
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 316 }
[ 2830, 3393, 8070, 929, 1155, 353, 8840, 836, 8, 341, 25583, 9584, 11, 1443, 76, 94941, 11, 48835, 1669, 501, 2271, 1282, 9584, 741, 16867, 1443, 76, 94941, 10421, 2822, 13158, 1273, 2036, 341, 197, 197, 5668, 220, 1179, 23745, 4527, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_Handler_NopPipe_ExpectNoErrorsAndPanics(t *testing.T) { h, err := New(mockPipes, mockStruct{}, converterMock) assert.NoError(t, err) handler := h.Handler() assert.NotPanics(t, func() { err := handler.(func(*mockContext) error)(&mockContext{}) assert.NoError(t, err) }) }
explode_data.jsonl/45767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 41879, 1604, 453, 34077, 62, 17536, 2753, 13877, 3036, 35693, 1211, 1155, 353, 8840, 836, 8, 341, 9598, 11, 1848, 1669, 1532, 30389, 47, 8923, 11, 7860, 9422, 22655, 27058, 11571, 340, 6948, 35699, 1155, 11, 1848, 692, 53326...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestExemplar_CopyTo(t *testing.T) { ms := NewExemplar() generateTestExemplar().CopyTo(ms) assert.EqualValues(t, generateTestExemplar(), ms) }
explode_data.jsonl/32767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 59 }
[ 2830, 3393, 840, 25892, 277, 77637, 1249, 1155, 353, 8840, 836, 8, 341, 47691, 1669, 1532, 840, 25892, 277, 741, 3174, 13220, 2271, 840, 25892, 277, 1005, 12106, 1249, 35680, 340, 6948, 12808, 6227, 1155, 11, 6923, 2271, 840, 25892, 277...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestContainerStatsCollectionReconnection(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) dockerID := "container1" ctx, cancel := context.WithCancel(context.TODO()) statChan := make(chan *docker.Stats) statErr := fmt.Errorf("test error") closedChan := make(chan *docker.Stats) close(closedChan) mockContainer := &api.DockerContainer{ DockerID: dockerID, Container: &api.Container{ KnownStatusUnsafe: api.ContainerRunning, }, } gomock.InOrder( mockDockerClient.EXPECT().Stats(dockerID, ctx).Return(nil, statErr), resolver.EXPECT().ResolveContainer(dockerID).Return(mockContainer, nil), mockDockerClient.EXPECT().Stats(dockerID, ctx).Return(closedChan, nil), resolver.EXPECT().ResolveContainer(dockerID).Return(mockContainer, nil), mockDockerClient.EXPECT().Stats(dockerID, ctx).Return(statChan, nil), ) container := &StatsContainer{ containerMetadata: &ContainerMetadata{ DockerID: dockerID, }, ctx: ctx, cancel: cancel, client: mockDockerClient, resolver: resolver, } container.StartStatsCollection() time.Sleep(checkPointSleep) container.StopStatsCollection() }
explode_data.jsonl/22612
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 461 }
[ 2830, 3393, 4502, 16635, 6482, 693, 7742, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 741, 77333, 35, 13659, 2959, 1669, 7860, 814, 13659, 2068, 7121, 11571, 35, 13659, 295...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInsertKeyJSON(t *testing.T) { cdc := codec.New() foo := map[string]string{"foo": "foofoo"} bar := map[string]string{"barInner": "barbar"} // create raw messages bz, err := cdc.MarshalJSON(foo) require.NoError(t, err) fooRaw := json.RawMessage(bz) bz, err = cdc.MarshalJSON(bar) require.NoError(t, err) barRaw := json.RawMessage(bz) // make the append appBz, err := InsertKeyJSON(cdc, fooRaw, "barOuter", barRaw) require.NoError(t, err) // test the append var appended map[string]json.RawMessage err = cdc.UnmarshalJSON(appBz, &appended) require.NoError(t, err) var resBar map[string]string err = cdc.UnmarshalJSON(appended["barOuter"], &resBar) require.NoError(t, err) require.Equal(t, bar, resBar, "appended: %v", appended) }
explode_data.jsonl/40558
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 308 }
[ 2830, 3393, 13780, 1592, 5370, 1155, 353, 8840, 836, 8, 341, 1444, 7628, 1669, 34647, 7121, 2822, 197, 7975, 1669, 2415, 14032, 30953, 4913, 7975, 788, 330, 7975, 7975, 16707, 90709, 1669, 2415, 14032, 30953, 4913, 2257, 31597, 788, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCatalog_ListNodes_ACLFilter(t *testing.T) { t.Parallel() dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() testrpc.WaitForLeader(t, s1.RPC, "dc1") // We scope the reply in each of these since msgpack won't clear out an // existing slice if the incoming one is nil, so it's best to start // clean each time. // Prior to version 8, the node policy should be ignored. args := structs.DCSpecificRequest{ Datacenter: "dc1", } { reply := structs.IndexedNodes{} if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { t.Fatalf("err: %v", err) } if len(reply.Nodes) != 1 { t.Fatalf("bad: %v", reply.Nodes) } } // Now turn on version 8 enforcement and try again. s1.config.ACLEnforceVersion8 = true { reply := structs.IndexedNodes{} if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { t.Fatalf("err: %v", err) } if len(reply.Nodes) != 0 { t.Fatalf("bad: %v", reply.Nodes) } } // Create an ACL that can read the node. arg := structs.ACLRequest{ Datacenter: "dc1", Op: structs.ACLSet, ACL: structs.ACL{ Name: "User token", Type: structs.ACLTypeClient, Rules: fmt.Sprintf(` node "%s" { policy = "read" } `, s1.config.NodeName), }, WriteRequest: structs.WriteRequest{Token: "root"}, } var id string if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &id); err != nil { t.Fatalf("err: %v", err) } // Now try with the token and it will go through. args.Token = id { reply := structs.IndexedNodes{} if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { t.Fatalf("err: %v", err) } if len(reply.Nodes) != 1 { t.Fatalf("bad: %v", reply.Nodes) } } }
explode_data.jsonl/49227
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 854 }
[ 2830, 3393, 41606, 27104, 12288, 97627, 5632, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 48532, 16, 11, 274, 16, 1669, 1273, 5475, 2354, 2648, 1155, 11, 2915, 1337, 353, 2648, 8, 341, 197, 1444, 875, 3140, 1043, 3057, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLang_English_DiffForHumans(t *testing.T) { assert := assert.New(t) tests := []struct { input1 string // 输入值 input2 string // 输入值 expected string // 期望值 }{ {"2020-08-05 13:14:15", "2020-08-05 13:14:15", "just now"}, {"2020-08-05 13:14:15", "2021-08-05 13:14:15", "1 year before"}, {"2020-08-05 13:14:15", "2019-08-05 13:14:15", "1 year after"}, {"2020-08-05 13:14:15", "2030-08-05 13:14:15", "10 years before"}, {"2020-08-05 13:14:15", "2010-08-05 13:14:15", "10 years after"}, {"2020-08-05 13:14:15", "2020-09-05 13:14:15", "1 month before"}, {"2020-08-05 13:14:15", "2020-07-05 13:14:15", "1 month after"}, {"2020-08-05 13:14:15", "2021-06-05 13:14:15", "10 months before"}, {"2020-08-05 13:14:15", "2019-10-05 13:14:15", "10 months after"}, {"2020-08-05 13:14:15", "2020-08-06 13:14:15", "1 day before"}, {"2020-08-05 13:14:15", "2020-08-04 13:14:15", "1 day after"}, {"2020-08-05 13:14:15", "2020-08-15 13:14:15", "1 week before"}, {"2020-08-05 13:14:15", "2020-07-26 13:14:15", "1 week after"}, {"2020-08-05 13:14:15", "2020-08-05 14:14:15", "1 hour before"}, {"2020-08-05 13:14:15", "2020-08-05 12:14:15", "1 hour after"}, {"2020-08-05 13:14:15", "2020-08-05 23:14:15", "10 hours before"}, {"2020-08-05 13:14:15", "2020-08-05 03:14:15", "10 hours after"}, {"2020-08-05 13:14:15", "2020-08-05 13:15:15", "1 minute before"}, {"2020-08-05 13:14:15", "2020-08-05 13:13:15", "1 minute after"}, {"2020-08-05 13:14:15", "2020-08-05 13:24:15", "10 minutes before"}, {"2020-08-05 13:14:15", "2020-08-05 13:04:15", "10 minutes after"}, {"2020-08-05 13:14:15", "2020-08-05 13:14:16", "1 second before"}, {"2020-08-05 13:14:15", "2020-08-05 13:14:14", "1 second after"}, {"2020-08-05 13:14:15", "2020-08-05 13:14:25", "10 seconds before"}, {"2020-08-05 13:14:15", "2020-08-05 13:14:05", "10 seconds after"}, } for index, test := range tests { c1 := Parse(test.input1) c2 := Parse(test.input2) assert.Nil(c1.Error) assert.Nil(c2.Error) assert.Equal(test.expected, 
c1.SetLocale(english).DiffForHumans(c2), "test index id is "+strconv.Itoa(index)) } }
explode_data.jsonl/29477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1003 }
[ 2830, 3393, 26223, 2089, 968, 1672, 1557, 3092, 2461, 94568, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 78216, 1669, 3056, 1235, 341, 197, 22427, 16, 256, 914, 442, 69058, 25511, 198, 197, 22427, 17, 256, 914, 44...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCache_ObjectInfoReader(t *testing.T) { cfg, repo, _ := testcfg.BuildWithRepo(t) repoExecutor := newRepoExecutor(t, cfg, repo) cache := newCache(time.Hour, 10, helper.NewManualTicker()) defer cache.Stop() cache.cachedProcessDone = sync.NewCond(&sync.Mutex{}) t.Run("uncancellable", func(t *testing.T) { ctx := testhelper.ContextWithoutCancel() require.PanicsWithValue(t, "empty ctx.Done() in catfile.Batch.New()", func() { _, _ = cache.ObjectInfoReader(ctx, repoExecutor) }) }) t.Run("uncacheable", func(t *testing.T) { ctx, cancel := testhelper.Context() defer cancel() // The context doesn't carry a session ID and is thus uncacheable. // The process should never get returned to the cache and must be // killed on context cancellation. reader, err := cache.ObjectInfoReader(ctx, repoExecutor) require.NoError(t, err) objectInfoReaderImpl, ok := reader.(*objectInfoReader) require.True(t, ok, "expected object reader") cancel() // We're cheating a bit here to avoid creating a racy test by reaching into the // process and trying to read from its stdout. If the cancel did kill the process as // expected, then the stdout should be closed and we'll get an EOF. output, err := io.ReadAll(objectInfoReaderImpl.queue.stdout) if err != nil { require.True(t, errors.Is(err, os.ErrClosed)) } else { require.NoError(t, err) } require.Empty(t, output) require.True(t, reader.isClosed()) require.Empty(t, keys(t, &cache.objectInfoReaders)) }) t.Run("cacheable", func(t *testing.T) { defer cache.Evict() ctx, cancel := testhelper.Context() defer cancel() ctx = correlation.ContextWithCorrelation(ctx, "1") ctx = testhelper.MergeIncomingMetadata(ctx, metadata.Pairs(SessionIDField, "1"), ) reader, err := cache.ObjectInfoReader(ctx, repoExecutor) require.NoError(t, err) // Cancel the context such that the process will be considered for return to the // cache and wait for the cache to collect it. 
cache.cachedProcessDone.L.Lock() cancel() defer cache.cachedProcessDone.L.Unlock() cache.cachedProcessDone.Wait() keys := keys(t, &cache.objectInfoReaders) require.Equal(t, []key{{ sessionID: "1", repoStorage: repo.GetStorageName(), repoRelPath: repo.GetRelativePath(), }}, keys) // Assert that we can still read from the cached process. _, err = reader.Info(ctx, "refs/heads/master") require.NoError(t, err) }) t.Run("closed process does not get cached", func(t *testing.T) { defer cache.Evict() ctx, cancel := testhelper.Context() defer cancel() ctx = testhelper.MergeIncomingMetadata(ctx, metadata.Pairs(SessionIDField, "1"), ) reader, err := cache.ObjectInfoReader(ctx, repoExecutor) require.NoError(t, err) // Closed processes naturally cannot be reused anymore and thus shouldn't ever get // cached. reader.close() // Cancel the context such that the process will be considered for return to the // cache and wait for the cache to collect it. cache.cachedProcessDone.L.Lock() cancel() defer cache.cachedProcessDone.L.Unlock() cache.cachedProcessDone.Wait() require.Empty(t, keys(t, &cache.objectInfoReaders)) }) }
explode_data.jsonl/81955
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1142 }
[ 2830, 3393, 8233, 27839, 1731, 5062, 1155, 353, 8840, 836, 8, 341, 50286, 11, 15867, 11, 716, 1669, 1273, 14072, 25212, 2354, 25243, 1155, 340, 17200, 5368, 25255, 1669, 501, 25243, 25255, 1155, 11, 13286, 11, 15867, 692, 52680, 1669, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLoadCourse(t *testing.T) { Convey("When LoadCourse is called", t, func() { courses, err := LoadCourse("test_courseID") Convey("And no driver error occur", func() { expectedResponse := shared.Course{"test", "test", "test", "test"} Convey("a Course should be returned without error", func() { So(courses, ShouldResemble, expectedResponse) So(err, ShouldBeNil) }) }) }) }
explode_data.jsonl/52791
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 145 }
[ 2830, 3393, 5879, 23340, 1155, 353, 8840, 836, 8, 341, 93070, 5617, 445, 4498, 8893, 23340, 374, 2598, 497, 259, 11, 2915, 368, 341, 197, 1444, 16349, 11, 1848, 1669, 8893, 23340, 445, 1944, 31726, 915, 5130, 197, 93070, 5617, 445, 30...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSpanLogError(t *testing.T) { tracer, apmtracer, recorder := newTestTracer() defer apmtracer.Close() span := tracer.StartSpan("parent") span.LogKV("event", "error", "message", "foo") span.LogKV("event", "error", "message", "bar", "error.object", errors.New("boom")) span.LogKV("event", "warning") // non-error, ignored span.LogKV(1, "two") // non-string keys ignored span.LogKV() // no fields, no-op childSpan := tracer.StartSpan("child", opentracing.ChildOf(span.Context())) childSpan.LogFields(log.String("event", "error"), log.Error(errors.New("baz"))) childSpan.LogFields(log.String("event", "warning"), log.String("message", "meh")) // non-error, ignored childSpan.LogFields() // no fields, ignored childSpan.Finish() span.Finish() apmtracer.Flush(nil) payloads := recorder.Payloads() require.Len(t, payloads.Transactions, 1) require.Len(t, payloads.Spans, 1) require.Len(t, payloads.Errors, 3) errors := payloads.Errors assert.Equal(t, "foo", errors[0].Log.Message) assert.Equal(t, "bar", errors[1].Log.Message) assert.Equal(t, "baz", errors[2].Log.Message) assert.Zero(t, errors[0].Exception) assert.Equal(t, "boom", errors[1].Exception.Message) assert.Equal(t, "baz", errors[2].Exception.Message) assert.Equal(t, "errorString", errors[2].Exception.Type) assert.Equal(t, payloads.Transactions[0].ID, errors[0].ParentID) assert.Equal(t, payloads.Transactions[0].ID, errors[1].ParentID) assert.Equal(t, payloads.Spans[0].ID, errors[2].ParentID) }
explode_data.jsonl/28599
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 666 }
[ 2830, 3393, 12485, 2201, 1454, 1155, 353, 8840, 836, 8, 341, 25583, 9584, 11, 1443, 76, 94941, 11, 48835, 1669, 501, 2271, 1282, 9584, 741, 16867, 1443, 76, 94941, 10421, 2822, 197, 1480, 1669, 64306, 12101, 12485, 445, 3765, 1138, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParentSegmentTotalCount(t *testing.T) { ctx := context.Background() ctx, seg := BeginSegment(ctx,"test") wg := sync.WaitGroup{} n := 2 wg.Add(2 * n) for i := 0; i < n; i++ { go func(ctx context.Context) { // add async nested subsegments c1,_ := BeginSubsegment(ctx, "TestSubsegment1") c2,_ := BeginSubsegment(c1, "TestSubsegment2") go func(ctx context.Context) { // add async nested subsegments c1,_ := BeginSubsegment(ctx, "TestSubsegment1") BeginSubsegment(c1, "TestSubsegment2") wg.Done() }(c2) // passing context wg.Done() }(ctx) } wg.Wait() assert.Equal(t, 4 * uint32(n) , seg.ParentSegment.totalSubSegments, "totalSubSegments count should be correctly registered on the parent segment") }
explode_data.jsonl/76225
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 317 }
[ 2830, 3393, 8387, 21086, 7595, 2507, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 2822, 20985, 11, 4810, 1669, 18597, 21086, 7502, 1335, 1944, 25639, 72079, 1669, 12811, 28384, 2808, 16094, 9038, 1669, 220, 17, 198, 72079, 1904...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChangePackageName(t *testing.T) { t.Skip("This issue hasn't been fixed yet. See golang.org/issue/41061.") const mod = ` -- go.mod -- module mod.com -- foo/foo.go -- package foo -- foo/bar_test.go -- package foo_ ` run(t, mod, func(t *testing.T, env *Env) { env.Await(InitialWorkspaceLoad) env.OpenFile("foo/bar_test.go") env.RegexpReplace("foo/bar_test.go", "package foo_", "package foo_test") env.SaveBuffer("foo/bar_test.go") env.Await( OnceMet( CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), 1), NoDiagnostics("foo/bar_test.go"), ), OnceMet( CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), 1), NoDiagnostics("foo/foo.go"), ), ) }) }
explode_data.jsonl/38926
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 306 }
[ 2830, 3393, 4072, 65655, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 445, 1986, 4265, 12492, 944, 1012, 8356, 3602, 13, 3496, 342, 37287, 2659, 14, 11159, 14, 19, 16, 15, 21, 16, 31225, 4777, 1463, 284, 22074, 313, 728, 10929, 39514, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseTime24Hours(t *testing.T) { tm, err := NewTimeFromString("9:42") require.Nil(t, err) should := Ɀ_Time_(9, 42) assert.Equal(t, should, tm) }
explode_data.jsonl/4302
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 71 }
[ 2830, 3393, 14463, 1462, 17, 19, 23235, 1155, 353, 8840, 836, 8, 341, 3244, 76, 11, 1848, 1669, 1532, 1462, 44491, 445, 24, 25, 19, 17, 1138, 17957, 59678, 1155, 11, 1848, 340, 197, 5445, 1669, 2858, 109, 123, 39080, 8361, 24, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestExecutorDriverReconnectEvent(t *testing.T) { setTestEnv(t) ch := make(chan bool, 2) // Mock Slave process to respond to registration event. server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) { reqPath, err := url.QueryUnescape(req.URL.String()) assert.NoError(t, err) log.Infoln("RCVD request", reqPath) // exec registration request if strings.Contains(reqPath, "RegisterExecutorMessage") { log.Infoln("Got Executor registration request") } if strings.Contains(reqPath, "ReregisterExecutorMessage") { log.Infoln("Got Executor Re-registration request") ch <- true } rsp.WriteHeader(http.StatusAccepted) }) defer server.Close() exec := newTestExecutor(t) exec.t = t // start driver := newIntegrationTestDriver(t, exec) stat, err := driver.Start() assert.NoError(t, err) assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat) driver.setConnected(true) defer driver.Stop() // send "reconnect" event to driver pbMsg := &mesos.ReconnectExecutorMessage{ SlaveId: util.NewSlaveID(slaveID), } c := testutil.NewMockMesosClient(t, server.PID) c.SendMessage(driver.self, pbMsg) select { case <-ch: case <-time.After(time.Second * 2): log.Errorf("Tired of waiting...") } }
explode_data.jsonl/76148
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 481 }
[ 2830, 3393, 25255, 11349, 693, 6459, 1556, 1155, 353, 8840, 836, 8, 341, 8196, 2271, 14359, 1155, 340, 23049, 1669, 1281, 35190, 1807, 11, 220, 17, 340, 197, 322, 14563, 59368, 1882, 311, 5889, 311, 12227, 1538, 624, 41057, 1669, 1273, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestHandleExecUnknownError(t *testing.T) { logStats := tabletenv.NewLogStats(ctx, "TestHandleExecError") config := tabletenv.NewDefaultConfig() tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, logStats) panic("unknown exec error") }
explode_data.jsonl/80012
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 121 }
[ 2830, 3393, 6999, 10216, 13790, 1454, 1155, 353, 8840, 836, 8, 341, 6725, 16635, 1669, 1965, 1960, 85, 7121, 2201, 16635, 7502, 11, 330, 2271, 6999, 10216, 1454, 1138, 25873, 1669, 1965, 1960, 85, 7121, 3675, 2648, 741, 57441, 85, 1669,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIntegrationOBFS4DialContextError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() // should cause DialContex to fail config := obfs4config() results := OBFS4Connect(ctx, config) if !strings.HasSuffix(results.Error.Error(), "operation was canceled") { t.Fatal("not the error we expected") } }
explode_data.jsonl/53553
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 111 }
[ 2830, 3393, 52464, 20608, 8485, 19, 35, 530, 1972, 1454, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 84441, 368, 442, 1265, 5240, 66155, 818, 327, 311, 3690, 198, 25873, 1669, 1508, 3848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLoginRedirect(t *testing.T) { v := &Config{ Server: "https://try.gogs.io", Login: "/login/form", } h := v.Handler( http.NotFoundHandler(), ) r := httptest.NewRequest("POST", "/login", nil) w := httptest.NewRecorder() h.ServeHTTP(w, r) if want, got := w.Code, 303; want != got { t.Errorf("Want status code %d, got %d", want, got) } if want, got := w.Header().Get("Location"), "/login/form"; want != got { t.Errorf("Want redirect location %s, got %s", want, got) } }
explode_data.jsonl/36376
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 204 }
[ 2830, 3393, 6231, 17725, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 609, 2648, 515, 197, 92075, 25, 330, 2428, 1110, 1539, 1302, 26307, 4245, 756, 197, 197, 6231, 25, 220, 3521, 3673, 20260, 756, 197, 532, 9598, 1669, 348, 31010, 1006,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestIPPoolEvents(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) context, cancel := context.WithCancel(context.Background()) defer cancel() controller := newController(nil) consumerCh := make(chan string) controller.AddEventHandler( func(ippool string) { consumerCh <- ippool }) controller.crdInformerFactory.Start(stopCh) controller.crdInformerFactory.WaitForCacheSync(stopCh) go controller.Run(stopCh) require.True(t, cache.WaitForCacheSync(stopCh, controller.HasSynced)) // ADD event eip, err := controller.crdClient.CrdV1alpha2().ExternalIPPools().Create(context, newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"), metav1.CreateOptions{}, ) require.NoError(t, err) assert.Equal(t, "eip1", <-consumerCh) // UPDATE event eip.Spec.IPRanges[0].End = "10.10.10.4" eip, err = controller.crdClient.CrdV1alpha2().ExternalIPPools().Update(context, eip, metav1.UpdateOptions{}, ) require.NoError(t, err) assert.Equal(t, "eip1", <-consumerCh) // DELETE event err = controller.crdClient.CrdV1alpha2().ExternalIPPools().Delete(context, eip.Name, metav1.DeleteOptions{}, ) require.NoError(t, err) assert.Equal(t, "eip1", <-consumerCh) }
explode_data.jsonl/10261
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 472 }
[ 2830, 3393, 3298, 10551, 7900, 1155, 353, 8840, 836, 8, 341, 62644, 1143, 1669, 1281, 35190, 2036, 37790, 16867, 3265, 60170, 1143, 340, 28413, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 16867, 9121, 741, 61615, 1669, 501, 2051...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCounterFuncs(t *testing.T) { for i, this := range []struct { countFunc func(i interface{}) (int, error) in string expected int }{ {countWords, "Do Be Do Be Do", 5}, {countWords, "旁边", 2}, {countRunes, "旁边", 2}, } { result, err := this.countFunc(this.in) if err != nil { t.Errorf("[%d] Unexpected counter error: %s", i, err) } else if result != this.expected { t.Errorf("[%d] Count method error, got %v expected %v", i, result, this.expected) } _, err = this.countFunc(t) if err == nil { t.Errorf("[%d] Expected Count error", i) } } }
explode_data.jsonl/9240
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 265 }
[ 2830, 3393, 14099, 9626, 82, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 419, 1669, 2088, 3056, 1235, 341, 197, 18032, 9626, 2915, 1956, 3749, 28875, 320, 396, 11, 1465, 340, 197, 17430, 286, 914, 198, 197, 42400, 220, 526, 198, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBool_Eq(t *testing.T) { assertEq(t, []eqAssert{ {NewBool(true), NewBool(true), true}, {NewBool(false), NewBool(true), false}, {NewBool(false), NewBool(false), true}, {NewBool(true), NewBytes([]byte{0, 1, 2}), false}, {NewBool(true), NewBytes([]byte{1}), false}, {NewBool(false), NewBytes([]byte{0}), false}, }) }
explode_data.jsonl/66071
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 154 }
[ 2830, 3393, 11233, 2089, 80, 1155, 353, 8840, 836, 8, 341, 6948, 27312, 1155, 11, 3056, 11006, 8534, 515, 197, 197, 90, 3564, 11233, 3715, 701, 1532, 11233, 3715, 701, 830, 1583, 197, 197, 90, 3564, 11233, 3576, 701, 1532, 11233, 3715...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1