text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
// TestMemoise verifies render.Memoise caching: the wrapped renderer runs
// once per distinct report, repeat renders hit the cache, and a cache
// reset forces a re-render that still equals the original result.
func TestMemoise(t *testing.T) {
	// calls counts how many times the underlying renderer actually runs.
	calls := 0
	r := renderFunc(func(rpt report.Report) render.Nodes {
		calls++
		return render.Nodes{Nodes: report.Nodes{rpt.ID: report.MakeNode(rpt.ID)}}
	})
	m := render.Memoise(r)
	// Memoising an already-memoised renderer must be a no-op (fixpoint).
	if render.Memoise(m) != m {
		t.Errorf("Memoised renderers should be fixpoints.")
	}
	rpt1 := report.MakeReport()
	result1 := m.Render(rpt1)
	// it should have rendered it.
	if _, ok := result1.Nodes[rpt1.ID]; !ok {
		t.Errorf("Expected rendered report to contain a node, but got: %v", result1)
	}
	if calls != 1 {
		t.Errorf("Expected renderer to have been called the first time")
	}
	// Same report again: must be served from the cache (call count unchanged).
	result2 := m.Render(rpt1)
	if !reflect.DeepEqual(result1, result2) {
		t.Errorf("Expected memoised result to be returned: %s", test.Diff(result1, result2))
	}
	if calls != 1 {
		t.Errorf("Expected renderer to not have been called the second time")
	}
	// A different report must miss the cache and render afresh.
	rpt2 := report.MakeReport()
	result3 := m.Render(rpt2)
	if reflect.DeepEqual(result1, result3) {
		t.Errorf("Expected different result for different report, but were the same")
	}
	if calls != 2 {
		t.Errorf("Expected renderer to have been called again for a different report")
	}
	// After a reset the first report renders again (calls rises to 3),
	// but the result must still equal the original.
	render.ResetCache()
	result4 := m.Render(rpt1)
	if !reflect.DeepEqual(result1, result4) {
		t.Errorf("Expected original result to be returned: %s", test.Diff(result1, result4))
	}
	if calls != 3 {
		t.Errorf("Expected renderer to have been called again after cache reset")
	}
}
explode_data.jsonl/64909
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 533 }
[ 2830, 3393, 46402, 1064, 1155, 353, 8840, 836, 8, 341, 1444, 5583, 1669, 220, 15, 198, 7000, 1669, 3141, 9626, 18552, 2601, 417, 1895, 25702, 8, 3141, 52184, 341, 197, 1444, 5583, 22940, 197, 853, 3141, 52184, 90, 12288, 25, 1895, 521...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInternalInvokeRequest(t *testing.T) { t.Run("valid internal invoke request", func(t *testing.T) { m := &commonv1pb.InvokeRequest{ Method: "invoketest", ContentType: "application/json", Data: &anypb.Any{Value: []byte("test")}, } pb := internalv1pb.InternalInvokeRequest{ Ver: internalv1pb.APIVersion_V1, Message: m, } ir, err := InternalInvokeRequest(&pb) assert.NoError(t, err) assert.NotNil(t, ir.r.Message) assert.Equal(t, "invoketest", ir.r.Message.GetMethod()) assert.NotNil(t, ir.r.Message.GetData()) }) t.Run("nil message field", func(t *testing.T) { pb := internalv1pb.InternalInvokeRequest{ Ver: internalv1pb.APIVersion_V1, Message: nil, } _, err := InternalInvokeRequest(&pb) assert.Error(t, err) }) }
explode_data.jsonl/46239
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 347 }
[ 2830, 3393, 11569, 17604, 1900, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 1891, 5306, 19873, 1681, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 2109, 1669, 609, 5464, 85, 16, 16650, 32784, 1900, 515, 298, 84589, 25, 414, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestBadReplicaValues walks predictor, transformer and explainer through
// the three invalid replica configurations — negative MinReplicas, negative
// MaxReplicas, and Min > Max — asserting the matching validation error for
// each. The order matters: each section restores a valid predictor config
// before moving on to the next component.
func TestBadReplicaValues(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	isvc := makeTestInferenceService()
	// Predictor: MinReplicas below zero.
	isvc.Spec.Default.Predictor.MinReplicas = GetIntReference(-1)
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MinReplicasLowerBoundExceededError))
	// Predictor: MaxReplicas below zero.
	isvc.Spec.Default.Predictor.MinReplicas = GetIntReference(1)
	isvc.Spec.Default.Predictor.MaxReplicas = -1
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MaxReplicasLowerBoundExceededError))
	// Predictor: Min greater than Max.
	isvc.Spec.Default.Predictor.MinReplicas = GetIntReference(2)
	isvc.Spec.Default.Predictor.MaxReplicas = 1
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MinReplicasShouldBeLessThanMaxError))
	// Now test transformer and explainer, so set correct value for predictor
	isvc.Spec.Default.Predictor.MinReplicas = GetIntReference(0)
	isvc.Spec.Default.Predictor.MaxReplicas = 0
	isvc.Spec.Default.Transformer = &TransformerSpec{}
	isvc.Spec.Default.Transformer.Custom = &CustomSpec{
		v1.Container{
			Image: "custom:0.1",
		},
	}
	isvc.applyDefaultsEndpoint(&isvc.Spec.Default, c)
	// Transformer: same three invalid combinations.
	isvc.Spec.Default.Transformer.MinReplicas = GetIntReference(-1)
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MinReplicasLowerBoundExceededError))
	isvc.Spec.Default.Transformer.MinReplicas = GetIntReference(1)
	isvc.Spec.Default.Transformer.MaxReplicas = -1
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MaxReplicasLowerBoundExceededError))
	isvc.Spec.Default.Transformer.MinReplicas = GetIntReference(2)
	isvc.Spec.Default.Transformer.MaxReplicas = 1
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MinReplicasShouldBeLessThanMaxError))
	// Now test explainer, so ignore transformer
	isvc.Spec.Default.Transformer = nil
	isvc.Spec.Default.Explainer = &ExplainerSpec{
		Alibi: &AlibiExplainerSpec{
			StorageURI: "gs://testbucket/testmodel",
		},
	}
	isvc.applyDefaultsEndpoint(&isvc.Spec.Default, c)
	// Explainer: same three invalid combinations.
	isvc.Spec.Default.Explainer.MinReplicas = GetIntReference(-1)
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MinReplicasLowerBoundExceededError))
	isvc.Spec.Default.Explainer.MinReplicas = GetIntReference(1)
	isvc.Spec.Default.Explainer.MaxReplicas = -1
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MaxReplicasLowerBoundExceededError))
	isvc.Spec.Default.Explainer.MinReplicas = GetIntReference(2)
	isvc.Spec.Default.Explainer.MaxReplicas = 1
	g.Expect(isvc.validate(c)).Should(gomega.MatchError(MinReplicasShouldBeLessThanMaxError))
}
explode_data.jsonl/1494
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 900 }
[ 2830, 3393, 17082, 18327, 15317, 6227, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 32696, 7121, 38, 32696, 2354, 51, 1155, 340, 19907, 7362, 1669, 1281, 2271, 641, 2202, 1860, 741, 19907, 7362, 36473, 13275, 1069, 8861, 269, 17070, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRoute_SetTTLSeconds(t *testing.T) { route := Route{} ttlSeconds := 120 route.SetTTLSeconds(&ttlSeconds) assert.Equal(t, 120, *route.ttlSeconds) }
explode_data.jsonl/67795
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 68 }
[ 2830, 3393, 4899, 14812, 51, 13470, 15343, 1155, 353, 8840, 836, 8, 341, 7000, 2133, 1669, 9572, 16094, 3244, 11544, 15343, 1669, 220, 16, 17, 15, 198, 7000, 2133, 4202, 51, 13470, 15343, 2099, 62858, 15343, 692, 6948, 12808, 1155, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestGetOutboundHTTPFilterChainForService checks that building an outbound
// HTTP filter chain succeeds for a traffic match with destination IP ranges
// and fails when no destination IP ranges are given.
// NOTE(review): expectedFilterChainMatch is populated in the table but never
// compared against httpFilterChain.FilterChainMatch — consider asserting it.
func TestGetOutboundHTTPFilterChainForService(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockCatalog := catalog.NewMockMeshCataloger(mockCtrl)
	mockConfigurator := configurator.NewMockConfigurator(mockCtrl)

	// Mock calls used to build the HTTP connection manager
	mockConfigurator.EXPECT().IsTracingEnabled().Return(false).AnyTimes()
	mockConfigurator.EXPECT().GetTracingEndpoint().Return("test-api").AnyTimes()
	mockConfigurator.EXPECT().GetInboundExternalAuthConfig().Return(auth.ExtAuthConfig{
		Enable: false,
	}).AnyTimes()
	mockConfigurator.EXPECT().GetFeatureFlags().Return(configv1alpha2.FeatureFlags{
		EnableWASMStats: false,
	}).AnyTimes()

	// Listener builder under test, wired to the mocks above.
	lb := &listenerBuilder{
		meshCatalog:     mockCatalog,
		cfg:             mockConfigurator,
		serviceIdentity: tests.BookbuyerServiceIdentity,
	}

	testCases := []struct {
		name                     string
		trafficMatch             trafficpolicy.TrafficMatch
		expectedFilterChainMatch *xds_listener.FilterChainMatch
		expectError              bool
	}{
		{
			name: "traffic match with multiple destination IP ranges",
			trafficMatch: trafficpolicy.TrafficMatch{
				Name:            "test",
				DestinationPort: 80,
				DestinationIPRanges: []string{
					"1.1.1.1/32",
					"2.2.2.2/32",
				},
			},
			expectedFilterChainMatch: &xds_listener.FilterChainMatch{
				DestinationPort: &wrapperspb.UInt32Value{Value: 80}, // same as 'servicePort'
				PrefixRanges: []*xds_core.CidrRange{
					// The order is guaranteed to be sorted
					{
						AddressPrefix: "1.1.1.1",
						PrefixLen: &wrapperspb.UInt32Value{
							Value: 32,
						},
					},
					{
						AddressPrefix: "2.2.2.2",
						PrefixLen: &wrapperspb.UInt32Value{
							Value: 32,
						},
					},
				},
			},
			expectError: false,
		},
		{
			// No destination IP ranges: chain construction must fail.
			name: "traffic match without destination IP ranges",
			trafficMatch: trafficpolicy.TrafficMatch{
				Name:                "test",
				DestinationPort:     80,
				DestinationIPRanges: nil,
			},
			expectedFilterChainMatch: nil,
			expectError:              true,
		},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("Testing test case %d: %s", i, tc.name), func(t *testing.T) {
			assert := tassert.New(t)

			httpFilterChain, err := lb.getOutboundHTTPFilterChainForService(tc.trafficMatch)
			assert.Equal(err != nil, tc.expectError)
			if err != nil {
				assert.Nil(httpFilterChain)
			} else {
				assert.NotNil(httpFilterChain)
				// One prefix range per destination IP range.
				assert.Len(httpFilterChain.FilterChainMatch.PrefixRanges, len(tc.trafficMatch.DestinationIPRanges))
				for _, filter := range httpFilterChain.Filters {
					assert.Equal(wellknown.HTTPConnectionManager, filter.Name)
				}
			}
		})
	}
}
explode_data.jsonl/10267
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1160 }
[ 2830, 3393, 1949, 2662, 10891, 9230, 5632, 18837, 2461, 1860, 1155, 353, 8840, 836, 8, 341, 77333, 15001, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 7860, 15001, 991, 18176, 2822, 77333, 41606, 1669, 16403, 7121, 11571, 14194, 41...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestResponseWithCurrencies(t *testing.T) { assertCurrencyInBidResponse(t, "USD", nil) currency := "USD" assertCurrencyInBidResponse(t, "USD", &currency) currency = "EUR" assertCurrencyInBidResponse(t, "EUR", &currency) }
explode_data.jsonl/22712
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 2582, 2354, 34, 19607, 1155, 353, 8840, 836, 8, 341, 6948, 26321, 641, 65452, 2582, 1155, 11, 330, 26749, 497, 2092, 692, 1444, 5088, 1669, 330, 26749, 698, 6948, 26321, 641, 65452, 2582, 1155, 11, 330, 26749, 497, 609, 15...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestLoadManifest(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/simple.porter.yaml", Name) require.NoError(t, c.LoadManifest()) assert.NotNil(t, c.Manifest) assert.Equal(t, []string{"exec"}, c.Manifest.Mixins) assert.Len(t, c.Manifest.Install, 1) installStep := c.Manifest.Install[0] assert.NotNil(t, installStep.Description) mixin := installStep.GetMixinName() assert.Equal(t, "exec", mixin) }
explode_data.jsonl/10943
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 183 }
[ 2830, 3393, 5879, 38495, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 1532, 2271, 2648, 1155, 340, 1444, 39820, 7084, 261, 7623, 2822, 1444, 8787, 1972, 1904, 2271, 1703, 445, 92425, 67195, 14598, 261, 33406, 497, 3988, 692, 17957, 35699, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClustersPinUpdate(t *testing.T) { ctx := context.Background() clusters, mock := createClusters(t) defer shutdownClusters(t, clusters, mock) prefix := test.Cid1.Prefix() ttlDelay() h, err := prefix.Sum(randomBytes()) // create random cid h2, err := prefix.Sum(randomBytes()) // create random cid _, err = clusters[0].PinUpdate(ctx, h, h2, api.PinOptions{}) if err == nil || err != state.ErrNotFound { t.Fatal("pin update should fail when from is not pinned") } _, err = clusters[0].Pin(ctx, h, api.PinOptions{}) if err != nil { t.Errorf("error pinning %s: %s", h, err) } pinDelay() opts2 := api.PinOptions{ UserAllocations: []peer.ID{clusters[0].host.ID()}, // should not be used PinUpdate: h, Name: "new name", } _, err = clusters[0].Pin(ctx, h2, opts2) // should call PinUpdate if err != nil { t.Errorf("error pin-updating %s: %s", h2, err) } pinDelay() f := func(t *testing.T, c *Cluster) { pinget, err := c.PinGet(ctx, h2) if err != nil { t.Fatal(err) } if len(pinget.Allocations) != 0 { t.Error("new pin should be allocated everywhere like pin1") } if pinget.MaxDepth != -1 { t.Error("updated pin should be recursive like pin1") } if pinget.Name != "new name" { t.Error("name should be kept") } } runF(t, clusters, f) }
explode_data.jsonl/66604
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 548 }
[ 2830, 3393, 94992, 19861, 4289, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 39407, 14605, 11, 7860, 1669, 1855, 94992, 1155, 340, 16867, 23766, 94992, 1155, 11, 26968, 11, 7860, 340, 3223, 5060, 1669, 1273, 727, 307, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestAuthDisable(t *testing.T) { server := httptest.NewServer(initAuthRoutes()) defer server.Close() t.Log("/auth request POST method, disable auth") testutils.DoRequest(t, server.URL+"/auth", nil, "POST", http.StatusNotFound, "AuthDisable") }
explode_data.jsonl/35851
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 5087, 25479, 1155, 353, 8840, 836, 8, 341, 41057, 1669, 54320, 70334, 7121, 5475, 38250, 5087, 26653, 2398, 16867, 3538, 10421, 2822, 3244, 5247, 4283, 3242, 1681, 12869, 1714, 11, 11156, 4166, 1138, 18185, 6031, 33596, 1900, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestRouter(t *testing.T) { r, err := NewRouter() if err != nil { t.Fatal(err) } defer r.Release() if !isForwardingEnabled(r.NetNs().Name()) { t.Fatal("Expected net.ipv4.ip_forward=1 for", r.NetNs().Name()) } }
explode_data.jsonl/34908
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 102 }
[ 2830, 3393, 9523, 1155, 353, 8840, 836, 8, 341, 7000, 11, 1848, 1669, 1532, 9523, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 630, 16867, 435, 58693, 2822, 743, 753, 285, 25925, 287, 5462, 2601, 16993, 47360, 100...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestClientVolume(t *testing.T) { db := tests.Tempfile() defer os.Remove(db) // Create the app app := glusterfs.NewTestApp(db) defer app.Close() // Setup the server ts := setupHeketiServer(app) defer ts.Close() // Create cluster c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY) tests.Assert(t, c != nil) cluster_req := &api.ClusterCreateRequest{ ClusterFlags: api.ClusterFlags{ Block: true, File: true, }, } cluster, err := c.ClusterCreate(cluster_req) tests.Assert(t, err == nil) // Create node request packet for n := 0; n < 4; n++ { nodeReq := &api.NodeAddRequest{} nodeReq.ClusterId = cluster.Id nodeReq.Hostnames.Manage = []string{"manage" + fmt.Sprintf("%v", n)} nodeReq.Hostnames.Storage = []string{"storage" + fmt.Sprintf("%v", n)} nodeReq.Zone = n + 1 // Create node node, err := c.NodeAdd(nodeReq) tests.Assert(t, err == nil) // Create a device request sg := utils.NewStatusGroup() for i := 0; i < 50; i++ { sg.Add(1) go func() { defer sg.Done() deviceReq := &api.DeviceAddRequest{} deviceReq.Name = "/dev/by-magic/id:" + idgen.GenUUID() deviceReq.NodeId = node.Id // Create device err := c.DeviceAdd(deviceReq) sg.Err(err) }() } r := sg.Result() tests.Assert(t, r == nil, r) } // Get list of volumes list, err := c.VolumeList() tests.Assert(t, err == nil) tests.Assert(t, len(list.Volumes) == 0) // Create a volume volumeReq := &api.VolumeCreateRequest{} volumeReq.Size = 10 volume, err := c.VolumeCreate(volumeReq) tests.Assert(t, err == nil, err) tests.Assert(t, volume.Id != "") tests.Assert(t, volume.Size == volumeReq.Size) // Get list of volumes list, err = c.VolumeList() tests.Assert(t, err == nil) tests.Assert(t, len(list.Volumes) == 1) tests.Assert(t, list.Volumes[0] == volume.Id) // Get info on incorrect id info, err := c.VolumeInfo("badid") tests.Assert(t, err != nil) // Get info info, err = c.VolumeInfo(volume.Id) tests.Assert(t, err == nil) tests.Assert(t, reflect.DeepEqual(info, volume)) // Expand volume with a bad id expandReq := &api.VolumeExpandRequest{} 
expandReq.Size = 10 volumeInfo, err := c.VolumeExpand("badid", expandReq) tests.Assert(t, err != nil) // Expand volume volumeInfo, err = c.VolumeExpand(volume.Id, expandReq) tests.Assert(t, err == nil) tests.Assert(t, volumeInfo.Size == 20) // Delete bad id err = c.VolumeDelete("badid") tests.Assert(t, err != nil) // Delete volume err = c.VolumeDelete(volume.Id) tests.Assert(t, err == nil) clusterInfo, err := c.ClusterInfo(cluster.Id) for _, nodeid := range clusterInfo.Nodes { // Get node information nodeInfo, err := c.NodeInfo(nodeid) tests.Assert(t, err == nil) // Change device state to offline sg := utils.NewStatusGroup() for index := range nodeInfo.DevicesInfo { sg.Add(1) go func(i int) { defer sg.Done() sg.Err(c.DeviceState(nodeInfo.DevicesInfo[i].Id, &api.StateRequest{State: api.EntryStateOffline})) }(index) } err = sg.Result() tests.Assert(t, err == nil, err) // Change device state to failed sg = utils.NewStatusGroup() for index := range nodeInfo.DevicesInfo { sg.Add(1) go func(i int) { defer sg.Done() sg.Err(c.DeviceState(nodeInfo.DevicesInfo[i].Id, &api.StateRequest{State: api.EntryStateFailed})) }(index) } err = sg.Result() tests.Assert(t, err == nil, err) // Delete all devices sg = utils.NewStatusGroup() for index := range nodeInfo.DevicesInfo { sg.Add(1) go func(i int) { defer sg.Done() sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id)) }(index) } err = sg.Result() tests.Assert(t, err == nil, err) // Delete node err = c.NodeDelete(nodeid) tests.Assert(t, err == nil) } // Delete cluster err = c.ClusterDelete(cluster.Id) tests.Assert(t, err == nil) }
explode_data.jsonl/18015
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1618 }
[ 2830, 3393, 2959, 18902, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 7032, 65009, 1192, 741, 16867, 2643, 13270, 9791, 692, 197, 322, 4230, 279, 906, 198, 28236, 1669, 2770, 4993, 3848, 7121, 2271, 2164, 9791, 340, 16867, 906, 10421, 282...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetServerAddressByClientCIDRs(t *testing.T) { publicAddressCIDRMap := []metav1.ServerAddressByClientCIDR{ { ClientCIDR: "0.0.0.0/0", ServerAddress: "ExternalAddress", }, } internalAddressCIDRMap := []metav1.ServerAddressByClientCIDR{ publicAddressCIDRMap[0], { ClientCIDR: "10.0.0.0/24", ServerAddress: "serviceIP", }, } internalIP := "10.0.0.1" publicIP := "1.1.1.1" testCases := []struct { Request http.Request ExpectedMap []metav1.ServerAddressByClientCIDR }{ { Request: http.Request{}, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Real-Ip": {internalIP}, }, }, ExpectedMap: internalAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Real-Ip": {publicIP}, }, }, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Forwarded-For": {internalIP}, }, }, ExpectedMap: internalAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Forwarded-For": {publicIP}, }, }, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ RemoteAddr: internalIP, }, ExpectedMap: internalAddressCIDRMap, }, { Request: http.Request{ RemoteAddr: publicIP, }, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ RemoteAddr: "invalidIP", }, ExpectedMap: publicAddressCIDRMap, }, } _, ipRange, _ := netutils.ParseCIDRSloppy("10.0.0.0/24") discoveryAddresses := DefaultAddresses{DefaultAddress: "ExternalAddress"} discoveryAddresses.CIDRRules = append(discoveryAddresses.CIDRRules, CIDRRule{IPRange: *ipRange, Address: "serviceIP"}) for i, test := range testCases { if a, e := discoveryAddresses.ServerAddressByClientCIDRs(utilnet.GetClientIP(&test.Request)), test.ExpectedMap; reflect.DeepEqual(e, a) != true { t.Fatalf("test case %d failed. expected: %v, actual: %v", i+1, e, a) } } }
explode_data.jsonl/48938
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 928 }
[ 2830, 3393, 1949, 5475, 4286, 1359, 2959, 54146, 42327, 1155, 353, 8840, 836, 8, 341, 1219, 4286, 54146, 49, 2227, 1669, 3056, 4059, 402, 16, 22997, 4286, 1359, 2959, 54146, 49, 515, 197, 197, 515, 298, 71724, 54146, 49, 25, 262, 330,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestEnforceSingleMeshRejectsNewMesh verifies that installing a second
// mesh is rejected when an existing osm-controller deployment carries the
// enforceSingleMesh=true label, and that the error names both meshes.
func TestEnforceSingleMeshRejectsNewMesh(t *testing.T) {
	assert := tassert.New(t)

	// In-memory helm storage so the install runs without a real cluster.
	out := new(bytes.Buffer)
	store := storage.Init(driver.NewMemory())
	if mem, ok := store.Driver.(*driver.Memory); ok {
		mem.SetNamespace(settings.Namespace())
	}

	config := &helm.Configuration{
		Releases: store,
		KubeClient: &kubefake.PrintingKubeClient{
			Out: ioutil.Discard,
		},
		Capabilities: chartutil.DefaultCapabilities,
		Log:          func(format string, v ...interface{}) {},
	}

	// Pre-create an existing controller deployment labeled to enforce a
	// single mesh per cluster.
	fakeClientSet := fake.NewSimpleClientset()
	labelMap := make(map[string]string)
	labelMap["meshName"] = defaultMeshName
	labelMap["app"] = constants.OSMControllerName
	labelMap["enforceSingleMesh"] = "true"
	deploymentSpec := &v1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      constants.OSMControllerName,
			Namespace: settings.Namespace() + "-existing",
			Labels:    labelMap,
		},
	}
	_, err := fakeClientSet.AppsV1().Deployments(settings.Namespace()+"-existing").Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})
	assert.Nil(err)

	// Attempt to install a second, differently named mesh.
	install := &installCmd{
		out:       out,
		chartPath: testChartPath,
		meshName:  defaultMeshName + "-2",
		clientSet: fakeClientSet,
		setOptions: []string{
			fmt.Sprintf("OpenServiceMesh.image.registry=%s", testRegistry),
			fmt.Sprintf("OpenServiceMesh.image.tag=%s", testOsmImageTag),
			"OpenServiceMesh.image.pullPolicy=IfNotPresent",
			fmt.Sprintf("OpenServiceMesh.envoyLogLevel=%s", testEnvoyLogLevel),
			fmt.Sprintf("OpenServiceMesh.controllerLogLevel=%s", testControllerLogLevel),
			fmt.Sprintf("OpenServiceMesh.prometheus.retention.time=%s", testRetentionTime),
			"OpenServiceMesh.serviceCertValidityDuration=24h",
			"OpenServiceMesh.deployGrafana=false",
			"OpenServiceMesh.deployPrometheus=false",
			"OpenServiceMesh.enableIngress=true",
			"OpenServiceMesh.certificateManager=tresor",
		},
	}

	err = install.run(config)
	assert.NotNil(err)
	assert.True(strings.Contains(err.Error(), "Cannot install mesh [osm-2]. Existing mesh [osm] enforces single mesh cluster"))
}
explode_data.jsonl/60829
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 757 }
[ 2830, 3393, 1702, 8833, 10888, 14194, 78413, 82, 3564, 14194, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 259, 2207, 7121, 1155, 692, 13967, 1669, 501, 23158, 22622, 340, 57279, 1669, 5819, 26849, 24032, 7121, 10642, 2398, 743, 1833, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestContainerHealthConfig verifies that a container's raw Docker health
// check JSON is carried into the generated docker config. The durations in
// the JSON are nanoseconds (5000000000 ns == 5s, etc.), matched by the
// time.Duration assertions below.
func TestContainerHealthConfig(t *testing.T) {
	testTask := &Task{
		Containers: []*apicontainer.Container{
			{
				Name:            "c1",
				HealthCheckType: apicontainer.DockerHealthCheckType,
				DockerConfig: apicontainer.DockerConfig{
					Config: aws.String(`{ "HealthCheck":{ "Test":["command"], "Interval":5000000000, "Timeout":4000000000, "StartPeriod":60000000000, "Retries":5} }`),
				},
			},
		},
	}
	config, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion)
	assert.Nil(t, err)
	require.NotNil(t, config.Healthcheck, "health config was not set in docker config")
	assert.Equal(t, config.Healthcheck.Test, []string{"command"})
	assert.Equal(t, config.Healthcheck.Retries, 5)
	assert.Equal(t, config.Healthcheck.Interval, 5*time.Second)
	assert.Equal(t, config.Healthcheck.Timeout, 4*time.Second)
	assert.Equal(t, config.Healthcheck.StartPeriod, 1*time.Minute)
}
explode_data.jsonl/37230
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 408 }
[ 2830, 3393, 4502, 14542, 2648, 1155, 353, 8840, 836, 8, 341, 18185, 6262, 1669, 609, 6262, 515, 197, 197, 74632, 25, 29838, 391, 51160, 1743, 33672, 515, 298, 197, 515, 571, 21297, 25, 310, 330, 66, 16, 756, 571, 197, 14542, 3973, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestJsonBOMEncoder_SetPretty verifies that enabling SetPretty produces
// indented (pretty-printed) JSON output for an encoded BOM.
// NOTE(review): the expected string's internal line breaks were mangled in
// transit; reconstructed here as standard two-space-indented JSON — confirm
// against the encoder's actual pretty output.
func TestJsonBOMEncoder_SetPretty(t *testing.T) {
	buf := new(bytes.Buffer)
	encoder := NewBOMEncoder(buf, BOMFileFormatJSON)
	encoder.SetPretty(true)

	bom := NewBOM()
	bom.Metadata = &Metadata{
		Authors: &[]OrganizationalContact{
			{
				Name: "authorName",
			},
		},
	}

	require.NoError(t, encoder.Encode(bom))

	assert.Equal(t, `{
  "bomFormat": "CycloneDX",
  "specVersion": "1.3",
  "version": 1,
  "metadata": {
    "authors": [
      {
        "name": "authorName"
      }
    ]
  }
}
`, buf.String())
}
explode_data.jsonl/34211
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 239 }
[ 2830, 3393, 5014, 33, 1898, 19921, 14812, 51940, 1155, 353, 8840, 836, 8, 341, 26398, 1669, 501, 23158, 22622, 340, 197, 27008, 1669, 1532, 33, 1898, 19921, 10731, 11, 425, 1898, 1703, 4061, 5370, 340, 197, 27008, 4202, 51940, 3715, 692...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestInvalidChannel covers two failure modes of channel handling.
func TestInvalidChannel(t *testing.T) {
	// Scenario: node 1 is ordered to send a message on a channel
	// that doesn't exist, and also receives a message, but
	// the channel cannot be extracted from the message.
	t.Run("channel doesn't exist", func(t *testing.T) {
		node1 := newTestNode(t)
		defer node1.stop()
		_, err := node1.c.Remote(testChannel, 0)
		assert.EqualError(t, err, "channel test doesn't exist")
	})
	t.Run("channel cannot be extracted", func(t *testing.T) {
		node1 := newTestNode(t)
		defer node1.stop()
		node1.c.Configure(testChannel, []cluster.RemoteNode{node1.nodeInfo})
		// Wait until the node can resolve itself as a remote for the channel.
		gt := gomega.NewGomegaWithT(t)
		gt.Eventually(func() (bool, error) {
			_, err := node1.c.Remote(testChannel, node1.nodeInfo.ID)
			return true, err
		}, time.Minute).Should(gomega.BeTrue())
		stub, err := node1.c.Remote(testChannel, node1.nodeInfo.ID)
		assert.NoError(t, err)
		stream := assertEventualEstablishStream(t, stub)
		// An empty SubmitRequest has an empty channel which is invalid
		err = stream.Send(wrapSubmitReq(&orderer.SubmitRequest{}))
		assert.NoError(t, err)
		_, err = stream.Recv()
		assert.EqualError(t, err, "rpc error: code = Unknown desc = badly formatted message, cannot extract channel")
		// Test directly without going through the gRPC stream
		err = node1.c.DispatchSubmit(context.Background(), &orderer.SubmitRequest{})
		assert.EqualError(t, err, "badly formatted message, cannot extract channel")
	})
}
explode_data.jsonl/39836
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 507 }
[ 2830, 3393, 7928, 9629, 1155, 353, 8840, 836, 8, 341, 197, 322, 58663, 25, 2436, 220, 16, 432, 11457, 311, 3624, 264, 1943, 389, 264, 5496, 198, 197, 322, 429, 3171, 944, 3000, 11, 323, 1083, 21189, 264, 1943, 11, 714, 198, 197, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestTopicSyncHelpers checks TopicInfo's replica-sync and leader helpers
// against one fully in-sync topic and one out-of-sync topic. ISR ordering
// is deliberately shuffled relative to Replicas to show order-insensitivity.
func TestTopicSyncHelpers(t *testing.T) {
	testTopicInSync := TopicInfo{
		Partitions: []PartitionInfo{
			{
				Topic:    "topic1",
				ID:       0,
				Leader:   1,
				Replicas: []int{1, 2, 5},
				ISR:      []int{5, 2, 1},
			},
			{
				Topic:    "topic1",
				ID:       1,
				Leader:   2,
				Replicas: []int{2, 4},
				ISR:      []int{2, 4},
			},
			{
				Topic:    "topic1",
				ID:       2,
				Leader:   3,
				Replicas: []int{3, 5},
				ISR:      []int{5, 3},
			},
		},
	}
	assert.True(t, testTopicInSync.AllReplicasInSync())
	assert.Equal(t, []PartitionInfo{}, testTopicInSync.OutOfSyncPartitions(nil))
	assert.True(t, testTopicInSync.AllLeadersCorrect())
	assert.Equal(t, []PartitionInfo{}, testTopicInSync.WrongLeaderPartitions(nil))

	// Partition 0 has a replica (6) missing from ISR; partition 2 has both
	// a replica/ISR mismatch and a leader (2) that is not the first replica.
	testTopicOutOfSync := TopicInfo{
		Partitions: []PartitionInfo{
			{
				Topic:    "topic1",
				ID:       0,
				Leader:   1,
				Replicas: []int{1, 2, 5, 6},
				ISR:      []int{5, 2, 1},
			},
			{
				Topic:    "topic1",
				ID:       1,
				Leader:   2,
				Replicas: []int{2, 4},
				ISR:      []int{2, 4},
			},
			{
				Topic:    "topic1",
				ID:       2,
				Leader:   2,
				Replicas: []int{3, 2},
				ISR:      []int{5, 3},
			},
		},
	}
	assert.False(t, testTopicOutOfSync.AllReplicasInSync())
	assert.Equal(
		t,
		[]PartitionInfo{
			testTopicOutOfSync.Partitions[0],
			testTopicOutOfSync.Partitions[2],
		},
		testTopicOutOfSync.OutOfSyncPartitions(nil),
	)
	// With a broker filter, only partitions touching those brokers count.
	assert.Equal(
		t,
		[]PartitionInfo{
			testTopicOutOfSync.Partitions[2],
		},
		testTopicOutOfSync.OutOfSyncPartitions([]int{1, 2, 3}),
	)
	assert.False(t, testTopicOutOfSync.AllLeadersCorrect())
	assert.Equal(
		t,
		[]PartitionInfo{
			testTopicOutOfSync.Partitions[2],
		},
		testTopicOutOfSync.WrongLeaderPartitions(nil),
	)
	assert.Equal(
		t,
		[]PartitionInfo{},
		testTopicOutOfSync.WrongLeaderPartitions([]int{1}),
	)
}
explode_data.jsonl/70031
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 970 }
[ 2830, 3393, 26406, 12154, 28430, 1155, 353, 8840, 836, 8, 341, 18185, 26406, 641, 12154, 1669, 32911, 1731, 515, 197, 197, 5800, 5930, 25, 3056, 49978, 1731, 515, 298, 197, 515, 571, 197, 26406, 25, 262, 330, 16411, 16, 756, 571, 2958...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConn_DisconnectNotify_Close_async(t *testing.T) { done := make(chan struct{}) c := jsonrpc2.NewConn(context.Background(), jsonrpc2.NewBufferedStream(&readWriteCloser{eof, eof}, jsonrpc2.VarintObjectCodec{}), nil) go func() { if err := c.Close(); err != nil && err != jsonrpc2.ErrClosed { t.Error(err) } close(done) }() select { case <-c.DisconnectNotify(): case <-time.After(200 * time.Millisecond): t.Fatal("no disconnect notification") } <-done }
explode_data.jsonl/50186
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 193 }
[ 2830, 3393, 9701, 45525, 6459, 28962, 68185, 28346, 1155, 353, 8840, 836, 8, 341, 40495, 1669, 1281, 35190, 2036, 37790, 1444, 1669, 2951, 29414, 17, 7121, 9701, 5378, 19047, 1507, 2951, 29414, 17, 7121, 4095, 291, 3027, 2099, 878, 7985, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNeighborCloseTwice(t *testing.T) { a, _, teardown := newPipe() defer teardown() n := newTestNeighbor("A", a) n.Listen() require.NoError(t, n.Close()) require.NoError(t, n.Close()) }
explode_data.jsonl/20516
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 84 }
[ 2830, 3393, 88109, 7925, 22816, 558, 1155, 353, 8840, 836, 8, 341, 11323, 11, 8358, 49304, 1669, 501, 34077, 741, 16867, 49304, 2822, 9038, 1669, 501, 2271, 88109, 445, 32, 497, 264, 340, 9038, 68334, 741, 17957, 35699, 1155, 11, 308, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestService_GetRepos(t *testing.T) { repository := new(repoMock.Repository) helm := new(helmMock.Helm) stringifiedRepos := "[{\"name\":\"stable\",\"url\":\"https://chart.stable.com\"}]" repository.On("Get", "repos").Return(stringifiedRepos).Once() svc := service.NewService(helm, repository) charts := svc.GetRepos() expectedCharts := []model.Repo{ { Name: "stable", URL: "https://chart.stable.com", }, } assert.Equal(t, expectedCharts, charts) }
explode_data.jsonl/15362
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 187 }
[ 2830, 3393, 1860, 13614, 693, 966, 1155, 353, 8840, 836, 8, 341, 17200, 3099, 1669, 501, 50608, 11571, 25170, 340, 9598, 23162, 1669, 501, 3203, 23162, 11571, 3839, 23162, 340, 11357, 1870, 693, 966, 1669, 10545, 64238, 606, 23488, 27992,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUpdatePodOrphanWithNewLabels(t *testing.T) { for _, strategy := range updateStrategies() { manager, _, _, err := newTestController() if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } ds1 := newDaemonSet("foo1") ds1.Spec.UpdateStrategy = *strategy ds2 := newDaemonSet("foo2") ds2.Spec.UpdateStrategy = *strategy manager.dsStore.Add(ds1) manager.dsStore.Add(ds2) pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) prev := *pod prev.Labels = map[string]string{"foo2": "bar2"} bumpResourceVersion(pod) manager.updatePod(&prev, pod) if got, want := manager.queue.Len(), 2; got != want { t.Fatalf("queue.Len() = %v, want %v", got, want) } if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { t.Errorf("getQueuedKeys() = %v, want %v", got, want) } } }
explode_data.jsonl/50357
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 368 }
[ 2830, 3393, 4289, 23527, 2195, 9943, 2354, 3564, 23674, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 8282, 1669, 2088, 2647, 2580, 69388, 368, 341, 197, 92272, 11, 8358, 8358, 1848, 1669, 501, 2271, 2051, 741, 197, 743, 1848, 961, 2092, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestMetricCountGatekeeping(t *testing.T) { m := createMetricsForTesting() // Gather All Metrics metricFamilies, err := m.Registry.Gather() assert.NoError(t, err, "gather metics") // Summarize By Adapter Cardinality // - This requires metrics to be preloaded. We don't preload account metrics, so we can't test those. generalCardinalityCount := 0 adapterCardinalityCount := 0 for _, metricFamily := range metricFamilies { for _, metric := range metricFamily.GetMetric() { isPerAdapter := false for _, label := range metric.GetLabel() { if label.GetName() == adapterLabel { isPerAdapter = true } } if isPerAdapter { adapterCardinalityCount++ } else { generalCardinalityCount++ } } } // Calculate Per-Adapter Cardinality adapterCount := len(openrtb_ext.BidderList()) perAdapterCardinalityCount := adapterCardinalityCount / adapterCount // Verify General Cardinality // - This assertion provides a warning for newly added high-cardinality non-adapter specific metrics. The hardcoded limit // is an arbitrary soft ceiling. Thought should be given as to the value of the new metrics if you find yourself // needing to increase this number. assert.True(t, generalCardinalityCount <= 500, "General Cardinality") // Verify Per-Adapter Cardinality // - This assertion provides a warning for newly added adapter metrics. Threre are 40+ adapters which makes the // cost of new per-adapter metrics rather expensive. Thought should be given when adding new per-adapter metrics. assert.True(t, perAdapterCardinalityCount <= 22, "Per-Adapter Cardinality") }
explode_data.jsonl/13940
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 497 }
[ 2830, 3393, 54310, 2507, 42318, 32394, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 1855, 27328, 2461, 16451, 2822, 197, 322, 48995, 2009, 54190, 198, 2109, 16340, 37, 58904, 11, 1848, 1669, 296, 89142, 1224, 1856, 741, 6948, 35699, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestServiceInstanceValidate(t *testing.T) { cases := []struct { name string instance *ServiceInstance valid bool }{ { name: "nil service", instance: &ServiceInstance{ Labels: Labels{}, Endpoint: endpoint1, }, }, { name: "bad label", instance: &ServiceInstance{ Service: service1, Labels: Labels{"*": "-"}, Endpoint: endpoint1, }, }, { name: "invalid service", instance: &ServiceInstance{ Service: &Service{}, }, }, { name: "invalid endpoint port and service port", instance: &ServiceInstance{ Service: service1, Endpoint: NetworkEndpoint{ Address: "192.168.1.2", Port: -80, }, }, }, { name: "endpoint missing service port", instance: &ServiceInstance{ Service: service1, Endpoint: NetworkEndpoint{ Address: "192.168.1.2", Port: service1.Ports[1].Port, ServicePort: &Port{ Name: service1.Ports[1].Name + "-extra", Port: service1.Ports[1].Port, Protocol: service1.Ports[1].Protocol, }, }, }, }, { name: "endpoint port and protocol mismatch", instance: &ServiceInstance{ Service: service1, Endpoint: NetworkEndpoint{ Address: "192.168.1.2", Port: service1.Ports[1].Port, ServicePort: &Port{ Name: "http", Port: service1.Ports[1].Port + 1, Protocol: ProtocolGRPC, }, }, }, }, } for _, c := range cases { t.Log("running case " + c.name) if got := c.instance.Validate(); (got == nil) != c.valid { t.Errorf("%s failed: got valid=%v but wanted valid=%v: %v", c.name, got == nil, c.valid, got) } } }
explode_data.jsonl/56888
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 791 }
[ 2830, 3393, 1860, 2523, 17926, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 56256, 353, 1860, 2523, 198, 197, 56322, 262, 1807, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPrincipalBad(t *testing.T) { msp1, err := setup("testdata/idemix/MSP1OU1", "MSP1OU1") assert.NoError(t, err) id1, err := getDefaultSigner(msp1) assert.NoError(t, err) principal := &msp.MSPPrincipal{ PrincipalClassification: 1234, Principal: nil} err = id1.SatisfiesPrincipal(principal) assert.Error(t, err, "Principal with bad Classification should fail") assert.Contains(t, err.Error(), "invalid principal type") }
explode_data.jsonl/46052
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 185 }
[ 2830, 3393, 31771, 17082, 1155, 353, 8840, 836, 8, 341, 47691, 79, 16, 11, 1848, 1669, 6505, 445, 92425, 38146, 336, 941, 10270, 4592, 16, 11922, 16, 497, 330, 44, 4592, 16, 11922, 16, 1138, 6948, 35699, 1155, 11, 1848, 692, 15710, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_skipReplicaCountWithAutoscaleEnabled(t *testing.T) { const valuesWithHPAndReplicaCountFormat = ` values: pilot: autoscaleEnabled: %t gateways: istio-ingressgateway: autoscaleEnabled: %t istio-egressgateway: autoscaleEnabled: %t components: pilot: k8s: replicaCount: 2 ingressGateways: - name: istio-ingressgateway enabled: true k8s: replicaCount: 2 egressGateways: - name: istio-egressgateway enabled: true k8s: replicaCount: 2 ` cases := []struct { name string component name.ComponentName values string expectSkip bool }{ { name: "hpa enabled for pilot without replicas", component: name.PilotComponentName, values: fmt.Sprintf(valuesWithHPAndReplicaCountFormat, false, false, false), expectSkip: false, }, { name: "hpa enabled for ingressgateway without replica", component: name.IngressComponentName, values: fmt.Sprintf(valuesWithHPAndReplicaCountFormat, false, false, false), expectSkip: false, }, { name: "hpa enabled for pilot without replicas", component: name.EgressComponentName, values: fmt.Sprintf(valuesWithHPAndReplicaCountFormat, false, false, false), expectSkip: false, }, { name: "hpa enabled for pilot with replicas", component: name.PilotComponentName, values: fmt.Sprintf(valuesWithHPAndReplicaCountFormat, true, false, false), expectSkip: true, }, { name: "hpa enabled for ingressgateway with replicass", component: name.IngressComponentName, values: fmt.Sprintf(valuesWithHPAndReplicaCountFormat, false, true, false), expectSkip: true, }, { name: "hpa enabled for egressgateway with replicas", component: name.EgressComponentName, values: fmt.Sprintf(valuesWithHPAndReplicaCountFormat, true, false, true), expectSkip: true, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { var iop *v1alpha1.IstioOperatorSpec if tt.values != "" { iop = &v1alpha1.IstioOperatorSpec{} if err := util.UnmarshalWithJSONPB(tt.values, iop, true); err != nil { t.Fatal(err) } } got := skipReplicaCountWithAutoscaleEnabled(iop, tt.component) assert.Equal(t, tt.expectSkip, 
got) }) } }
explode_data.jsonl/34683
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 994 }
[ 2830, 3393, 44830, 18327, 15317, 2507, 2354, 19602, 436, 2246, 5462, 1155, 353, 8840, 836, 8, 341, 4777, 2750, 2354, 6610, 3036, 18327, 15317, 2507, 4061, 284, 22074, 3661, 510, 220, 17708, 510, 262, 46872, 2246, 5462, 25, 1018, 83, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestAPIListGitHooksNoAccess(t *testing.T) { defer prepareTestEnv(t)() repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository) owner := models.AssertExistsAndLoadBean(t, &models.User{ID: repo.OwnerID}).(*models.User) session := loginUser(t, owner.Name) token := getTokenForLoggedInUser(t, session) req := NewRequestf(t, "GET", "/api/v1/repos/%s/%s/hooks/git?token=%s", owner.Name, repo.Name, token) MakeRequest(t, req, http.StatusForbidden) }
explode_data.jsonl/32981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 198 }
[ 2830, 3393, 7082, 852, 46562, 67769, 2753, 6054, 1155, 353, 8840, 836, 8, 341, 16867, 10549, 2271, 14359, 1155, 8, 2822, 17200, 5368, 1669, 4119, 11711, 15575, 3036, 5879, 10437, 1155, 11, 609, 6507, 25170, 90, 915, 25, 220, 16, 16630, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReversiAnz31(t *testing.T) { r := NewReversiAnz() r.SetOwnEdgeSideOneCnt(1) if r.GetOwnEdgeSideOneCnt() != 1 { t.Errorf("NG") } }
explode_data.jsonl/23054
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 72 }
[ 2830, 3393, 693, 3004, 72, 2082, 89, 18, 16, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 693, 3004, 72, 2082, 89, 741, 7000, 4202, 14182, 11656, 16384, 3966, 33747, 7, 16, 340, 743, 435, 2234, 14182, 11656, 16384, 3966, 33747, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestPeersConfig(t *testing.T) { endpointConfig, err := ConfigFromBackend(configBackend) if err != nil { t.Fatal("Failed to get endpoint config from backend") } pc, ok := endpointConfig.PeersConfig(org2) assert.True(t, ok) for _, value := range pc { if value.URL == "" { t.Fatal("Url value for the host is empty") } } pc, ok = endpointConfig.PeersConfig(org1) assert.True(t, ok) for _, value := range pc { if value.URL == "" { t.Fatal("Url value for the host is empty") } } }
explode_data.jsonl/34080
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 199 }
[ 2830, 3393, 10197, 388, 2648, 1155, 353, 8840, 836, 8, 1476, 6246, 2768, 2648, 11, 1848, 1669, 5532, 3830, 29699, 8754, 29699, 340, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 445, 9408, 311, 633, 14887, 2193, 504, 19163, 1138, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestGather_host(t *testing.T) { s := &Snmp{ Agents: []string{"TestGather"}, Name: "mytable", Fields: []Field{ { Name: "host", Oid: ".1.0.0.1.1", IsTag: true, }, { Name: "myfield2", Oid: ".1.0.0.1.2", }, }, connectionCache: []snmpConnection{ tsc, }, } acc := &testutil.Accumulator{} require.NoError(t, s.Gather(acc)) require.Len(t, acc.Metrics, 1) m := acc.Metrics[0] require.Equal(t, "baz", m.Tags["host"]) }
explode_data.jsonl/50795
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 255 }
[ 2830, 3393, 38, 1856, 12848, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 609, 20720, 1307, 515, 197, 197, 91804, 25, 3056, 917, 4913, 2271, 38, 1856, 7115, 197, 21297, 25, 256, 330, 2408, 2005, 756, 197, 197, 8941, 25, 3056, 1877, 515...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEndpoint(t *testing.T) { network, err := CreateTestNetwork() if err != nil { t.Error(err) } Endpoint := &hcsshim.HNSEndpoint{ Name: NatTestEndpointName, } Endpoint, err = network.CreateEndpoint(Endpoint) if err != nil { t.Error(err) } err = Endpoint.HostAttach(1) if err != nil { t.Error(err) } err = Endpoint.HostDetach() if err != nil { t.Error(err) } _, err = Endpoint.Delete() if err != nil { t.Error(err) } _, err = network.Delete() if err != nil { t.Error(err) } }
explode_data.jsonl/7693
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 228 }
[ 2830, 3393, 27380, 1155, 353, 8840, 836, 8, 1476, 9038, 2349, 11, 1848, 1669, 4230, 2271, 12320, 741, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, 197, 630, 197, 27380, 1669, 609, 71, 5143, 39517, 3839, 71438, 303, 2768, 515...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestGetPageInfo(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) ctx = OrderBy(ctx, "test", ASC) ctx = StartPage(ctx, 1, 10) pageInfo := GetPageInfo(ctx) pageInfo.total = 1001 t.Log( "pageNum: ", pageInfo.GetPageNum(), "totalPage: ", pageInfo.GetTotalPage(), "pageSize: ", pageInfo.GetPageSize(), "total: ", pageInfo.GetTotal()) if pageInfo.GetTotalPage() != 101 { t.Fail() } }
explode_data.jsonl/64349
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 170 }
[ 2830, 3393, 1949, 2665, 1731, 1155, 353, 8840, 836, 8, 341, 20985, 11, 716, 1669, 2266, 26124, 7636, 5378, 19047, 1507, 220, 17, 77053, 32435, 340, 20985, 284, 7217, 1359, 7502, 11, 330, 1944, 497, 19796, 340, 20985, 284, 5145, 2665, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMonotonicString(t *testing.T) { t1 := Now() t.Logf("Now() = %v", t1) for _, tt := range monotonicStringTests { t1 := Now() SetMono(&t1, tt.mono) s := t1.String() got := s[strings.LastIndex(s, " ")+1:] if got != tt.want { t.Errorf("with mono=%d: got %q; want %q", tt.mono, got, tt.want) } } }
explode_data.jsonl/48367
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 162 }
[ 2830, 3393, 11095, 354, 14011, 703, 1155, 353, 8840, 836, 8, 341, 3244, 16, 1669, 4695, 741, 3244, 98954, 445, 7039, 368, 284, 1018, 85, 497, 259, 16, 692, 2023, 8358, 17853, 1669, 2088, 77216, 14011, 703, 18200, 341, 197, 3244, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestInvalidateAllEmailInvites(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() t1 := model.Token{ Token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", CreateAt: model.GetMillis(), Type: TOKEN_TYPE_GUEST_INVITATION, Extra: "", } err := th.App.Srv.Store.Token().Save(&t1) require.Nil(t, err) t2 := model.Token{ Token: "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy", CreateAt: model.GetMillis(), Type: TOKEN_TYPE_TEAM_INVITATION, Extra: "", } err = th.App.Srv.Store.Token().Save(&t2) require.Nil(t, err) t3 := model.Token{ Token: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz", CreateAt: model.GetMillis(), Type: "other", Extra: "", } err = th.App.Srv.Store.Token().Save(&t3) require.Nil(t, err) err = th.App.InvalidateAllEmailInvites() require.Nil(t, err) _, err = th.App.Srv.Store.Token().GetByToken(t1.Token) require.NotNil(t, err) _, err = th.App.Srv.Store.Token().GetByToken(t2.Token) require.NotNil(t, err) _, err = th.App.Srv.Store.Token().GetByToken(t3.Token) require.Nil(t, err) }
explode_data.jsonl/30281
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 537 }
[ 2830, 3393, 641, 7067, 2403, 4781, 15174, 3611, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 2822, 3244, 16, 1669, 1614, 32277, 515, 197, 33299, 25, 262, 330, 44102, 44102, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRetriesNew_As(t *testing.T) { err := NewRetriesResult(NewResult(404, "this is an example error, %s", "yep"), 5, time.Now(), nil) var event *RetriesResult if !protocol.ResultAs(err, &event) { t.Errorf("Expected error to be a NewRetriesResult, is not") } if event.Retries != 5 { t.Errorf("Mismatched retries") } }
explode_data.jsonl/66500
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 12020, 4019, 3564, 62741, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 1532, 12020, 4019, 2077, 35063, 2077, 7, 19, 15, 19, 11, 330, 574, 374, 458, 3110, 1465, 11, 1018, 82, 497, 330, 88, 747, 3975, 220, 20, 11, 882, 1324...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetRepairRecord(t *testing.T) { taskIdArgs := &TaskIdArgs{ TaskId: BBC_TestTaskId, } res, err := BBC_CLIENT.GetRepairTaskRecord(taskIdArgs) fmt.Println(res) ExpectEqual(t.Errorf, err, nil) }
explode_data.jsonl/4083
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 89 }
[ 2830, 3393, 1949, 98386, 6471, 1155, 353, 8840, 836, 8, 341, 49115, 764, 4117, 1669, 609, 6262, 764, 4117, 515, 197, 81153, 764, 25, 18096, 32541, 6262, 764, 345, 197, 532, 10202, 11, 1848, 1669, 18096, 22521, 2234, 98386, 6262, 6471, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestAttachClusterOptions_ValidateWithClient(t *testing.T) { s := scheme.Scheme cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "myName", Namespace: "myNamespace", Labels: map[string]string{ "ocm-configmap-type": "image-manifest", "ocm-release-version": "2.3.0", }, }, Data: map[string]string{}, } kubeClient := fakekubernetes.NewSimpleClientset(cm) dynamicClient := fakedynamic.NewSimpleDynamicClient(s) type fields struct { values map[string]interface{} clusterName string clusterServer string clusterToken string clusterKubeConfig string } tests := []struct { name string fields fields wantErr bool }{ { name: "Success local-cluster, all info in values", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "local-cluster", }, }, }, wantErr: false, }, { name: "Failed local-cluster, cluster name empty", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "", }, }, }, wantErr: true, }, { name: "Failed local-cluster, cluster name missing", fields: fields{ values: map[string]interface{}{}, }, wantErr: true, }, { name: "Success non-local-cluster, overrite cluster-name with local-cluster", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "test-cluster", }, }, clusterName: "local-cluster", }, wantErr: false, }, { name: "Failed non-local-cluster, missing kubeconfig or token/server", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "cluster-test", }, }, }, wantErr: true, }, { name: "Success non-local-cluster, with kubeconfig", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "cluster-test", }, }, clusterKubeConfig: "fake-config", }, wantErr: false, }, { name: "Success non-local-cluster, with token/server", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "cluster-test", }, }, 
clusterToken: "fake-token", clusterServer: "fake-server", }, wantErr: false, }, { name: "Failed non-local-cluster, with token no server", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "cluster-test", }, }, clusterToken: "fake-token", }, wantErr: true, }, { name: "Failed non-local-cluster, with server no token", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "cluster-test", }, }, clusterServer: "fake-server", }, wantErr: true, }, { name: "Failed non-local-cluster, with kubeconfig and token/server", fields: fields{ values: map[string]interface{}{ "managedCluster": map[string]interface{}{ "name": "cluster-test", }, }, clusterKubeConfig: "fake-config", clusterToken: "fake-token", clusterServer: "fake-server", }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { o := &Options{ values: tt.fields.values, clusterName: tt.fields.clusterName, clusterServer: tt.fields.clusterServer, clusterToken: tt.fields.clusterToken, clusterKubeConfig: tt.fields.clusterKubeConfig, } if err := o.validateWithClient(kubeClient, dynamicClient); (err != nil) != tt.wantErr { t.Errorf("AttachClusterOptions.Validate() error = %v, wantErr %v", err, tt.wantErr) } }) } }
explode_data.jsonl/35501
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1779 }
[ 2830, 3393, 30485, 28678, 3798, 62, 17926, 2354, 2959, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 12859, 92719, 198, 98316, 1669, 609, 98645, 16, 10753, 2227, 515, 197, 23816, 12175, 25, 77520, 16, 80222, 515, 298, 21297, 25, 414, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEventTimeout(t *testing.T) { // Test whether a WorkflowTimedOut event is emitted in case of timeout controller := newController() wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("") wf := unmarshalWF(timeout) wf, err := wfcset.Create(wf) assert.NoError(t, err) woc := newWorkflowOperationCtx(wf, controller) woc.operate() makePodsRunning(t, controller.kubeclientset, wf.ObjectMeta.Namespace) wf, err = wfcset.Get(wf.ObjectMeta.Name, metav1.GetOptions{}) assert.NoError(t, err) woc = newWorkflowOperationCtx(wf, controller) time.Sleep(10 * time.Second) woc.operate() events, err := controller.kubeclientset.CoreV1().Events("").List(metav1.ListOptions{}) assert.NoError(t, err) assert.Equal(t, 2, len(events.Items)) runningEvent := events.Items[0] assert.Equal(t, "WorkflowRunning", runningEvent.Reason) timeoutEvent := events.Items[1] assert.Equal(t, "WorkflowTimedOut", timeoutEvent.Reason) assert.True(t, strings.HasPrefix(timeoutEvent.Message, "timeout-template error in entry template execution: Deadline exceeded")) }
explode_data.jsonl/54393
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 386 }
[ 2830, 3393, 1556, 7636, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 3425, 264, 60173, 20217, 291, 2662, 1538, 374, 46942, 304, 1142, 315, 9632, 198, 61615, 1669, 501, 2051, 741, 6692, 8316, 746, 1669, 6461, 1418, 69, 2972, 746, 1897...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMutableEmpty(t *testing.T) { t.Parallel() // Ensure the treap length is the expected value. testTreap := NewMutable() if gotLen := testTreap.Len(); gotLen != 0 { t.Fatalf("Len: unexpected length - got %d, want %d", gotLen, 0) } // Ensure the reported size is 0. if gotSize := testTreap.Size(); gotSize != 0 { t.Fatalf("Size: unexpected byte size - got %d, want 0", gotSize) } // Ensure there are no errors with requesting keys from an empty treap. key := serializeUint32(0) if gotVal := testTreap.Has(key); gotVal { t.Fatalf("Has: unexpected result - got %v, want false", gotVal) } if gotVal := testTreap.Get(key); gotVal != nil { t.Fatalf("Get: unexpected result - got %x, want nil", gotVal) } // Ensure there are no panics when deleting keys from an empty treap. testTreap.Delete(key) // Ensure the number of keys iterated by ForEach on an empty treap is // zero. var numIterated int testTreap.ForEach(func(k, v []byte) bool { numIterated++ return true }) if numIterated != 0 { t.Fatalf("ForEach: unexpected iterate count - got %d, want 0", numIterated) } }
explode_data.jsonl/70542
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 402 }
[ 2830, 3393, 11217, 3522, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 322, 29279, 279, 4258, 391, 3084, 374, 279, 3601, 897, 624, 18185, 65775, 391, 1669, 1532, 11217, 741, 743, 2684, 11271, 1669, 1273, 65775, 391, 65819,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestProcessAssociationUnableToParseAssociation(t *testing.T) { processor := createProcessor() svcMock := service.NewMockDefault() assocRawData := createAssociationRawData() output := ssm.UpdateInstanceAssociationStatusOutput{} sys = &systemStub{} complianceUploader := complianceUploader.NewMockDefault() parserMock := parserMock{} // Arrange processor.assocSvc = svcMock assocParser = &parserMock processor.complianceUploader = complianceUploader // Mock service mockService(svcMock, assocRawData, &output) // Mock processor processorMock := &processormock.MockedProcessor{} processor.proc = processorMock ch := make(chan contracts.DocumentResult) processorMock.On("Start").Return(ch, nil) processorMock.On("InitialProcessing").Return(nil) complianceUploader.On("CreateNewServiceIfUnHealthy", mock.AnythingOfType("*log.Mock")) // Act processor.InitializeAssociationProcessor() processor.ProcessAssociation() close(ch) // Assert assert.True(t, svcMock.AssertNumberOfCalls(t, "CreateNewServiceIfUnHealthy", 1)) assert.True(t, svcMock.AssertNumberOfCalls(t, "ListInstanceAssociations", 1)) assert.True(t, svcMock.AssertNumberOfCalls(t, "LoadAssociationDetail", 1)) }
explode_data.jsonl/74693
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 403 }
[ 2830, 3393, 7423, 63461, 17075, 1249, 14463, 63461, 1155, 353, 8840, 836, 8, 341, 197, 29474, 1669, 1855, 22946, 741, 1903, 7362, 11571, 1669, 2473, 7121, 11571, 3675, 741, 197, 46877, 20015, 1043, 1669, 1855, 63461, 20015, 1043, 741, 211...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestWritersACLFail deploys a chaincode, commits the deploy, then installs a
// rejecting "writers" policy on the channel and verifies that a subsequent
// invoke proposal is refused. The chain is stopped on every exit path.
func TestWritersACLFail(t *testing.T) {
	//skip pending FAB-2457 fix
	t.Skip()
	chainID := util.GetTestChainID()
	var ctxt = context.Background()
	url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01"
	chaincodeID := &pb.ChaincodeID{Path: url, Name: "ex01-fail", Version: "0"}
	defer deleteChaincodeOnDisk("ex01-fail.0")
	args := []string{"10"}
	f := "init"
	argsDeploy := util.ToChaincodeArgs(f, "a", "100", "b", "200")
	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeId: chaincodeID, Input: &pb.ChaincodeInput{Args: argsDeploy}}
	cccid := ccprovider.NewCCContext(chainID, "ex01-fail", "0", "", false, nil, nil)
	resp, prop, err := deploy(endorserServer, chainID, spec, nil)
	chaincodeID1 := spec.ChaincodeId.Name
	if err != nil {
		t.Fail()
		t.Logf("Error deploying <%s>: %s", chaincodeID1, err)
		chaincode.GetChain().Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: chaincodeID}})
		return
	}
	var nextBlockNumber uint64 = 3 // The tests that ran before this test created blocks 0-2
	err = endorserServer.(*Endorser).commitTxSimulation(prop, chainID, signer, resp, nextBlockNumber)
	if err != nil {
		t.Fail()
		t.Logf("Error committing deploy <%s>: %s", chaincodeID1, err)
		chaincode.GetChain().Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: chaincodeID}})
		return
	}
	// here we inject a reject policy for writers
	// to simulate the scenario in which the invoker
	// is not authorized to issue this proposal
	rejectpolicy := &mockpolicies.Policy{
		Err: errors.New("The creator of this proposal does not fulfil the writers policy of this chain"),
	}
	pm := peer.GetPolicyManager(chainID)
	pm.(*mockpolicies.Manager).PolicyMap = map[string]policies.Policy{policies.ChannelApplicationWriters: rejectpolicy}
	f = "invoke"
	invokeArgs := append([]string{f}, args...)
	spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: chaincodeID, Input: &pb.ChaincodeInput{Args: util.ToChaincodeArgs(invokeArgs...)}}
	// The invoke must now be rejected by the injected writers policy.
	prop, resp, _, _, err = invoke(chainID, spec)
	if err == nil {
		t.Fail()
		t.Logf("Invocation should have failed")
		chaincode.GetChain().Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: chaincodeID}})
		return
	}
	fmt.Println("TestWritersACLFail passed")
	t.Logf("TestWritersACLFail passed")
	chaincode.GetChain().Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: chaincodeID}})
}
explode_data.jsonl/27805
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 893 }
[ 2830, 3393, 54, 31829, 55393, 19524, 1155, 353, 8840, 836, 8, 341, 197, 322, 20599, 15280, 434, 1867, 12, 17, 19, 20, 22, 5046, 198, 3244, 57776, 741, 197, 8819, 915, 1669, 4094, 2234, 2271, 18837, 915, 741, 2405, 59162, 284, 2266, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// Test_MergeL3 distills pairs of L3/L4 rules through a policy distillery and
// checks the resulting MapState entries for each rule permutation.
func Test_MergeL3(t *testing.T) {
	identityCache := cache.IdentityCache{
		identity.NumericIdentity(identityFoo): labelsFoo,
		identity.NumericIdentity(identityBar): labelsBar,
	}
	selectorCache := NewSelectorCache(identityCache)
	tests := []struct {
		test   int
		rules  api.Rules
		result MapState
	}{
		{0, api.Rules{rule__L3AllowFoo, rule__L3AllowBar}, MapState{mapKeyAllowFoo__: mapEntryL7None_, mapKeyAllowBar__: mapEntryL7None_}},
		{1, api.Rules{rule__L3AllowFoo, ruleL3L4__Allow}, MapState{mapKeyAllowFoo__: mapEntryL7None_, mapKeyAllowFooL4: mapEntryL7None_}},
	}
	for _, tt := range tests {
		// Fresh repository per permutation so rules do not accumulate.
		repo := newPolicyDistillery(selectorCache)
		for _, r := range tt.rules {
			if r != nil {
				rule := r.WithEndpointSelector(selectFoo_)
				_, _ = repo.AddList(api.Rules{rule})
			}
		}
		t.Run(fmt.Sprintf("permutation_%d", tt.test), func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			mapstate, err := repo.distillPolicy(labelsFoo)
			if err != nil {
				t.Errorf("Policy resolution failure: %s", err)
			}
			if equal, err := checker.DeepEqual(mapstate, tt.result); !equal {
				// Dump the rules and the policy trace to aid debugging.
				t.Logf("Rules:\n%s\n\n", tt.rules.String())
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Errorf("Policy obtained didn't match expected for endpoint %s:\n%s", labelsFoo, err)
			}
		})
	}
}
explode_data.jsonl/50023
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 569 }
[ 2830, 3393, 1245, 10080, 43, 18, 1155, 353, 8840, 836, 8, 341, 197, 16912, 8233, 1669, 6500, 24423, 8233, 515, 197, 197, 16912, 2067, 12572, 18558, 86843, 40923, 1648, 9201, 40923, 345, 197, 197, 16912, 2067, 12572, 18558, 86843, 3428, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestIteratorsAndNextResultOrderA(t *testing.T) { ts := MakeTestingMemstore() fixed := ts.MakeFixed() fixed.AddValue(ts.GetIdFor("C")) all := ts.GetNodesAllIterator() lto := graph.NewLinksToIterator(ts, all, "o") innerAnd := graph.NewAndIterator() fixed2 := ts.MakeFixed() fixed2.AddValue(ts.GetIdFor("follows")) lto2 := graph.NewLinksToIterator(ts, fixed2, "p") innerAnd.AddSubIterator(lto2) innerAnd.AddSubIterator(lto) hasa := graph.NewHasaIterator(ts, innerAnd, "s") outerAnd := graph.NewAndIterator() outerAnd.AddSubIterator(fixed) outerAnd.AddSubIterator(hasa) val, ok := outerAnd.Next() if !ok { t.Error("Expected one matching subtree") } if ts.GetNameFor(val) != "C" { t.Errorf("Matching subtree should be %s, got %s", "barak", ts.GetNameFor(val)) } expected := make([]string, 2) expected[0] = "B" expected[1] = "D" actualOut := make([]string, 2) actualOut[0] = ts.GetNameFor(all.LastResult()) nresultOk := outerAnd.NextResult() if !nresultOk { t.Error("Expected two results got one") } actualOut[1] = ts.GetNameFor(all.LastResult()) nresultOk = outerAnd.NextResult() if nresultOk { t.Error("Expected two results got three") } CompareStringSlices(t, expected, actualOut) val, ok = outerAnd.Next() if ok { t.Error("More than one possible top level output?") } }
explode_data.jsonl/76228
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 511 }
[ 2830, 3393, 8537, 2973, 3036, 5847, 2077, 4431, 32, 1155, 353, 8840, 836, 8, 341, 57441, 1669, 7405, 16451, 18816, 4314, 741, 1166, 3286, 1669, 10591, 50133, 13520, 741, 1166, 3286, 1904, 1130, 35864, 2234, 764, 2461, 445, 34, 5455, 509...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestAkimaSlopesErrors(t *testing.T) { t.Parallel() for _, test := range []struct { xs, ys []float64 }{ { xs: []float64{0, 1, 2}, ys: []float64{10, 20}, }, { xs: []float64{0, 1}, ys: []float64{10, 20, 30}, }, { xs: []float64{0, 2}, ys: []float64{0, 1}, }, { xs: []float64{0, 1, 1}, ys: []float64{10, 20, 10}, }, { xs: []float64{0, 2, 1}, ys: []float64{10, 20, 10}, }, { xs: []float64{0, 0}, ys: []float64{-1, 2}, }, { xs: []float64{0, -1}, ys: []float64{-1, 2}, }, } { if !panics(func() { akimaSlopes(test.xs, test.ys) }) { t.Errorf("expected panic for xs: %v and ys: %v", test.xs, test.ys) } } }
explode_data.jsonl/44086
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 385 }
[ 2830, 3393, 55802, 7523, 50, 385, 20352, 13877, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 2023, 8358, 1273, 1669, 2088, 3056, 1235, 341, 197, 10225, 82, 11, 31810, 3056, 3649, 21, 19, 198, 197, 59403, 197, 197, 515, 298, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestQueryInfo prepares a statement directly on a connection and verifies the
// prepared-statement metadata: one bind marker in the request and, for
// protocol versions above 1, two result columns in the response.
func TestQueryInfo(t *testing.T) {
	session := createSession(t)
	defer session.Close()

	conn := getRandomConn(t, session)
	info, err := conn.prepareStatement(context.Background(), "SELECT release_version, host_id FROM system.local WHERE key = ?", nil)
	if err != nil {
		t.Fatalf("Failed to execute query for preparing statement: %v", err)
	}
	if x := len(info.request.columns); x != 1 {
		t.Fatalf("Was not expecting meta data for %d query arguments, but got %d\n", 1, x)
	}
	// Protocol v1 does not include result-set metadata at prepare time, so
	// the response columns are only checked for newer protocol versions.
	if session.cfg.ProtoVersion > 1 {
		if x := len(info.response.columns); x != 2 {
			t.Fatalf("Was not expecting meta data for %d result columns, but got %d\n", 2, x)
		}
	}
}
explode_data.jsonl/11160
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 232 }
[ 2830, 3393, 2859, 1731, 1155, 353, 8840, 836, 8, 341, 25054, 1669, 1855, 5283, 1155, 340, 16867, 3797, 10421, 2822, 32917, 1669, 52436, 9701, 1155, 11, 3797, 340, 27043, 11, 1848, 1669, 4534, 29704, 5378, 19047, 1507, 330, 4858, 4879, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestHistory drives the immuclient CLI "history" command against a bufconn
// server: a missing key reports "key not found"; after a set, the history
// output contains a hash.
func TestHistory(t *testing.T) {
	options := server.DefaultOptions().WithAuth(true)
	bs := servertest.NewBufconnServer(options)
	bs.Start()
	defer bs.Stop()
	defer os.RemoveAll(options.Dir)
	defer os.Remove(".state-")

	tkf := cmdtest.RandString()
	ts := tokenservice.NewFileTokenService().WithTokenFileName(tkf)
	ic := test.NewClientTest(&test.PasswordReader{
		Pass: []string{"immudb"},
	}, ts)
	ic.Connect(bs.Dialer)
	ic.Login("immudb")

	cli := new(cli)
	cli.immucl = ic.Imc

	// History of a key that was never written should report not-found.
	msg, err := cli.history([]string{"key"})
	if err != nil {
		t.Fatal("History fail", err)
	}
	if !strings.Contains(msg, "key not found") {
		t.Fatalf("History fail %s", msg)
	}

	_, err = cli.set([]string{"key", "value"})
	if err != nil {
		t.Fatal("History fail", err)
	}

	// After a write, the history output should include the entry hash.
	msg, err = cli.history([]string{"key"})
	if err != nil {
		t.Fatal("History fail", err)
	}
	if !strings.Contains(msg, "hash") {
		t.Fatalf("History fail %s", msg)
	}
}
explode_data.jsonl/39499
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 378 }
[ 2830, 3393, 13424, 1155, 353, 8840, 836, 8, 341, 35500, 1669, 3538, 13275, 3798, 1005, 2354, 5087, 3715, 340, 93801, 1669, 1420, 1621, 477, 7121, 15064, 5148, 5475, 12078, 692, 93801, 12101, 741, 16867, 17065, 30213, 2822, 16867, 2643, 84...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestAddChannelMember covers adding members to public and private channels:
// success paths, bad/missing IDs, permission failures for non-members, and the
// RestrictPrivateChannelManageMembers policy at PERMISSIONS_ALL,
// CHANNEL_ADMIN, TEAM_ADMIN and SYSTEM_ADMIN levels (licensed and not).
func TestAddChannelMember(t *testing.T) {
	th := Setup().InitBasic().InitSystemAdmin()
	defer th.TearDown()
	Client := th.Client
	user := th.BasicUser
	user2 := th.BasicUser2
	team := th.BasicTeam
	publicChannel := th.CreatePublicChannel()
	privateChannel := th.CreatePrivateChannel()

	user3 := th.CreateUserWithClient(th.SystemAdminClient)
	_, resp := th.SystemAdminClient.AddTeamMember(team.Id, user3.Id)
	CheckNoError(t, resp)

	cm, resp := Client.AddChannelMember(publicChannel.Id, user2.Id)
	CheckNoError(t, resp)
	CheckCreatedStatus(t, resp)
	if cm.ChannelId != publicChannel.Id {
		t.Fatal("should have returned exact channel")
	}
	if cm.UserId != user2.Id {
		t.Fatal("should have returned exact user added to public channel")
	}

	cm, resp = Client.AddChannelMember(privateChannel.Id, user2.Id)
	CheckNoError(t, resp)
	if cm.ChannelId != privateChannel.Id {
		t.Fatal("should have returned exact channel")
	}
	if cm.UserId != user2.Id {
		t.Fatal("should have returned exact user added to private channel")
	}

	post := &model.Post{ChannelId: publicChannel.Id, Message: "a" + GenerateTestId() + "a"}
	rpost, err := Client.CreatePost(post)
	// NOTE(review): this condition looks inverted — t.Fatal fires when the
	// second return is nil, yet the message says the post should have been
	// created and rpost.Id is used below. Confirm CreatePost's second return
	// type (error vs. always-non-nil *Response) before changing.
	if err == nil {
		t.Fatal("should have created a post")
	}

	Client.RemoveUserFromChannel(publicChannel.Id, user.Id)
	_, resp = Client.AddChannelMemberWithRootId(publicChannel.Id, user.Id, rpost.Id)
	CheckNoError(t, resp)
	CheckCreatedStatus(t, resp)

	Client.RemoveUserFromChannel(publicChannel.Id, user.Id)
	_, resp = Client.AddChannelMemberWithRootId(publicChannel.Id, user.Id, "junk")
	CheckBadRequestStatus(t, resp)

	_, resp = Client.AddChannelMemberWithRootId(publicChannel.Id, user.Id, GenerateTestId())
	CheckNotFoundStatus(t, resp)

	Client.RemoveUserFromChannel(publicChannel.Id, user.Id)
	_, resp = Client.AddChannelMember(publicChannel.Id, user.Id)
	CheckNoError(t, resp)

	cm, resp = Client.AddChannelMember(publicChannel.Id, "junk")
	CheckBadRequestStatus(t, resp)
	if cm != nil {
		t.Fatal("should return nothing")
	}

	_, resp = Client.AddChannelMember(publicChannel.Id, GenerateTestId())
	CheckNotFoundStatus(t, resp)

	_, resp = Client.AddChannelMember("junk", user2.Id)
	CheckBadRequestStatus(t, resp)

	_, resp = Client.AddChannelMember(GenerateTestId(), user2.Id)
	CheckNotFoundStatus(t, resp)

	otherUser := th.CreateUser()
	otherChannel := th.CreatePublicChannel()
	Client.Logout()
	Client.Login(user2.Id, user2.Password)
	_, resp = Client.AddChannelMember(publicChannel.Id, otherUser.Id)
	CheckUnauthorizedStatus(t, resp)
	_, resp = Client.AddChannelMember(privateChannel.Id, otherUser.Id)
	CheckUnauthorizedStatus(t, resp)
	_, resp = Client.AddChannelMember(otherChannel.Id, otherUser.Id)
	CheckUnauthorizedStatus(t, resp)

	Client.Logout()
	Client.Login(user.Id, user.Password)

	// should fail adding user who is not a member of the team
	_, resp = Client.AddChannelMember(otherChannel.Id, otherUser.Id)
	CheckUnauthorizedStatus(t, resp)

	Client.DeleteChannel(otherChannel.Id)

	// should fail adding user to a deleted channel
	_, resp = Client.AddChannelMember(otherChannel.Id, user2.Id)
	CheckUnauthorizedStatus(t, resp)

	Client.Logout()
	_, resp = Client.AddChannelMember(publicChannel.Id, user2.Id)
	CheckUnauthorizedStatus(t, resp)

	_, resp = Client.AddChannelMember(privateChannel.Id, user2.Id)
	CheckUnauthorizedStatus(t, resp)

	_, resp = th.SystemAdminClient.AddChannelMember(publicChannel.Id, user2.Id)
	CheckNoError(t, resp)

	_, resp = th.SystemAdminClient.AddChannelMember(privateChannel.Id, user2.Id)
	CheckNoError(t, resp)

	// Test policy does not apply to TE.
	restrictPrivateChannel := *th.App.Config().TeamSettings.RestrictPrivateChannelManageMembers
	defer func() {
		th.App.UpdateConfig(func(cfg *model.Config) {
			*cfg.TeamSettings.RestrictPrivateChannelManageMembers = restrictPrivateChannel
		})
	}()
	th.App.UpdateConfig(func(cfg *model.Config) {
		*cfg.TeamSettings.RestrictPrivateChannelManageMembers = model.PERMISSIONS_CHANNEL_ADMIN
	})
	th.App.SetDefaultRolesBasedOnConfig()

	Client.Login(user2.Username, user2.Password)
	privateChannel = th.CreatePrivateChannel()
	_, resp = Client.AddChannelMember(privateChannel.Id, user.Id)
	CheckNoError(t, resp)
	Client.Logout()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckNoError(t, resp)
	Client.Logout()

	// Add a license
	isLicensed := utils.IsLicensed()
	license := utils.License()
	defer func() {
		utils.SetIsLicensed(isLicensed)
		utils.SetLicense(license)
		th.App.SetDefaultRolesBasedOnConfig()
	}()
	th.App.UpdateConfig(func(cfg *model.Config) {
		*cfg.TeamSettings.RestrictPrivateChannelManageMembers = model.PERMISSIONS_ALL
	})
	utils.SetIsLicensed(true)
	utils.SetLicense(&model.License{Features: &model.Features{}})
	utils.License().Features.SetDefaults()
	th.App.SetDefaultRolesBasedOnConfig()

	// Check that a regular channel user can add other users.
	Client.Login(user2.Username, user2.Password)
	privateChannel = th.CreatePrivateChannel()
	_, resp = Client.AddChannelMember(privateChannel.Id, user.Id)
	CheckNoError(t, resp)
	Client.Logout()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckNoError(t, resp)
	Client.Logout()

	// Test with CHANNEL_ADMIN level permission.
	th.App.UpdateConfig(func(cfg *model.Config) {
		*cfg.TeamSettings.RestrictPrivateChannelManageMembers = model.PERMISSIONS_CHANNEL_ADMIN
	})
	utils.SetIsLicensed(true)
	utils.SetLicense(&model.License{Features: &model.Features{}})
	utils.License().Features.SetDefaults()
	th.App.SetDefaultRolesBasedOnConfig()

	Client.Login(user2.Username, user2.Password)
	privateChannel = th.CreatePrivateChannel()
	_, resp = Client.AddChannelMember(privateChannel.Id, user.Id)
	CheckNoError(t, resp)
	Client.Logout()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckForbiddenStatus(t, resp)
	Client.Logout()

	th.MakeUserChannelAdmin(user, privateChannel)
	th.App.InvalidateAllCaches()
	utils.SetIsLicensed(true)
	utils.SetLicense(&model.License{Features: &model.Features{}})
	utils.License().Features.SetDefaults()
	th.App.SetDefaultRolesBasedOnConfig()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckNoError(t, resp)
	Client.Logout()

	// Test with TEAM_ADMIN level permission.
	th.App.UpdateConfig(func(cfg *model.Config) {
		*cfg.TeamSettings.RestrictPrivateChannelManageMembers = model.PERMISSIONS_TEAM_ADMIN
	})
	utils.SetIsLicensed(true)
	utils.SetLicense(&model.License{Features: &model.Features{}})
	utils.License().Features.SetDefaults()
	th.App.SetDefaultRolesBasedOnConfig()

	Client.Login(user2.Username, user2.Password)
	privateChannel = th.CreatePrivateChannel()
	_, resp = th.SystemAdminClient.AddChannelMember(privateChannel.Id, user.Id)
	CheckNoError(t, resp)
	Client.Logout()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckForbiddenStatus(t, resp)
	Client.Logout()

	th.UpdateUserToTeamAdmin(user, team)
	th.App.InvalidateAllCaches()
	utils.SetIsLicensed(true)
	utils.SetLicense(&model.License{Features: &model.Features{}})
	utils.License().Features.SetDefaults()
	th.App.SetDefaultRolesBasedOnConfig()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckNoError(t, resp)
	Client.Logout()

	// Test with SYSTEM_ADMIN level permission.
	th.App.UpdateConfig(func(cfg *model.Config) {
		*cfg.TeamSettings.RestrictPrivateChannelManageMembers = model.PERMISSIONS_SYSTEM_ADMIN
	})
	utils.SetIsLicensed(true)
	utils.SetLicense(&model.License{Features: &model.Features{}})
	utils.License().Features.SetDefaults()
	th.App.SetDefaultRolesBasedOnConfig()

	Client.Login(user2.Username, user2.Password)
	privateChannel = th.CreatePrivateChannel()
	_, resp = th.SystemAdminClient.AddChannelMember(privateChannel.Id, user.Id)
	CheckNoError(t, resp)
	Client.Logout()

	Client.Login(user.Username, user.Password)
	_, resp = Client.AddChannelMember(privateChannel.Id, user3.Id)
	CheckForbiddenStatus(t, resp)
	Client.Logout()

	_, resp = th.SystemAdminClient.AddChannelMember(privateChannel.Id, user3.Id)
	CheckNoError(t, resp)
}
explode_data.jsonl/65667
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2835 }
[ 2830, 3393, 2212, 9629, 9366, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1005, 3803, 15944, 1005, 3803, 2320, 7210, 741, 16867, 270, 836, 682, 4454, 741, 71724, 1669, 270, 11716, 198, 19060, 1669, 270, 48868, 1474, 198, 19060, 17...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHashValues(t *testing.T) { s, err := Run() ok(t, err) defer s.Close() c, err := proto.Dial(s.Addr()) ok(t, err) defer c.Close() s.HSet("wim", "zus", "jet") s.HSet("wim", "teun", "vuur") s.HSet("wim", "gijs", "lam") s.HSet("wim", "kees", "bok") mustDo(t, c, "HVALS", "wim", proto.Strings( "bok", "jet", "lam", "vuur", ), ) mustDo(t, c, "HVALS", "nosuch", proto.Strings()) // Wrong key type s.Set("foo", "bar") mustDo(t, c, "HVALS", "foo", proto.Error(msgWrongType)) }
explode_data.jsonl/11374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 269 }
[ 2830, 3393, 6370, 6227, 1155, 353, 8840, 836, 8, 341, 1903, 11, 1848, 1669, 6452, 741, 59268, 1155, 11, 1848, 340, 16867, 274, 10421, 741, 1444, 11, 1848, 1669, 18433, 98462, 1141, 93626, 2398, 59268, 1155, 11, 1848, 340, 16867, 272, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestUnmarshalJSON decodes a Babel-style AST JSON document (the parse of
// `console.log('Hello, Godzilla')`) into a File and checks that the top-level
// Attr fields (type, span, source location) come out as expected.
func TestUnmarshalJSON(t *testing.T) {
	// Fixture: note the literal newline inside the backquoted string (between
	// "Hello, " and "Godzilla") is part of the JSON value and must be kept.
	s := `{"type":"File","start":0,"end":31,"loc":{"start":{"line":1,"column":0},"end":{"line":2,"column":0}},"program":{"type":"Program","start":0,"end":31,"loc":{"start":{"line":1,"column":0},"end":{"line":2,"column":0}},"sourceType":"script","body":[{"type":"ExpressionStatement","start":0,"end":30,"loc":{"start":{"line":1,"column":0},"end":{"line":1,"column":30}},"expression":{"type":"CallExpression","start":0,"end":30,"loc":{"start":{"line":1,"column":0},"end":{"line":1,"column":30}},"callee":{"type":"MemberExpression","start":0,"end":11,"loc":{"start":{"line":1,"column":0},"end":{"line":1,"column":11}},"object":{"type":"Identifier","start":0,"end":7,"loc":{"start":{"line":1,"column":0},"end":{"line":1,"column":7},"identifierName":"console"},"name":"console"},"property":{"type":"Identifier","start":8,"end":11,"loc":{"start":{"line":1,"column":8},"end":{"line":1,"column":11},"identifierName":"log"},"name":"log"},"computed":false},"arguments":[{"type":"StringLiteral","start":12,"end":29,"loc":{"start":{"line":1,"column":12},"end":{"line":1,"column":29}},"extra":{"rawValue":"Hello, Godzilla","raw":"'Hello, Godzilla'"},"value":"Hello, 
Godzilla"}]}}],"directives":[]},"comments":[],"tokens":[{"type":{"label":"name","beforeExpr":false,"startsExpr":true,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null},"value":"console","start":0,"end":7,"loc":{"start":{"line":1,"column":0},"end":{"line":1,"column":7}}},{"type":{"label":".","beforeExpr":false,"startsExpr":false,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null,"updateContext":null},"start":7,"end":8,"loc":{"start":{"line":1,"column":7},"end":{"line":1,"column":8}}},{"type":{"label":"name","beforeExpr":false,"startsExpr":true,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null},"value":"log","start":8,"end":11,"loc":{"start":{"line":1,"column":8},"end":{"line":1,"column":11}}},{"type":{"label":"(","beforeExpr":true,"startsExpr":true,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null},"start":11,"end":12,"loc":{"start":{"line":1,"column":11},"end":{"line":1,"column":12}}},{"type":{"label":"string","beforeExpr":false,"startsExpr":true,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null,"updateContext":null},"value":"Hello, Godzilla","start":12,"end":29,"loc":{"start":{"line":1,"column":12},"end":{"line":1,"column":29}}},{"type":{"label":")","beforeExpr":false,"startsExpr":false,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null},"start":29,"end":30,"loc":{"start":{"line":1,"column":29},"end":{"line":1,"column":30}}},{"type":{"label":"eof","beforeExpr":false,"startsExpr":false,"rightAssociative":false,"isLoop":false,"isAssign":false,"prefix":false,"postfix":false,"binop":null,"updateContext":null},"start":31,"end":31,"loc":{"start":{"line":2,"column":0},"end":{"line":2,"column":0}}}]}`
	got := &File{}
	if err := json.Unmarshal([]byte(s), got); err != nil {
		t.Fatalf("json unmarshal has error: %s", err)
	}
	// Only the top-level attributes are compared; the program body is not.
	want := &File{
		Attr: &Attr{
			Type:  "File",
			Start: 0,
			End:   31,
			Loc: &SourceLocation{
				Start: &Position{
					Line:   1,
					Column: 0,
				},
				End: &Position{
					Line:   2,
					Column: 0,
				},
			},
		},
	}
	if !reflect.DeepEqual(want.Attr, got.Attr) {
		t.Fatalf("file not equal: want=%s got=%s", want, got)
	}
}
explode_data.jsonl/53785
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1247 }
[ 2830, 3393, 1806, 27121, 5370, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 1565, 4913, 1313, 3252, 1703, 2198, 2468, 788, 15, 1335, 408, 788, 18, 16, 1335, 1074, 22317, 2468, 22317, 1056, 788, 16, 1335, 6229, 788, 15, 51193, 408, 22317,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestOrString(t *testing.T) { assert := require.New(t) assert.Equal(``, OrString()) assert.Equal(``, OrString(``)) assert.Equal(`one`, OrString(`one`)) assert.Equal(`two`, OrString(``, `two`, ``, `three`)) }
explode_data.jsonl/45567
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 91 }
[ 2830, 3393, 2195, 703, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 1373, 7121, 1155, 692, 6948, 12808, 5809, 7808, 2521, 703, 2398, 6948, 12808, 5809, 7808, 2521, 703, 5809, 63, 4390, 6948, 12808, 5809, 603, 7808, 2521, 703, 5809, 603, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestAppJavaResources checks that android_app java_resources are packaged:
// the resource jar feeds the dex-with-resources jar, which is aligned and then
// combined into the APK — both with srcs ("foo") and without ("bar").
func TestAppJavaResources(t *testing.T) {
	// Blueprint fixture kept verbatim; its whitespace is parsed as data.
	bp := ` android_app { name: "foo", sdk_version: "current", java_resources: ["resources/a"], srcs: ["a.java"], } android_app { name: "bar", sdk_version: "current", java_resources: ["resources/a"], } `
	ctx := testApp(t, bp)

	foo := ctx.ModuleForTests("foo", "android_common")
	fooResources := foo.Output("res/foo.jar")
	fooDexJar := foo.Output("dex-withres/foo.jar")
	fooDexJarAligned := foo.Output("dex-withres-aligned/foo.jar")
	fooApk := foo.Rule("combineApk")
	// resource jar -> dex-withres jar -> aligned jar -> apk
	if g, w := fooDexJar.Inputs.Strings(), fooResources.Output.String(); !android.InList(w, g) {
		t.Errorf("expected resource jar %q in foo dex jar inputs %q", w, g)
	}
	if g, w := fooDexJarAligned.Input.String(), fooDexJar.Output.String(); g != w {
		t.Errorf("expected dex jar %q in foo aligned dex jar inputs %q", w, g)
	}
	if g, w := fooApk.Inputs.Strings(), fooDexJarAligned.Output.String(); !android.InList(w, g) {
		t.Errorf("expected aligned dex jar %q in foo apk inputs %q", w, g)
	}

	// "bar" has no srcs: its resource jar goes directly into the apk.
	bar := ctx.ModuleForTests("bar", "android_common")
	barResources := bar.Output("res/bar.jar")
	barApk := bar.Rule("combineApk")
	if g, w := barApk.Inputs.Strings(), barResources.Output.String(); !android.InList(w, g) {
		t.Errorf("expected resources jar %q in bar apk inputs %q", w, g)
	}
}
explode_data.jsonl/58485
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 542 }
[ 2830, 3393, 2164, 15041, 11277, 1155, 353, 8840, 836, 8, 341, 2233, 79, 1669, 22074, 298, 197, 5954, 8191, 341, 571, 11609, 25, 330, 7975, 756, 571, 1903, 7584, 9438, 25, 330, 3231, 756, 571, 56171, 35569, 25, 4383, 12745, 14186, 8097...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func Test_reportStatusShouldntReport(t *testing.T) { ctx := log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout)) ext := createTestVMExtension() c := cmd{nil, "Install", false, 99} ext.HandlerEnv.StatusFolder = statusTestDirectory ext.RequestedSequenceNumber = 45 err := reportStatus(ctx, ext, status.StatusSuccess, c, "msg") require.NoError(t, err, "reportStatus failed") _, err = os.Stat(path.Join(statusTestDirectory, "45.status")) require.True(t, os.IsNotExist(err), "File exists when we don't expect it to") }
explode_data.jsonl/18572
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 182 }
[ 2830, 3393, 14813, 2522, 14996, 406, 10361, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 1487, 7121, 12154, 7395, 12531, 7121, 2201, 12501, 7395, 9638, 83225, 1171, 95450, 1669, 1855, 2271, 11187, 12049, 741, 1444, 1669, 5439, 90, 8385, 11,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGet(t *testing.T) { mockHTTP := NewTestClient(func(req *http.Request) *http.Response { return &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBufferString("0")), Header: make(http.Header), } }) c := Client{hc: *mockHTTP, maxRetries: 1} url := "https://mock/status/200" resp1, err := c.Get(url) if err != nil { t.Error(err) } req, err := NewGetRequest(url) if err != nil { t.Error(err) } resp2, err := c.Do(req) if err != nil { t.Error(err) } if resp1.StatusCode() != resp2.StatusCode() { t.Errorf("Expected status code %v, got %v\n", resp1.statusCode, resp2.statusCode) } if resp1.Retries() != resp2.Retries() { t.Errorf("Expected retries %v, got %v\n", resp1.retries, resp2.retries) } if bytes.Compare(resp1.Payload(), resp2.Payload()) != 0 { t.Errorf("Expected status code %v, got %v\n", resp1.payload, resp2.payload) } _, err = c.Get("") if err != nil { t.Error("Expected error due to wrong URL, got no error") } }
explode_data.jsonl/11462
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 440 }
[ 2830, 3393, 1949, 1155, 353, 8840, 836, 8, 341, 77333, 9230, 1669, 1532, 2271, 2959, 18552, 6881, 353, 1254, 9659, 8, 353, 1254, 12574, 341, 197, 853, 609, 1254, 12574, 515, 298, 197, 15872, 25, 220, 17, 15, 15, 345, 298, 197, 5444,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWorkflowTemplateServer_GetClusterWorkflowTemplate(t *testing.T) { server, ctx := getClusterWorkflowTemplateServer() t.Run("Labelled", func(t *testing.T) { cwftRsp, err := server.GetClusterWorkflowTemplate(ctx, &clusterwftmplpkg.ClusterWorkflowTemplateGetRequest{ Name: "cluster-workflow-template-whalesay-template2", }) if assert.NoError(t, err) { assert.NotNil(t, cwftRsp) assert.Equal(t, "cluster-workflow-template-whalesay-template2", cwftRsp.Name) assert.Contains(t, cwftRsp.Labels, common.LabelKeyControllerInstanceID) } }) t.Run("Unlabelled", func(t *testing.T) { _, err := server.GetClusterWorkflowTemplate(ctx, &clusterwftmplpkg.ClusterWorkflowTemplateGetRequest{ Name: "cluster-workflow-template-whalesay-template", }) assert.Error(t, err) }) }
explode_data.jsonl/48137
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 316 }
[ 2830, 3393, 62768, 7275, 5475, 13614, 28678, 62768, 7275, 1155, 353, 8840, 836, 8, 341, 41057, 11, 5635, 1669, 633, 28678, 62768, 7275, 5475, 741, 3244, 16708, 445, 2476, 832, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 1444, 86, 72...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestWriterFields(t *testing.T) { samples := []struct { channel string user string token string }{ {"#channelname", "username", "token"}, } for _, s := range samples { w, err := NewWriter(s.channel, s.user, s.token) if err != nil { t.Errorf("NewWriter failed c=%s u=%s t=%s", s.channel, s.user, s.token) } if c := w.Channel(); c != s.channel { t.Errorf(".Channel(): expected %s got %s", s.channel, c) } if u := w.Username(); u != s.user { t.Errorf(".Username(): expected %s got %s", s.user, u) } if a := w.Token(); a != s.token { t.Errorf(".Token(): expected %s got %s", s.token, a) } } }
explode_data.jsonl/71433
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 289 }
[ 2830, 3393, 6492, 8941, 1155, 353, 8840, 836, 8, 341, 1903, 4023, 1669, 3056, 1235, 341, 197, 71550, 914, 198, 197, 19060, 262, 914, 198, 197, 43947, 256, 914, 198, 197, 59403, 197, 197, 4913, 2, 10119, 606, 497, 330, 5113, 497, 330...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
// TestTopDownNestedReferences runs table-driven topdown evaluation cases that
// exercise nested base-document and virtual-document references (ground and
// non-ground, multiply nested, and mixed forms), comparing each rule set
// against its expected evaluation result over the small shared test data set.
func TestTopDownNestedReferences(t *testing.T) { tests := []struct { note string rules []string expected interface{} }{ // nested base document references {"ground ref", []string{`p = true { a[h[0][0]] = 2 }`}, "true"}, {"non-ground ref", []string{`p[x] { x = a[h[i][j]] }`}, "[2,3,4]"}, {"two deep", []string{`p[x] { x = a[a[a[i]]] }`}, "[3,4]"}, {"two deep", []string{`p[x] { x = a[h[i][a[j]]] }`}, "[3,4]"}, {"two deep repeated var", []string{`p[x] { x = a[h[i][a[i]]] }`}, "[3]"}, {"no suffix", []string{`p = true { 4 = a[three] }`}, "true"}, {"var ref", []string{`p[y] { x = [1, 2, 3]; y = a[x[_]] }`}, "[2,3,4]"}, {"undefined", []string{`p = true { a[three.deadbeef] = x }`}, ""}, // nested virtual document references {"vdoc ref: complete", []string{`p[x] { x = a[q[_]] }`, `q = [2, 3] { true }`}, "[3,4]"}, {"vdoc ref: complete: ground", []string{`p[x] { x = a[q[1]] }`, `q = [2, 3] { true }`}, "[4]"}, {"vdoc ref: complete: no suffix", []string{`p = true { 2 = a[q] }`, `q = 1 { true }`}, "true"}, {"vdoc ref: partial object", []string{ `p[x] { x = a[q[_]] }`, `q[k] = v { o = {"a": 2, "b": 3, "c": 100}; o[k] = v }`}, "[3,4]"}, {"vdoc ref: partial object: ground", []string{ `p[x] { x = a[q.b] }`, `q[k] = v { o = {"a": 2, "b": 3, "c": 100}; o[k] = v }`}, "[4]"}, // mixed cases {"vdoc ref: complete: nested bdoc ref", []string{ `p[x] { x = a[q[b[_]]] }`, `q = {"hello": 1, "goodbye": 3, "deadbeef": 1000} { true }`}, "[2,4]"}, {"vdoc ref: partial object: nested bdoc ref", []string{ `p[x] { x = a[q[b[_]]] }`, // bind to value `q[k] = v { o = {"hello": 1, "goodbye": 3, "deadbeef": 1000}; o[k] = v }`}, "[2,4]"}, {"vdoc ref: partial object: nested bdoc ref-2", []string{ `p[x] { x = a[q[d.e[_]]] }`, // bind to reference `q[k] = v { strings[k] = v }`}, "[3,4]"}, {"vdoc ref: multiple", []string{ `p[x] { x = q[a[_]].v[r[a[_]]] }`, `q = [{"v": {}}, {"v": [0, 0, 1, 2]}, {"v": [0, 0, 3, 4]}, {"v": [0, 0]}, {}] { true }`, `r = [1, 2, 3, 4] { true }`}, "[1,2,3,4]"}, } data := 
loadSmallTestData() for _, tc := range tests { runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected) } }
explode_data.jsonl/25201
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1083 }
[ 2830, 3393, 5366, 4454, 71986, 31712, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 9038, 1272, 257, 914, 198, 197, 7000, 2425, 262, 3056, 917, 198, 197, 42400, 3749, 16094, 197, 59403, 197, 197, 322, 24034, 2331, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestIndent(t *testing.T) { tpl := `{{indent 4 "a\nb\nc"}}` if err := runt(tpl, " a\n b\n c"); err != nil { t.Error(err) } }
explode_data.jsonl/63886
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 78 }
[ 2830, 3393, 42729, 1155, 353, 8840, 836, 8, 341, 3244, 500, 1669, 1565, 2979, 32840, 220, 19, 330, 64, 1699, 65, 59, 1016, 30975, 3989, 743, 1848, 1669, 1598, 83, 1155, 500, 11, 330, 262, 264, 1699, 262, 293, 1699, 262, 272, 5038, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestPullLatestDigest_Pulled(t *testing.T) { var pulledImage string listImages = func(_ *dockerClient.Client) []types.ImageSummary { return []types.ImageSummary{ { RepoDigests: []string{"TestRepo@TestLatestDigest"}, }, } } pullImage = func(_ *dockerClient.Client, imageRef string) { pulledImage = imageRef } cake := Cake{ LatestDigest: "TestLatestDigest", Repo: "TestRepo", } cake.PullLatestDigest() if pulledImage != "" { log(t, pulledImage, "") t.Fail() } }
explode_data.jsonl/49465
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 203 }
[ 2830, 3393, 36068, 31992, 45217, 1088, 91022, 1155, 353, 8840, 836, 8, 341, 2405, 13238, 1906, 914, 271, 14440, 14228, 284, 2915, 2490, 353, 28648, 2959, 11716, 8, 3056, 9242, 7528, 19237, 341, 197, 853, 3056, 9242, 7528, 19237, 515, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOneLineFileNoNewLine(t *testing.T) { _, err := newWithContents(t, `{"user":"scheduler", "readonly": true, "resource": "pods", "namespace":"ns1"}`) if err != nil { t.Errorf("unable to read policy file: %v", err) } }
explode_data.jsonl/46825
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 91 }
[ 2830, 3393, 3966, 2460, 1703, 2753, 3564, 2460, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1848, 1669, 501, 2354, 14803, 1155, 11, 1565, 4913, 872, 3252, 63122, 497, 220, 330, 22569, 788, 830, 11, 330, 9233, 788, 330, 79, 29697, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMetricsMetadata_MetadataType(t *testing.T) { testCases := map[string]struct { timestampColumnName string expectedMetadataType MetricsMetadataType }{ "Current stats metadata": {"", MetricsMetadataTypeCurrentStats}, "Interval stats metadata": {"timestampColumnName", MetricsMetadataTypeIntervalStats}, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { metricsMetadata := &MetricsMetadata{TimestampColumnName: testCase.timestampColumnName} assert.Equal(t, testCase.expectedMetadataType, metricsMetadata.MetadataType()) }) } }
explode_data.jsonl/64258
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 195 }
[ 2830, 3393, 27328, 14610, 62219, 929, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 3244, 4702, 26162, 220, 914, 198, 197, 42400, 14610, 929, 54190, 14610, 929, 198, 197, 59403, 197, 197, 1, 5405, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestTxNotifierCancelSpend verifies that cancelling one of two registered
// spend notification requests closes its Spend channel, while the remaining
// request still receives the full spend details once the spending block is
// connected and the notifier's height is dispatched.
func TestTxNotifierCancelSpend(t *testing.T) { t.Parallel() const startingHeight = 10 hintCache := newMockHintCache() n := chainntnfs.NewTxNotifier( startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, ) // We'll register two notification requests. Only the second one will be // canceled. op1 := wire.OutPoint{Index: 1} ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1) if err != nil { t.Fatalf("unable to register spend ntfn: %v", err) } op2 := wire.OutPoint{Index: 2} ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1) if err != nil { t.Fatalf("unable to register spend ntfn: %v", err) } // Construct the spending details of the outpoint and create a dummy // block containing it. spendTx := wire.NewMsgTx(2) spendTx.AddTxIn(&wire.TxIn{ PreviousOutPoint: op1, SignatureScript: testSigScript, }) spendTxHash := spendTx.TxHash() expectedSpendDetails := &chainntnfs.SpendDetail{ SpentOutPoint: &op1, SpenderTxHash: &spendTxHash, SpendingTx: spendTx, SpenderInputIndex: 0, SpendingHeight: startingHeight + 1, } block := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{spendTx}, }) // Before extending the notifier's tip with the dummy block above, we'll // cancel the second request. n.CancelSpend(ntfn2.HistoricalDispatch.SpendRequest, 2) err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions()) if err != nil { t.Fatalf("unable to connect block: %v", err) } if err := n.NotifyHeight(startingHeight + 1); err != nil { t.Fatalf("unable to dispatch notifications: %v", err) } // The first request should still be active, so we should receive a // spend notification with the correct spending details. select { case spendDetails := <-ntfn1.Event.Spend: assertSpendDetails(t, spendDetails, expectedSpendDetails) default: t.Fatalf("expected to receive spend notification") } // The second one, however, should not have. 
The event's Spend channel // must have also been closed to indicate the caller that the TxNotifier // can no longer fulfill their canceled request. select { case _, ok := <-ntfn2.Event.Spend: if ok { t.Fatal("expected Spend channel to be closed") } default: t.Fatal("expected Spend channel to be closed") } }
explode_data.jsonl/67720
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 817 }
[ 2830, 3393, 31584, 64729, 9269, 50, 3740, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 4777, 5916, 3640, 284, 220, 16, 15, 198, 9598, 396, 8233, 1669, 501, 11571, 26987, 8233, 741, 9038, 1669, 8781, 406, 77, 3848, 7121, 3158...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestNonContiguousWeightedUndirectedMultiplex(t *testing.T) { g := simple.NewWeightedUndirectedGraph(0, 0) for _, e := range []simple.WeightedEdge{ {F: simple.Node(0), T: simple.Node(1), W: 1}, {F: simple.Node(4), T: simple.Node(5), W: 1}, } { g.SetWeightedEdge(e) } func() { defer func() { r := recover() if r != nil { t.Error("unexpected panic with non-contiguous ID range") } }() ModularizeMultiplex(UndirectedLayers{g}, nil, nil, true, nil) }() }
explode_data.jsonl/80622
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 211 }
[ 2830, 3393, 8121, 818, 27029, 8295, 291, 19957, 74612, 57251, 2571, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 4285, 7121, 8295, 291, 19957, 74612, 11212, 7, 15, 11, 220, 15, 340, 2023, 8358, 384, 1669, 2088, 3056, 22944, 22404, 64507, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestResolvePath(t *testing.T) { is := is.New(t) s := NewShell(shellUrl) childHash, err := s.ResolvePath(fmt.Sprintf("/ipfs/%s/about", examplesHash)) is.Nil(err) is.Equal(childHash, "QmZTR5bcpQD7cFgTorqxZDYaew1Wqgfbd2ud9QqGPAkK2V") }
explode_data.jsonl/61086
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 56808, 1820, 1155, 353, 8840, 836, 8, 341, 19907, 1669, 374, 7121, 1155, 340, 1903, 1669, 1532, 25287, 93558, 2864, 692, 58391, 6370, 11, 1848, 1669, 274, 57875, 1820, 28197, 17305, 4283, 573, 3848, 12627, 82, 42516, 497, 10...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestStripPassword runs table-driven cases over MySQL and Postgres DSNs
// (including passwords containing ':' and '@', idempotent input, unix-socket
// pipes, and malformed strings) and checks that stripPassword redacts the
// credential portion as expected.
func TestStripPassword(t *testing.T) { for name, test := range map[string]struct { DSN string Schema string ExpectedOut string }{ "mysql": { DSN: "mysql://mmuser:password@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", Schema: "mysql", ExpectedOut: "mysql://mmuser:@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", }, "mysql idempotent": { DSN: "mysql://mmuser:@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", Schema: "mysql", ExpectedOut: "mysql://mmuser:@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", }, "mysql: password with : and @": { DSN: "mysql://mmuser:p:assw@ord@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", Schema: "mysql", ExpectedOut: "mysql://mmuser:@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", }, "mysql: password with @ and :": { DSN: "mysql://mmuser:pa@sswo:rd@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", Schema: "mysql", ExpectedOut: "mysql://mmuser:@tcp(localhost:3306)/mattermost?charset=utf8mb4,utf8&readTimeout=30s", }, "postgres": { DSN: "postgres://mmuser:password@localhost:5432/mattermost?sslmode=disable&connect_timeout=10", Schema: "postgres", ExpectedOut: "postgres://mmuser:@localhost:5432/mattermost?sslmode=disable&connect_timeout=10", }, "pipe": { DSN: "mysql://user@unix(/path/to/socket)/dbname", Schema: "mysql", ExpectedOut: "mysql://user@unix(/path/to/socket)/dbname", }, "malformed without :": { DSN: "postgres://mmuserpassword@localhost:5432/mattermost?sslmode=disable&connect_timeout=10", Schema: "postgres", ExpectedOut: "postgres://mmuserpassword@localhost:5432/mattermost?sslmode=disable&connect_timeout=10", }, "malformed without @": { DSN: "postgres://mmuser:passwordlocalhost:5432/mattermost?sslmode=disable&connect_timeout=10", Schema: "postgres", ExpectedOut: "(omitted due to error parsing the DSN)", }, } { t.Run(name, func(t *testing.T) { out := stripPassword(test.DSN, test.Schema) 
assert.Equal(t, test.ExpectedOut, out) }) } }
explode_data.jsonl/80629
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1052 }
[ 2830, 3393, 5901, 4876, 1155, 353, 8840, 836, 8, 341, 2023, 829, 11, 1273, 1669, 2088, 2415, 14032, 60, 1235, 341, 197, 197, 5936, 45, 260, 914, 198, 197, 77838, 414, 914, 198, 197, 197, 18896, 2662, 914, 198, 197, 59403, 197, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUpdateRuntimeConfigurationFactoryConfig(t *testing.T) { assert := assert.New(t) config := oci.RuntimeConfig{} expectedFactoryConfig := oci.FactoryConfig{ Template: true, TemplatePath: defaultTemplatePath, VMCacheEndpoint: defaultVMCacheEndpoint, } tomlConf := tomlConfig{Factory: factory{Template: true}} err := updateRuntimeConfig("", tomlConf, &config, false) assert.NoError(err) assert.Equal(expectedFactoryConfig, config.FactoryConfig) }
explode_data.jsonl/11749
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 157 }
[ 2830, 3393, 4289, 15123, 7688, 4153, 2648, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 25873, 1669, 93975, 16706, 2648, 16094, 42400, 4153, 2648, 1669, 93975, 51008, 2648, 515, 197, 197, 7275, 25, 286, 830, 345, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestTestTraceKVStoreIterator iterates a trace-wrapped KV store and verifies
// that each Key/Value access writes the expected JSON trace line (iterKey /
// iterValue operations with base64 payloads and block-height metadata) to the
// buffer, that the decoded key/value pairs match the fixtures, and that the
// exhausted iterator panics on Next but closes cleanly.
func TestTestTraceKVStoreIterator(t *testing.T) { var buf bytes.Buffer store := newTraceKVStore(&buf) iterator := store.Iterator(nil, nil) s, e := iterator.Domain() require.Equal(t, []byte(nil), s) require.Equal(t, []byte(nil), e) testCases := []struct { expectedKey []byte expectedValue []byte expectedKeyOut string expectedvalueOut string }{ { expectedKey: kvPairs[0].Key, expectedValue: kvPairs[0].Value, expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", }, { expectedKey: kvPairs[1].Key, expectedValue: kvPairs[1].Value, expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", }, { expectedKey: kvPairs[2].Key, expectedValue: kvPairs[2].Value, expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", }, } for _, tc := range testCases { buf.Reset() ka := iterator.Key() require.Equal(t, tc.expectedKeyOut, buf.String()) buf.Reset() va := iterator.Value() require.Equal(t, tc.expectedvalueOut, buf.String()) require.Equal(t, tc.expectedKey, ka) require.Equal(t, tc.expectedValue, va) iterator.Next() } require.False(t, iterator.Valid()) require.Panics(t, iterator.Next) require.NotPanics(t, iterator.Close) }
explode_data.jsonl/52007
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 776 }
[ 2830, 3393, 2271, 6550, 82707, 6093, 11951, 1155, 353, 8840, 836, 8, 341, 2405, 6607, 5820, 22622, 271, 57279, 1669, 501, 6550, 82707, 6093, 2099, 5909, 340, 197, 6854, 1669, 3553, 40846, 27907, 11, 2092, 692, 1903, 11, 384, 1669, 15091...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestStore_GetBulk_Internal(t *testing.T) { t.Run("Failure while getting raw CouchDB documents", func(t *testing.T) { store := &store{db: &mockDB{errBulkGet: errors.New("mockDB BulkGet always fails")}} values, err := store.GetBulk("key") require.EqualError(t, err, "failure while getting documents: "+ "failure while sending request to CouchDB bulk docs endpoint: mockDB BulkGet always fails") require.Nil(t, values) }) }
explode_data.jsonl/72583
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 150 }
[ 2830, 3393, 6093, 13614, 88194, 37238, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 17507, 1393, 3709, 7112, 61128, 3506, 9293, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 57279, 1669, 609, 4314, 90, 1999, 25, 609, 16712, 3506, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWhen_String(t *testing.T) { for _, tt := range stringTests { t.Run(tt.desc, func(t *testing.T) { actual := tt.input.String() if tt.expected != actual { t.Errorf("want %q; got %q", tt.expected, actual) } }) } }
explode_data.jsonl/17477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 110 }
[ 2830, 3393, 4498, 31777, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 914, 18200, 341, 197, 3244, 16708, 47152, 30514, 11, 2915, 1155, 353, 8840, 836, 8, 341, 298, 88814, 1669, 17853, 10046, 6431, 741, 298, 743, 17853, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestNamespaceAggregateTiles wires mock source/target namespaces and two mock
// shards, expects AggregateTiles to be delegated to each bootstrapped shard
// with the cold-flush namespace hook, and checks that the per-shard processed
// tile counts are summed and that an empty warm index is created for the
// aggregation window's block start.
func TestNamespaceAggregateTiles(t *testing.T) { ctrl := xtest.NewController(t) defer ctrl.Finish() ctx := context.NewBackground() defer ctx.Close() var ( start = xtime.Now().Truncate(targetBlockSize) shard0ID = uint32(10) shard1ID = uint32(20) createdWarmIndexForBlockStart xtime.UnixNano ) opts, err := NewAggregateTilesOptions( start, start.Add(targetBlockSize), time.Second, targetNsID, AggregateTilesRegular, false, false, nil, insOpts) require.NoError(t, err) sourceNs, sourceCloser := newTestNamespaceWithIDOpts(t, sourceNsID, namespace.NewOptions()) defer sourceCloser() sourceNs.bootstrapState = Bootstrapped sourceRetentionOpts := sourceNs.nopts.RetentionOptions().SetBlockSize(sourceBlockSize) sourceNs.nopts = sourceNs.nopts.SetRetentionOptions(sourceRetentionOpts) targetNs, targetCloser := newTestNamespaceWithIDOpts(t, targetNsID, namespace.NewOptions()) defer targetCloser() targetNs.bootstrapState = Bootstrapped targetNs.createEmptyWarmIndexIfNotExistsFn = func(blockStart xtime.UnixNano) error { createdWarmIndexForBlockStart = blockStart return nil } targetRetentionOpts := targetNs.nopts.RetentionOptions().SetBlockSize(targetBlockSize) targetNs.nopts = targetNs.nopts.SetColdWritesEnabled(true).SetRetentionOptions(targetRetentionOpts) // Pass in mock cold flusher and expect the cold flush ns process to finish. 
mockOnColdFlushNs := NewMockOnColdFlushNamespace(ctrl) mockOnColdFlushNs.EXPECT().Done().Return(nil) mockOnColdFlush := NewMockOnColdFlush(ctrl) cfOpts := NewColdFlushNsOpts(false) mockOnColdFlush.EXPECT().ColdFlushNamespace(gomock.Any(), cfOpts).Return(mockOnColdFlushNs, nil) targetNs.opts = targetNs.opts.SetOnColdFlush(mockOnColdFlush) targetShard0 := NewMockdatabaseShard(ctrl) targetShard1 := NewMockdatabaseShard(ctrl) targetNs.shards[0] = targetShard0 targetNs.shards[1] = targetShard1 targetShard0.EXPECT().IsBootstrapped().Return(true) targetShard1.EXPECT().IsBootstrapped().Return(true) targetShard0.EXPECT().ID().Return(shard0ID) targetShard1.EXPECT().ID().Return(shard1ID) targetShard0.EXPECT(). AggregateTiles(ctx, sourceNs, targetNs, shard0ID, mockOnColdFlushNs, opts). Return(int64(3), nil) targetShard1.EXPECT(). AggregateTiles(ctx, sourceNs, targetNs, shard1ID, mockOnColdFlushNs, opts). Return(int64(2), nil) processedTileCount, err := targetNs.AggregateTiles(ctx, sourceNs, opts) require.NoError(t, err) assert.Equal(t, int64(3+2), processedTileCount) assert.Equal(t, start, createdWarmIndexForBlockStart) }
explode_data.jsonl/35385
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 941 }
[ 2830, 3393, 22699, 64580, 58365, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 856, 1944, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 20985, 1669, 2266, 7121, 8706, 741, 16867, 5635, 10421, 2822, 2405, 2399, 197, 21375, 262, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSmInfo(t *testing.T) { mSm := new(logics.SmLogic) r, err := mSm.SmInfo(context.Background(), 200, 429) logs.Info(err) rbyte, _ := json.Marshal(r) logs.Info(string(rbyte)) }
explode_data.jsonl/35922
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 83 }
[ 2830, 3393, 10673, 1731, 1155, 353, 8840, 836, 8, 341, 2109, 10673, 1669, 501, 12531, 1211, 92445, 26751, 340, 7000, 11, 1848, 1669, 296, 10673, 92445, 1731, 5378, 19047, 1507, 220, 17, 15, 15, 11, 220, 19, 17, 24, 340, 6725, 82, 20...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestNewResolverVanilla(t *testing.T) { r := httptransport.NewResolver(httptransport.Config{}) ar, ok := r.(resolver.AddressResolver) if !ok { t.Fatal("not the resolver we expected") } ewr, ok := ar.Resolver.(resolver.ErrorWrapperResolver) if !ok { t.Fatal("not the resolver we expected") } _, ok = ewr.Resolver.(resolver.SystemResolver) if !ok { t.Fatal("not the resolver we expected") } }
explode_data.jsonl/78374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 3564, 18190, 45224, 6241, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1758, 26445, 7121, 18190, 19886, 26445, 10753, 37790, 69340, 11, 5394, 1669, 435, 12832, 48943, 26979, 18190, 340, 743, 753, 562, 341, 197, 3244, 26133, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestNextHourTime(t *testing.T) { nextHour := nextHourTime() hourElapse := nextHour.Sub(time.Now()).Hours() if !almostEqual(hourElapse, 1.0) { t.Errorf("wrong next one hour. want=%f, got=%f", 1.0, hourElapse) } }
explode_data.jsonl/1962
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 93 }
[ 2830, 3393, 5847, 30254, 1462, 1155, 353, 8840, 836, 8, 341, 28144, 30254, 1669, 1790, 30254, 1462, 741, 9598, 413, 6582, 7477, 1669, 1790, 30254, 12391, 9730, 13244, 6011, 23235, 741, 743, 753, 59201, 2993, 71035, 6582, 7477, 11, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestMQTTIntegration spins up an MQTT broker in a docker container via
// dockertest, waits for it to accept connections, and runs the connect and
// disconnect sub-tests against it. The test is skipped in -short mode, and is
// currently skipped unconditionally because the MQTT library crashes on
// shutdown; the docker resource is purged on exit.
func TestMQTTIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } t.Skip("Skipping MQTT tests because the library crashes on shutdown") pool, err := dockertest.NewPool("") if err != nil { t.Skipf("Could not connect to docker: %s", err) } pool.MaxWait = time.Second * 30 resource, err := pool.Run("ncarlier/mqtt", "latest", nil) if err != nil { t.Fatalf("Could not start resource: %s", err) } urls := []string{fmt.Sprintf("tcp://localhost:%v", resource.GetPort("1883/tcp"))} if err = pool.Retry(func() error { client, err := getMQTTConn(urls) if err == nil { client.Disconnect(0) } return err }); err != nil { t.Fatalf("Could not connect to docker resource: %s", err) } defer func() { if err = pool.Purge(resource); err != nil { t.Logf("Failed to clean up docker resource: %v", err) } }() t.Run("TestMQTTConnect", func(te *testing.T) { testMQTTConnect(urls, te) }) t.Run("TestMQTTDisconnect", func(te *testing.T) { testMQTTDisconnect(urls, te) }) }
explode_data.jsonl/31235
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 418 }
[ 2830, 3393, 35169, 14903, 52464, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 85945, 17590, 1273, 304, 2805, 3856, 1138, 197, 532, 3244, 57776, 445, 85945, 61424, 7032, 1576, 279, 6733, 36137, 389, 23...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestAutoUnsubscribePropagation creates 100 subscriptions that each
// auto-unsubscribe after one message and, after tripping every one of them,
// verifies the remote server in the two-server cluster is left with zero
// orphan subscriptions.
func TestAutoUnsubscribePropagation(t *testing.T) { srvA, srvB, optsA, _ := runServers(t) defer srvA.Shutdown() defer srvB.Shutdown() clientA := createClientConn(t, optsA.Host, optsA.Port) defer clientA.Close() sendA, expectA := setupConn(t, clientA) expectMsgs := expectMsgsCommand(t, expectA) // We will create subscriptions that will auto-unsubscribe and make sure // we are not accumulating orphan subscriptions on the other side. for i := 1; i <= 100; i++ { sub := fmt.Sprintf("SUB foo %d\r\n", i) auto := fmt.Sprintf("UNSUB %d 1\r\n", i) sendA(sub) sendA(auto) // This will trip the auto-unsubscribe sendA("PUB foo 2\r\nok\r\n") expectMsgs(1) } sendA("PING\r\n") expectA(pongRe) time.Sleep(50 * time.Millisecond) // Make sure number of subscriptions on B is correct if subs := srvB.NumSubscriptions(); subs != 0 { t.Fatalf("Expected no subscriptions on remote server, got %d\n", subs) } }
explode_data.jsonl/5075
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 373 }
[ 2830, 3393, 13253, 1806, 9384, 35172, 1155, 353, 8840, 836, 8, 341, 1903, 10553, 32, 11, 43578, 33, 11, 12185, 32, 11, 716, 1669, 1598, 78139, 1155, 340, 16867, 43578, 32, 10849, 18452, 741, 16867, 43578, 33, 10849, 18452, 2822, 25291, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestCheckEnablePerformanceInsights evaluates the performance-insights rule
// against RDS instance fixtures: an instance with insights disabled must
// produce a failed result for this rule, while one with insights enabled (and
// a KMS key set) must not.
func TestCheckEnablePerformanceInsights(t *testing.T) { tests := []struct { name string input rds.RDS expected bool }{ { name: "RDS Instance with performance insights disabled", input: rds.RDS{ Metadata: types.NewTestMetadata(), Instances: []rds.Instance{ { Metadata: types.NewTestMetadata(), PerformanceInsights: rds.PerformanceInsights{ Metadata: types.NewTestMetadata(), Enabled: types.Bool(false, types.NewTestMetadata()), KMSKeyID: types.String("some-kms-key", types.NewTestMetadata()), }, }, }, }, expected: true, }, { name: "RDS Instance with performance insights enabled and KMS key provided", input: rds.RDS{ Metadata: types.NewTestMetadata(), Instances: []rds.Instance{ { Metadata: types.NewTestMetadata(), PerformanceInsights: rds.PerformanceInsights{ Metadata: types.NewTestMetadata(), Enabled: types.Bool(true, types.NewTestMetadata()), KMSKeyID: types.String("some-kms-key", types.NewTestMetadata()), }, }, }, }, expected: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var testState state.State testState.AWS.RDS = test.input results := CheckEnablePerformanceInsights.Evaluate(&testState) var found bool for _, result := range results { if result.Status() == rules.StatusFailed && result.Rule().LongID() == CheckEnablePerformanceInsights.Rule().LongID() { found = true } } if test.expected { assert.True(t, found, "Rule should have been found") } else { assert.False(t, found, "Rule should not have been found") } }) } }
explode_data.jsonl/31836
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 715 }
[ 2830, 3393, 3973, 11084, 34791, 15474, 2796, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 22427, 262, 435, 5356, 2013, 5936, 198, 197, 42400, 1807, 198, 197, 59403, 197, 197, 515, 298, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestChequebookDeposit constructs a chequebook service over mocked ERC20
// balance/transfer calls that validate the owner address, destination, and
// deposit amount, then checks that Deposit returns the transaction hash the
// mock transfer produced.
func TestChequebookDeposit(t *testing.T) { address := common.HexToAddress("0xabcd") ownerAdress := common.HexToAddress("0xfff") balance := big.NewInt(30) depositAmount := big.NewInt(20) txHash := common.HexToHash("0xdddd") chequebookService, err := chequebook.New( transactionmock.New(), address, ownerAdress, nil, &chequeSignerMock{}, erc20mock.New( erc20mock.WithBalanceOfFunc(func(ctx context.Context, address common.Address) (*big.Int, error) { if address != ownerAdress { return nil, errors.New("getting balance of wrong address") } return balance, nil }), erc20mock.WithTransferFunc(func(ctx context.Context, to common.Address, value *big.Int) (common.Hash, error) { if to != address { return common.Hash{}, fmt.Errorf("sending to wrong address. wanted %x, got %x", address, to) } if depositAmount.Cmp(value) != 0 { return common.Hash{}, fmt.Errorf("sending wrong value. wanted %d, got %d", depositAmount, value) } return txHash, nil }), ), ) if err != nil { t.Fatal(err) } returnedTxHash, err := chequebookService.Deposit(context.Background(), depositAmount) if err != nil { t.Fatal(err) } if txHash != returnedTxHash { t.Fatalf("returned wrong transaction hash. wanted %v, got %v", txHash, returnedTxHash) } }
explode_data.jsonl/41440
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 520 }
[ 2830, 3393, 26843, 591, 2190, 78982, 1155, 353, 8840, 836, 8, 341, 63202, 1669, 4185, 91538, 1249, 4286, 445, 15, 52616, 4385, 1138, 197, 8118, 2589, 673, 1669, 4185, 91538, 1249, 4286, 445, 15, 87812, 1138, 2233, 4978, 1669, 2409, 7121...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEvalSymlinksIsNotExist(t *testing.T) { testenv.MustHaveSymlink(t) defer chtmpdir(t)() _, err := filepath.EvalSymlinks("notexist") if !os.IsNotExist(err) { t.Errorf("expected the file is not found, got %v\n", err) } err = os.Symlink("notexist", "link") if err != nil { t.Fatal(err) } defer os.Remove("link") _, err = filepath.EvalSymlinks("link") if !os.IsNotExist(err) { t.Errorf("expected the file is not found, got %v\n", err) } }
explode_data.jsonl/1666
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 203 }
[ 2830, 3393, 54469, 34667, 1014, 15504, 3872, 45535, 1155, 353, 8840, 836, 8, 341, 18185, 3160, 50463, 12116, 34667, 44243, 1155, 692, 16867, 272, 426, 1307, 3741, 1155, 8, 2822, 197, 6878, 1848, 1669, 26054, 5142, 831, 34667, 1014, 15504,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestJRPCChannel runs each command helper against a JSON-RPC client backed by
// a test node, tolerating unsupported actions but failing hard when the RPC
// method itself cannot be found.
func TestJRPCChannel(t *testing.T) {
	// Start the RPC mocker.
	mocker := testnode.New("--notset--", nil)
	defer func() {
		mocker.Close()
	}()
	mocker.Listen()
	jrpcClient := mocker.GetJSONC()
	testCases := []struct {
		fn func(*testing.T, *jsonclient.JSONClient) error
	}{
		{fn: testBackupCmd},
		{fn: testPrepareCmd},
		{fn: testPerformCmd},
		{fn: testCancelCmd},
		{fn: testRetrieveQueryCmd},
	}
	for index, testCase := range testCases {
		err := testCase.fn(t, jrpcClient)
		if err == nil {
			continue
		}
		// ErrActionNotSupport is an acceptable outcome; anything else is suspect.
		assert.NotEqualf(t, err, types.ErrActionNotSupport, "test index %d", index)
		// A missing RPC method means the endpoint is not wired up at all.
		if strings.Contains(err.Error(), "rpc: can't find") {
			assert.FailNowf(t, err.Error(), "test index %d", index)
		}
	}
}
explode_data.jsonl/18079
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 302 }
[ 2830, 3393, 41, 29528, 9629, 1155, 353, 8840, 836, 8, 341, 197, 322, 38433, 107, 27733, 29528, 76, 13659, 198, 2109, 13659, 1669, 1273, 3509, 7121, 21549, 1921, 746, 313, 497, 2092, 340, 16867, 2915, 368, 341, 197, 2109, 13659, 10421, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSetObjWithVersion verifies that SetObj overwrites an existing etcd key
// (pre-seeded at ModifiedIndex 1) with the encoded form of the given object.
func TestSetObjWithVersion(t *testing.T) {
	obj := &api.Pod{JSONBase: api.JSONBase{ID: "foo", ResourceVersion: 1}}
	fakeClient := NewFakeEtcdClient(t)
	// Enable compare-and-swap index checking on the fake client.
	fakeClient.TestIndex = true
	fakeClient.Data["/some/key"] = EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value:         runtime.DefaultScheme.EncodeOrDie(obj),
				ModifiedIndex: 1,
			},
		},
	}
	helper := EtcdHelper{fakeClient, codec, versioner}
	err := helper.SetObj("/some/key", obj)
	if err != nil {
		t.Fatalf("Unexpected error %#v", err)
	}
	// The stored value must match an independent encoding of the same object.
	data, err := codec.Encode(obj)
	if err != nil {
		t.Fatalf("Unexpected error %#v", err)
	}
	expect := string(data)
	got := fakeClient.Data["/some/key"].R.Node.Value
	if expect != got {
		t.Errorf("Wanted %v, got %v", expect, got)
	}
}
explode_data.jsonl/28144
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 319 }
[ 2830, 3393, 1649, 5261, 2354, 5637, 1155, 353, 8840, 836, 8, 341, 22671, 1669, 609, 2068, 88823, 90, 5370, 3978, 25, 6330, 18009, 3978, 90, 915, 25, 330, 7975, 497, 11765, 5637, 25, 220, 16, 11248, 1166, 726, 2959, 1669, 1532, 52317, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRGB_Sprintln(t *testing.T) { RGBs := []RGB{{0, 0, 0}, {127, 127, 127}, {255, 255, 255}} for _, rgb := range RGBs { t.Run("", func(t *testing.T) { internal.TestSprintlnContains(t, func(a interface{}) string { return rgb.Sprintln(a) }) }) } }
explode_data.jsonl/62952
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 119 }
[ 2830, 3393, 18184, 1098, 33655, 1155, 353, 8840, 836, 8, 341, 11143, 5381, 82, 1669, 3056, 18184, 2979, 15, 11, 220, 15, 11, 220, 15, 2137, 314, 16, 17, 22, 11, 220, 16, 17, 22, 11, 220, 16, 17, 22, 2137, 314, 17, 20, 20, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestStackParenting runs the stack_parenting integration program and checks
// that every resource in the resulting checkpoint carries the expected parent
// URN, reconstructing the intended resource tree from the deployment.
func TestStackParenting(t *testing.T) {
	integration.ProgramTest(t, &integration.ProgramTestOptions{
		Dir:          "stack_parenting",
		Dependencies: []string{"@pulumi/pulumi"},
		Quick:        true,
		ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
			// Ensure the checkpoint contains resources parented correctly. This should look like this:
			//
			//     A      F
			//    / \      \
			//   B   C      G
			//      / \
			//     D   E
			//
			// with the caveat, of course, that A and F will share a common parent, the implicit stack.
			assert.NotNil(t, stackInfo.Deployment)
			if assert.Equal(t, 9, len(stackInfo.Deployment.Resources)) {
				// The first resource is the implicit root stack; it has no parent.
				stackRes := stackInfo.Deployment.Resources[0]
				assert.NotNil(t, stackRes)
				assert.Equal(t, resource.RootStackType, stackRes.Type)
				assert.Equal(t, "", string(stackRes.Parent))
				// Map resource names to URNs so children can be matched to parents
				// already seen earlier in the (ordered) resource list.
				urns := make(map[string]resource.URN)
				for _, res := range stackInfo.Deployment.Resources[1:] {
					assert.NotNil(t, res)
					urns[string(res.URN.Name())] = res.URN
					switch res.URN.Name() {
					case "a", "f":
						assert.NotEqual(t, "", res.Parent)
						assert.Equal(t, stackRes.URN, res.Parent)
					case "b", "c":
						assert.Equal(t, urns["a"], res.Parent)
					case "d", "e":
						assert.Equal(t, urns["c"], res.Parent)
					case "g":
						assert.Equal(t, urns["f"], res.Parent)
					case "default":
						// Default providers are not parented.
						assert.Equal(t, "", string(res.Parent))
					default:
						t.Fatalf("unexpected name %s", res.URN.Name())
					}
				}
			}
		},
	})
}
explode_data.jsonl/76353
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 726 }
[ 2830, 3393, 4336, 8387, 287, 1155, 353, 8840, 836, 8, 341, 2084, 17376, 80254, 2271, 1155, 11, 609, 60168, 80254, 2271, 3798, 515, 197, 197, 6184, 25, 688, 330, 7693, 15960, 287, 756, 197, 197, 48303, 25, 3056, 917, 4913, 31, 79, 65...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestUsesRootRouterWhenPreviousRoutersFails(t *testing.T) { borrows := [][]string{} conn := &testutil.ConnFake{Table: &db.RoutingTable{ TimeToLive: 1, Routers: []string{"otherRouter"}, Readers: []string{"router1"}, }} var err error pool := &poolFake{ borrow: func(names []string, cancel context.CancelFunc, _ log.BoltLogger) (db.Connection, error) { borrows = append(borrows, names) return conn, err }, } nzero := time.Now() n := nzero router := New("rootRouter", func() []string { return []string{} }, nil, pool, logger, "routerid") router.now = func() time.Time { return n } dbName := "dbname" // First access should trigger initial table read from root router router.Readers(context.Background(), nil, dbName, nil) if borrows[0][0] != "rootRouter" { t.Errorf("Should have connected to root upon first router request") } // Next access should go to otherRouter n = n.Add(2 * time.Second) router.Readers(context.Background(), nil, dbName, nil) if borrows[1][0] != "otherRouter" { t.Errorf("Should have queried other router") } // Let the next access first fail when requesting otherRouter and then succeed requesting // rootRouter requestedOther := false requestedRoot := false pool.borrow = func(names []string, cancel context.CancelFunc, _ log.BoltLogger) (db.Connection, error) { if !requestedOther { if names[0] != "otherRouter" { t.Errorf("Expected request for otherRouter") return nil, errors.New("Wrong") } requestedOther = true return nil, errors.New("some err") } if names[0] != "rootRouter" { t.Errorf("Expected request for rootRouter") return nil, errors.New("oh") } requestedRoot = true return &testutil.ConnFake{Table: &db.RoutingTable{TimeToLive: 1, Readers: []string{"aReader"}}}, nil } n = n.Add(2 * time.Second) readers, err := router.Readers(context.Background(), nil, dbName, nil) if err != nil { t.Error(err) } if readers[0] != "aReader" { t.Errorf("Didn't get the expected reader") } if !requestedOther || !requestedRoot { t.Errorf("Should have requested both other and 
root routers") } }
explode_data.jsonl/40050
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 788 }
[ 2830, 3393, 68965, 8439, 9523, 4498, 21291, 49, 66095, 37, 6209, 1155, 353, 8840, 836, 8, 341, 2233, 269, 1811, 1669, 52931, 917, 31483, 32917, 1669, 609, 1944, 1314, 50422, 52317, 90, 2556, 25, 609, 1999, 2013, 10909, 2556, 515, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestProxy spins up a stub downstream HTTP server plus the proxy under test,
// then exercises routing, status propagation, headers, query strings, and
// request bodies through subtests.
func TestProxy(t *testing.T) {
	// Stub downstream server: each path exercises one proxy behaviour.
	downstreamServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/error" {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(500)
			w.Write([]byte(`{"error": "Something went wrong"}`))
		}
		if r.URL.Path == "/success" {
			w.Write([]byte(`{"success": true}`))
		}
		if r.URL.Path == "/headers" {
			// Echoes the client-sent header back and adds one of its own.
			w.Header().Set("X-Added-Downstream", "header added by downstream server")
			w.Header().Set("X-Received-Downstream", r.Header.Get("X-Sent-Downstream"))
			w.Write([]byte(`{"success": true}`))
		}
		if r.URL.Path == "/query" {
			response := fmt.Sprintf(`{"received": "%s"}`, r.URL.Query().Get("sent"))
			w.WriteHeader(200)
			w.Write([]byte(response))
		}
		if r.URL.Path == "/body" {
			bodyParams := make(map[string]string)
			err := json.NewDecoder(r.Body).Decode(&bodyParams)
			if err != nil {
				w.WriteHeader(500)
				fmt.Println("JSON decoding error", err)
				return
			}
			response := fmt.Sprintf(`{"received": "%s"}`, bodyParams["sent"])
			w.WriteHeader(200)
			w.Write([]byte(response))
		}
	}))
	defer downstreamServer.Close()
	// One route per downstream path, all forwarding to the stub server.
	errorRoute := routes.Route{
		IncomingRequestPath:  "/test/error",
		ForwardedRequestURL:  downstreamServer.URL,
		ForwardedRequestPath: "/error",
	}
	successRoute := routes.Route{
		IncomingRequestPath:  "/test/success",
		ForwardedRequestURL:  downstreamServer.URL,
		ForwardedRequestPath: "/success",
	}
	headersRoute := routes.Route{
		IncomingRequestPath:  "/test/headers",
		ForwardedRequestURL:  downstreamServer.URL,
		ForwardedRequestPath: "/headers",
	}
	bodyRoute := routes.Route{
		IncomingRequestPath:  "/test/body",
		ForwardedRequestURL:  downstreamServer.URL,
		ForwardedRequestPath: "/body",
	}
	queryRoute := routes.Route{
		IncomingRequestPath:  "/test/query",
		ForwardedRequestURL:  downstreamServer.URL,
		ForwardedRequestPath: "/query",
	}
	routesConfig := &routes.RoutesConfig{
		Routes: []routes.Route{
			successRoute, errorRoute, headersRoute, bodyRoute, queryRoute,
		},
	}
	logger := &logger.NullLogger{}
	// Start the proxy on a fixed local port and stop it when the test ends.
	proxy := NewProxy(logger, routesConfig, 8080)
	go func() { proxy.Start() }()
	defer proxy.Stop()
	assert := assert.New(t)
	t.Run("when incoming request does not match any Routes", func(t *testing.T) {
		resp, err := http.Get("http://localhost:8080/not_found")
		defer resp.Body.Close()
		assert.Nil(err)
		assert.Equal(404, resp.StatusCode)
	})
	t.Run("when incoming request does not quite match any Routes", func(t *testing.T) {
		// A longer path that merely shares a prefix with a route must not match.
		resp, err := http.Get("http://localhost:8080/test/error/specific_error")
		defer resp.Body.Close()
		assert.Nil(err)
		assert.Equal(404, resp.StatusCode)
	})
	t.Run("when the downstream endpoint returns an error", func(t *testing.T) {
		resp, err := http.Get("http://localhost:8080/test/error")
		defer resp.Body.Close()
		assert.Nil(err)
		body, err := ioutil.ReadAll(resp.Body)
		assert.Nil(err)
		assert.Equal(500, resp.StatusCode)
		assert.Equal(string(body), `{"error": "Something went wrong"}`)
	})
	t.Run("when the downstream endpoint returns a successful response", func(t *testing.T) {
		resp, err := http.Get("http://localhost:8080/test/success")
		assert.Nil(err)
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		assert.Nil(err)
		assert.Equal(200, resp.StatusCode)
		assert.Equal(string(body), `{"success": true}`)
	})
	t.Run("when headers are provided", func(t *testing.T) {
		// Request headers must be forwarded downstream and echoed back.
		req, err := http.NewRequest("GET", "http://localhost:8080/test/headers", nil)
		assert.Nil(err)
		req.Header.Add("X-Sent-Downstream", "test sent header")
		resp, err := http.DefaultClient.Do(req)
		assert.Nil(err)
		assert.Equal(200, resp.StatusCode)
		assert.Equal("test sent header", resp.Header.Get("X-Received-Downstream"))
	})
	t.Run("when headers are added by the downstream endpoint", func(t *testing.T) {
		// Response headers set downstream must survive the trip back through the proxy.
		resp, err := http.Get("http://localhost:8080/test/headers")
		assert.Nil(err)
		defer resp.Body.Close()
		assert.Equal(200, resp.StatusCode)
		assert.Equal("header added by downstream server", resp.Header.Get("X-Added-Downstream"))
	})
	t.Run("when a request query string is provided", func(t *testing.T) {
		resp, err := http.Get("http://localhost:8080/test/query?sent=test")
		assert.Nil(err)
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		assert.Nil(err)
		assert.Equal(200, resp.StatusCode)
		assert.Equal(string(body), `{"received": "test"}`)
	})
	t.Run("when a request body is provided", func(t *testing.T) {
		requestBody := `{"sent": "test"}`
		resp, err := http.Post("http://localhost:8080/test/body", "application/json", strings.NewReader(requestBody))
		assert.Nil(err)
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		assert.Nil(err)
		assert.Equal(200, resp.StatusCode)
		assert.Equal(string(body), `{"received": "test"}`)
	})
}
explode_data.jsonl/54823
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1881 }
[ 2830, 3393, 16219, 1155, 353, 8840, 836, 8, 341, 2698, 779, 4027, 5475, 1669, 54320, 70334, 7121, 5475, 19886, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 743, 435, 20893, 17474, 621, 3521, 841, 1, 341, 298...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
// TestInboundConnContext checks that a context value installed via
// SetConnContext on the server options is visible inside inbound call handlers.
func TestInboundConnContext(t *testing.T) {
	opts := testutils.NewOpts().NoRelay().SetConnContext(func(ctx context.Context, conn net.Conn) context.Context {
		return context.WithValue(ctx, "foo", "bar")
	})
	testutils.WithTestServer(t, opts, func(t testing.TB, ts *testutils.TestServer) {
		alice := ts.Server()
		testutils.RegisterFunc(alice, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			// Verify that the context passed into the handler inherits from the base context
			// set by ConnContext
			assert.Equal(t, "bar", ctx.Value("foo"), "Value unexpectedly different from base context")
			return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
		})
		// Drive the handler with a round-trip echo from a fresh client.
		copts := testutils.NewOpts()
		bob := ts.NewClient(copts)
		testutils.AssertEcho(t, bob, ts.HostPort(), ts.ServiceName())
	})
}
explode_data.jsonl/78212
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 292 }
[ 2830, 3393, 641, 10891, 9701, 1972, 1155, 353, 8840, 836, 8, 341, 64734, 1669, 1273, 6031, 7121, 43451, 1005, 2753, 6740, 352, 1005, 1649, 9701, 1972, 18552, 7502, 2266, 9328, 11, 4534, 4179, 50422, 8, 2266, 9328, 341, 197, 853, 2266, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStripConnectionString(t *testing.T) { tests := []struct { connectionString, expected string }{ { "Endpoint=sb://something", "(redacted)", }, { "Endpoint=sb://dummynamespace.servicebus.windows.net/;SharedAccessKeyName=DummyAccessKeyName;SharedAccessKey=5dOntTRytoC24opYThisAsit3is2B+OGY1US/fuL3ly=", "Endpoint=sb://dummynamespace.servicebus.windows.net/", }, { "Endpoint=sb://dummynamespace.servicebus.windows.net/;SharedAccessKey=5dOntTRytoC24opYThisAsit3is2B+OGY1US/fuL3ly=", "Endpoint=sb://dummynamespace.servicebus.windows.net/", }, } for _, tt := range tests { res := stripConnectionString(tt.connectionString) assert.Equal(t, res, tt.expected) } }
explode_data.jsonl/53937
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 298 }
[ 2830, 3393, 5901, 40431, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 54590, 703, 11, 3601, 914, 198, 197, 59403, 197, 197, 515, 298, 197, 1, 27380, 14149, 65, 1110, 33331, 756, 298, 197, 29209, 1151, 22167, 15752,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestGenerateUUIDValidationCode_String checks the validator's generated code
// for a string field against a golden snippet (tabs stripped for comparison).
func TestGenerateUUIDValidationCode_String(t *testing.T) {
	v := NewUUIDValidator()
	e := UUIDTestStruct{}
	et := reflect.TypeOf(e)
	field, _ := et.FieldByName("UUIDString")
	code, err := v.Generate(et, field, []string{})
	require.NoError(t, err)
	// Normalize whitespace so the comparison is indentation-insensitive.
	code = strings.Replace(strings.TrimSpace(code), "\t", "", -1)
	require.Equal(t, "if err := gokay.IsUUID(&s.UUIDString); err != nil {\nerrorsUUIDString = append(errorsUUIDString, err)\n}", code)
}
explode_data.jsonl/48496
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 173 }
[ 2830, 3393, 31115, 24754, 13799, 2078, 31777, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 1532, 24754, 14256, 741, 7727, 1669, 23698, 2271, 9422, 16094, 197, 295, 1669, 8708, 73921, 2026, 340, 39250, 11, 716, 1669, 1842, 17087, 16898, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestHydraConsent drives the Hydra consent endpoint and verifies the redirect,
// the granted audience, and that the same "tid" is stamped into both the
// access token and the ID token.
func TestHydraConsent(t *testing.T) {
	s, _, _, h, _, err := setupHydraTest(true)
	if err != nil {
		t.Fatalf("setupHydraTest() failed: %v", err)
	}
	clientID := "cid"
	// Seed the Hydra mock with the consent request and acceptance response.
	h.GetConsentRequestResp = &hydraapi.ConsentRequest{
		Client:  &hydraapi.Client{ClientID: clientID},
		Context: map[string]interface{}{hydra.StateIDKey: consentStateID},
	}
	h.AcceptConsentResp = &hydraapi.RequestHandlerResponse{RedirectTo: hydraPublicURL}
	// Send Request.
	query := fmt.Sprintf("?consent_challenge=%s", consentChallenge)
	u := damURL + hydraConsentPath + query
	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodGet, u, nil)
	s.Handler.ServeHTTP(w, r)
	resp := w.Result()
	// The handler must redirect the browser back to Hydra.
	if resp.StatusCode != http.StatusSeeOther {
		t.Errorf("resp.StatusCode wants %d got %d", http.StatusSeeOther, resp.StatusCode)
	}
	l := resp.Header.Get("Location")
	if l != hydraPublicURL {
		t.Errorf("Location wants %s got %s", hydraPublicURL, l)
	}
	if diff := cmp.Diff(h.AcceptConsentReq.GrantedAudience, []string{clientID}); len(diff) != 0 {
		t.Errorf("GrantedAudience (-want +got): %s", diff)
	}
	if h.AcceptConsentReq.Session.AccessToken["cart"] != consentStateID {
		t.Errorf("AccessToken.cart = %v wants %v", h.AcceptConsentReq.Session.AccessToken["cart"], consentStateID)
	}
	atid, ok := h.AcceptConsentReq.Session.AccessToken["tid"].(string)
	if !ok {
		t.Fatalf("tid in access token in wrong type")
	}
	itid, ok := h.AcceptConsentReq.Session.IDToken["tid"].(string)
	if !ok {
		t.Fatalf("tid in id token in wrong type")
	}
	// Both tokens must carry the same transaction id.
	if itid != atid {
		t.Errorf("tid in id token and access token should be the same, %s, %s", itid, atid)
	}
}
explode_data.jsonl/18502
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 631 }
[ 2830, 3393, 30816, 22248, 15220, 306, 1155, 353, 8840, 836, 8, 341, 1903, 11, 8358, 8358, 305, 11, 8358, 1848, 1669, 6505, 30816, 22248, 2271, 3715, 340, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 15188, 30816, 22248, 2271, 368, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestClientWriteShutdown(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping test; see https://golang.org/issue/7237") } defer afterTest(t) ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) defer ts.Close() conn, err := net.Dial("tcp", ts.Listener.Addr().String()) if err != nil { t.Fatalf("Dial: %v", err) } err = conn.(*net.TCPConn).CloseWrite() if err != nil { t.Fatalf("Dial: %v", err) } donec := make(chan bool) go func() { defer close(donec) bs, err := ioutil.ReadAll(conn) if err != nil { t.Fatalf("ReadAll: %v", err) } got := string(bs) if got != "" { t.Errorf("read %q from server; want nothing", got) } }() select { case <-donec: case <-time.After(10 * time.Second): t.Fatalf("timeout") } }
explode_data.jsonl/22446
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 343 }
[ 2830, 3393, 2959, 7985, 62004, 1155, 353, 8840, 836, 8, 341, 743, 15592, 97574, 3126, 621, 330, 10393, 24, 1, 341, 197, 3244, 57776, 445, 4886, 5654, 1273, 26, 1490, 3703, 1110, 70, 37287, 2659, 14, 11159, 14, 22, 17, 18, 22, 1138, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestExtractTarFileToBuf builds a tar containing a directory, two regular
// files, and a symlink, then checks that extractFileFromTar returns a regular
// file's contents but refuses to extract a symlink.
func TestExtractTarFileToBuf(t *testing.T) {
	entries := []*testTarEntry{
		{
			header: &tar.Header{
				Name:     "folder/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
			},
		},
		{
			contents: "foo",
			header: &tar.Header{
				Name: "folder/foo.txt",
				Size: 3,
			},
		},
		{
			contents: "bar",
			header: &tar.Header{
				Name: "folder/bar.txt",
				Size: 3,
			},
		},
		{
			// Symlink entry pointing back at a regular file in the archive.
			header: &tar.Header{
				Name:     "folder/symlink.txt",
				Typeflag: tar.TypeSymlink,
				Linkname: "folder/foo.txt",
			},
		},
	}
	testTarPath, err := newTestTar(entries)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar1, err := os.Open(testTarPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer containerTar1.Close()
	tr := tar.NewReader(containerTar1)
	// Regular file: extraction succeeds and yields the exact contents.
	buf, err := extractFileFromTar(tr, "folder/foo.txt")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if string(buf) != "foo" {
		t.Errorf("unexpected contents, wanted: %s, got: %s", "foo", buf)
	}
	// Re-open the archive; the tar reader is forward-only.
	containerTar2, err := os.Open(testTarPath)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	defer containerTar2.Close()
	tr = tar.NewReader(containerTar2)
	// Symlink: extraction must fail.
	buf, err = extractFileFromTar(tr, "folder/symlink.txt")
	if err == nil {
		t.Errorf("expected error")
	}
}
explode_data.jsonl/51017
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 608 }
[ 2830, 3393, 28959, 62733, 1703, 1249, 15064, 1155, 353, 8840, 836, 8, 341, 197, 12940, 1669, 29838, 1944, 62733, 5874, 515, 197, 197, 515, 298, 20883, 25, 609, 26737, 15753, 515, 571, 21297, 25, 257, 330, 17668, 35075, 571, 27725, 9903,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
// TestCachedChartsRefreshChartAddsNewVersion deletes a known chart version from
// the cache and verifies that RefreshChart restores it; network-bound tarball
// and icon processing are stubbed out for the duration of the test.
func TestCachedChartsRefreshChartAddsNewVersion(t *testing.T) {
	// Stubs Download and processing
	DownloadAndExtractChartTarballOrig := charthelper.DownloadAndExtractChartTarball
	defer func() { charthelper.DownloadAndExtractChartTarball = DownloadAndExtractChartTarballOrig }()
	charthelper.DownloadAndExtractChartTarball = func(chart *swaggermodels.ChartPackage, repoURL string) error { return nil }
	DownloadAndProcessChartIconOrig := charthelper.DownloadAndProcessChartIcon
	defer func() { charthelper.DownloadAndProcessChartIcon = DownloadAndProcessChartIconOrig }()
	charthelper.DownloadAndProcessChartIcon = func(chart *swaggermodels.ChartPackage) error { return nil }
	// Start from a version known to exist in the stable repo.
	chart, err := chartsImplementation.ChartVersionFromRepo("stable", "datadog", "0.1.0")
	assert.NoErr(t, err)
	err = chartsImplementation.DeleteChart("stable", *chart.Name, *chart.Version)
	assert.NoErr(t, err)
	// After deletion the lookup must fail with the expected message.
	_, err = chartsImplementation.ChartVersionFromRepo("stable", *chart.Name, *chart.Version)
	assert.Err(t, err, errors.New("didn't find version "+*chart.Version+" of chart "+*chart.Name+"\n"))
	// Refreshing the chart should re-add the deleted version.
	err = chartsImplementation.RefreshChart("stable", *chart.Name)
	assert.NoErr(t, err)
}
explode_data.jsonl/37974
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 360 }
[ 2830, 3393, 70293, 64878, 14567, 14488, 72111, 3564, 5637, 1155, 353, 8840, 836, 8, 341, 197, 322, 794, 15738, 8577, 323, 8692, 198, 197, 11377, 3036, 28959, 14488, 62733, 3959, 62726, 1669, 1161, 339, 2947, 61204, 3036, 28959, 14488, 627...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCalculateChangesOmittedFields runs calculateChanges over fixture pairs
// containing legacy/omitted fields and compares the computed action and diff
// against golden files.
func TestCalculateChangesOmittedFields(t *testing.T) {
	tests := map[string]struct {
		platformFixture        string
		templateFixture        string
		expectedAction         string
		expectedDiffGoldenFile string
	}{
		"Rolebinding with legacy fields": {
			platformFixture:        "rolebinding-platform",
			templateFixture:        "rolebinding-template",
			expectedAction:         "Update",
			expectedDiffGoldenFile: "rolebinding-changed",
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			platformItem := getPlatformItem(t, "item-omitted-fields/"+tc.platformFixture+".yml")
			templateItem := getTemplateItem(t, "item-omitted-fields/"+tc.templateFixture+".yml")
			changes, err := calculateChanges(templateItem, platformItem, []string{}, true)
			if err != nil {
				t.Fatal(err)
			}
			// Exactly one change with the expected action must be produced.
			if len(changes) != 1 {
				t.Fatalf("Expected 1 change, got: %d", len(changes))
			}
			actualChange := changes[0]
			if actualChange.Action != tc.expectedAction {
				t.Fatalf("Expected change action to be: %s, got: %s", tc.expectedAction, actualChange.Action)
			}
			// Compare the rendered diff against the golden file when one is given.
			if len(tc.expectedDiffGoldenFile) > 0 {
				want := strings.TrimSpace(getGoldenDiff(t, "item-omitted-fields", tc.expectedDiffGoldenFile+".txt"))
				got := strings.TrimSpace(actualChange.Diff(true))
				if diff := cmp.Diff(want, got); diff != "" {
					t.Errorf("Change diff mismatch (-want +got):\n%s", diff)
				}
			}
		})
	}
}
explode_data.jsonl/33772
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 563 }
[ 2830, 3393, 47866, 11317, 46, 5483, 8941, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 2415, 14032, 60, 1235, 341, 197, 197, 15734, 18930, 286, 914, 198, 197, 22832, 18930, 286, 914, 198, 197, 42400, 2512, 260, 914, 198, 197, 42400, 21...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestExponentialGrowth__OnSuccess(t *testing.T) { g := &tester.ExponentialGrowth{Precision: 10} // When not bound it should double the test value assert.Equal(t, 40, g.OnSuccess(20)) assert.Equal(t, 80, g.OnSuccess(40)) assert.Equal(t, 160, g.OnSuccess(80)) // When bound it should return a middle value assert.Equal(t, 120, g.OnFail(160)) assert.Equal(t, 140, g.OnSuccess(120)) assert.Equal(t, 150, g.OnSuccess(140)) // Finally return 0 when precision is met assert.Equal(t, 0, g.OnSuccess(150)) }
explode_data.jsonl/66518
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 196 }
[ 2830, 3393, 840, 59825, 38, 19089, 563, 1925, 7188, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 609, 73358, 5121, 59825, 38, 19089, 90, 55501, 25, 220, 16, 15, 630, 197, 322, 3197, 537, 6822, 432, 1265, 1990, 279, 1273, 897, 198, 6948...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReleaseAllWorkflowLocks(t *testing.T) { cancel, controller := newController() defer cancel() t.Run("nilObject", func(t *testing.T) { controller.releaseAllWorkflowLocks(nil) }) t.Run("unStructuredObject", func(t *testing.T) { un := &unstructured.Unstructured{} controller.releaseAllWorkflowLocks(un) }) t.Run("otherObject", func(t *testing.T) { un := &wfv1.Workflow{} controller.releaseAllWorkflowLocks(un) }) }
explode_data.jsonl/2870
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 171 }
[ 2830, 3393, 16077, 2403, 62768, 11989, 82, 1155, 353, 8840, 836, 8, 341, 84441, 11, 6461, 1669, 501, 2051, 741, 16867, 9121, 741, 3244, 16708, 445, 8385, 1190, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 61615, 25561, 2403, 62768, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewHelmCLI(t *testing.T) { cli := helm.NewHelmCLI(binary, helm.V2, cwd, true, "arg1 arg2 arg3") assert.Equal(t, binary, cli.Binary) assert.Equal(t, cwd, cli.CWD) assert.Equal(t, helm.V2, cli.BinVersion) assert.Equal(t, true, cli.Debug) assert.NotNil(t, cli.Runner) assert.Equal(t, []string{"arg1", "arg2", "arg3"}, cli.Runner.CurrentArgs()) assert.Equal(t, binary, cli.Runner.CurrentName()) assert.Equal(t, cwd, cli.Runner.CurrentDir()) }
explode_data.jsonl/4639
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 206 }
[ 2830, 3393, 3564, 39, 23162, 63959, 1155, 353, 8840, 836, 8, 341, 86448, 1669, 33765, 7121, 39, 23162, 63959, 63926, 11, 33765, 5058, 17, 11, 46938, 11, 830, 11, 330, 858, 16, 1392, 17, 1392, 18, 1138, 6948, 12808, 1155, 11, 7868, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestHTTPProxyRouteWithAServiceWeight verifies that a single-service route
// ignores its Weight, while a two-service route produces a weighted cluster.
func TestHTTPProxyRouteWithAServiceWeight(t *testing.T) {
	rh, c, done := setup(t)
	defer done()
	rh.OnAdd(fixture.NewService("kuard").
		WithPorts(v1.ServicePort{Port: 80, TargetPort: intstr.FromInt(8080)}))
	// Single service: the weight has no effect on the generated route.
	proxy1 := &projcontour.HTTPProxy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "simple",
			Namespace: "default",
		},
		Spec: projcontour.HTTPProxySpec{
			VirtualHost: &projcontour.VirtualHost{Fqdn: "test2.test.com"},
			Routes: []projcontour.Route{{
				Conditions: conditions(prefixCondition("/a")),
				Services: []projcontour.Service{{
					Name:   "kuard",
					Port:   80,
					Weight: 90, // ignored
				}},
			}},
		},
	}
	rh.OnAdd(proxy1)
	assertRDS(t, c, "1", virtualhosts(
		envoy.VirtualHost("test2.test.com",
			&envoy_api_v2_route.Route{
				Match:  routePrefix("/a"),
				Action: routecluster("default/kuard/80/da39a3ee5e"),
			},
		),
	), nil)
	// Two services with distinct weights: expect a weighted cluster.
	proxy2 := &projcontour.HTTPProxy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "simple",
			Namespace: "default",
		},
		Spec: projcontour.HTTPProxySpec{
			VirtualHost: &projcontour.VirtualHost{Fqdn: "test2.test.com"},
			Routes: []projcontour.Route{{
				Conditions: conditions(prefixCondition("/a")),
				Services: []projcontour.Service{{
					Name:   "kuard",
					Port:   80,
					Weight: 90,
				}, {
					Name:   "kuard",
					Port:   80,
					Weight: 60,
				}},
			}},
		},
	}
	rh.OnUpdate(proxy1, proxy2)
	assertRDS(t, c, "2", virtualhosts(
		envoy.VirtualHost("test2.test.com",
			&envoy_api_v2_route.Route{
				Match: routePrefix("/a"),
				Action: routeweightedcluster(
					weightedcluster{"default/kuard/80/da39a3ee5e", 60},
					weightedcluster{"default/kuard/80/da39a3ee5e", 90}),
			},
		),
	), nil)
}
explode_data.jsonl/24119
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 808 }
[ 2830, 3393, 9230, 16219, 4899, 2354, 32, 1860, 8295, 1155, 353, 8840, 836, 8, 341, 7000, 71, 11, 272, 11, 2814, 1669, 6505, 1155, 340, 16867, 2814, 2822, 7000, 71, 8071, 2212, 94886, 7121, 1860, 445, 74, 11034, 38609, 197, 197, 2354, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLoadGraphToElastic_allRefsFailToInsert(t *testing.T) { const ( pkgpath = "github.com/korfuri/goref/testprograms/simple" ) client := &mocks.Client{} client.On("GetPackage", mock.Anything, mock.Anything).Return(nil, errors.New("not found")) client.On("CreatePackage", mock.Anything, mock.Anything).Return(nil) client.On("CreateFile", mock.Anything, mock.Anything).Return(&elastic.IndexResponse{}, nil) client.On("CreateRef", mock.Anything, mock.Anything).Return(nil, errors.New("cannot create ref")) pg := goref.NewPackageGraph(goref.ConstantVersion(0)) pg.LoadPackages([]string{pkgpath}, false) err := elasticsearch.LoadGraphToElastic(*pg, client) assert.Error(t, err) // Errors are capped at 20 reported in the error message assert.Contains(t, err.Error(), "entries couldn't be imported. Errors were:") var n int fmt.Sscanf(err.Error(), "%d entries couldn't be imported.", &n) assert.True(t, n > 20) r := regexp.MustCompile("\n") // There's an extra \n due to the leading message. assert.Equal(t, 21, len(r.FindAllStringIndex(err.Error(), -1))) }
explode_data.jsonl/54352
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 373 }
[ 2830, 3393, 5879, 11212, 1249, 36, 51179, 5705, 82807, 19524, 1249, 13780, 1155, 353, 8840, 836, 8, 341, 4777, 2399, 197, 3223, 7351, 2343, 284, 330, 5204, 905, 14109, 32842, 6070, 4846, 46752, 12697, 72953, 67195, 698, 197, 692, 25291, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBlockWire(t *testing.T) { tests := []struct { in *MsgBlock // Message to encode out *MsgBlock // Expected decoded message buf []byte // Wire encoding txLocs []TxLoc // Expected transaction locations sTxLocs []TxLoc // Expected stake transaction locations pver uint32 // Protocol version for wire encoding }{ // Latest protocol version. { &testBlock, &testBlock, testBlockBytes, testBlockTxLocs, testBlockSTxLocs, ProtocolVersion, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode the message to wire format. var buf bytes.Buffer err := test.in.BtcEncode(&buf, test.pver) if err != nil { t.Errorf("BtcEncode #%d error %v", i, err) continue } if !bytes.Equal(buf.Bytes(), test.buf) { t.Errorf("BtcEncode #%d\n got: %s want: %s", i, spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) continue } // Decode the message from wire format. var msg MsgBlock rbuf := bytes.NewReader(test.buf) err = msg.BtcDecode(rbuf, test.pver) if err != nil { t.Errorf("BtcDecode #%d error %v", i, err) continue } if !reflect.DeepEqual(&msg, test.out) { t.Errorf("BtcDecode #%d\n got: %s want: %s", i, spew.Sdump(&msg), spew.Sdump(test.out)) continue } } }
explode_data.jsonl/20100
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 566 }
[ 2830, 3393, 4713, 37845, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 17430, 414, 353, 6611, 4713, 442, 4856, 311, 16164, 198, 197, 13967, 257, 353, 6611, 4713, 442, 31021, 29213, 1943, 198, 197, 26398, 257, 3056, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestConfig(t *testing.T) { Convey("Given an environment with no environment variables set", t, func() { os.Clearenv() cfg, err := Get() Convey("When the config values are retrieved", func() { Convey("Then there should be no error returned", func() { So(err, ShouldBeNil) }) Convey("Then the values should be set to the expected defaults", func() { So(cfg.Debug, ShouldBeFalse) So(cfg.BindAddr, ShouldEqual, ":27700") So(cfg.PatternLibraryAssetsPath, ShouldEqual, "//cdn.ons.gov.uk/dp-design-system/8324f37") So(cfg.SupportedLanguages, ShouldResemble, []string{"en", "cy"}) So(cfg.GracefulShutdownTimeout, ShouldEqual, 5*time.Second) So(cfg.HealthCheckInterval, ShouldEqual, 30*time.Second) So(cfg.HealthCheckCriticalTimeout, ShouldEqual, 90*time.Second) So(cfg.APIRouterURL, ShouldEqual, "http://localhost:23200/v1") So(cfg.BabbageURL, ShouldEqual, "http://localhost:8080") So(cfg.MaxAgeKey, ShouldEqual, "") So(cfg.DefaultLimit, ShouldEqual, 10) So(cfg.DefaultMaximumLimit, ShouldEqual, 100) So(cfg.DefaultSort, ShouldEqual, "date-newest") So(cfg.DefaultMaximumSearchResults, ShouldEqual, 1000) }) Convey("Then a second call to config should return the same config", func() { newCfg, newErr := Get() So(newErr, ShouldBeNil) So(newCfg, ShouldResemble, cfg) }) }) }) }
explode_data.jsonl/65193
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 543 }
[ 2830, 3393, 2648, 1155, 353, 8840, 836, 8, 341, 93070, 5617, 445, 22043, 458, 4573, 448, 902, 4573, 7332, 738, 497, 259, 11, 2915, 368, 341, 197, 25078, 727, 273, 9151, 85, 741, 197, 50286, 11, 1848, 1669, 2126, 2822, 197, 93070, 56...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetChannelInfoMissingChannelID(t *testing.T) { InitMSP() resetFlags() signer, err := common.GetDefaultSigner() if err != nil { t.Fatalf("Get default signer error: %v", err) } mockCF := &ChannelCmdFactory{ Signer: signer, } cmd := getinfoCmd(mockCF) AddFlags(cmd) cmd.SetArgs([]string{}) assert.Error(t, cmd.Execute()) }
explode_data.jsonl/82666
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 138 }
[ 2830, 3393, 1949, 9629, 1731, 25080, 9629, 915, 1155, 353, 8840, 836, 8, 341, 98762, 44, 4592, 741, 70343, 9195, 2822, 69054, 261, 11, 1848, 1669, 4185, 2234, 3675, 7264, 261, 741, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 194...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestConfig_normalizeAddrs_NoAdvertise(t *testing.T) { c := &Config{ BindAddr: "127.0.0.1", Ports: &Ports{ HTTP: 4646, RPC: 4647, Serf: 4648, }, Addresses: &Addresses{}, AdvertiseAddrs: &AdvertiseAddrs{}, DevMode: false, } if err := c.normalizeAddrs(); err == nil { t.Fatalf("expected an error when no valid advertise address is available") } if c.AdvertiseAddrs.HTTP == "127.0.0.1:4646" { t.Fatalf("expected non-localhost HTTP advertise address, got %s", c.AdvertiseAddrs.HTTP) } if c.AdvertiseAddrs.RPC == "127.0.0.1:4647" { t.Fatalf("expected non-localhost RPC advertise address, got %s", c.AdvertiseAddrs.RPC) } if c.AdvertiseAddrs.Serf == "127.0.0.1:4648" { t.Fatalf("expected non-localhost Serf advertise address, got %s", c.AdvertiseAddrs.Serf) } }
explode_data.jsonl/76938
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 361 }
[ 2830, 3393, 2648, 80807, 2212, 5428, 36989, 2589, 67787, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 609, 2648, 515, 197, 197, 9950, 13986, 25, 330, 16, 17, 22, 13, 15, 13, 15, 13, 16, 756, 197, 197, 68273, 25, 609, 68273, 515, 298,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestMisusedAlias(t *testing.T) { require := require.New(t) f := getRule(resolveColumnsId) table := memory.NewTable("mytable", sql.NewPrimaryKeySchema(sql.Schema{ {Name: "i", Type: sql.Int32}, }), nil) node := plan.NewProject( []sql.Expression{ expression.NewAlias("alias_i", uc("i")), // like most missing column error cases, this error takes 2 passes to manifest and gets deferred on the first pass &deferredColumn{uc("alias_i")}, }, plan.NewResolvedTable(table, nil, nil), ) _, _, err := f.Apply(sql.NewEmptyContext(), nil, node, nil, DefaultRuleSelector) require.EqualError(err, sql.ErrMisusedAlias.New("alias_i").Error()) }
explode_data.jsonl/58299
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 242 }
[ 2830, 3393, 83159, 2591, 22720, 1155, 353, 8840, 836, 8, 341, 17957, 1669, 1373, 7121, 1155, 340, 1166, 1669, 633, 11337, 43234, 13965, 764, 692, 26481, 1669, 4938, 7121, 2556, 445, 2408, 2005, 497, 5704, 7121, 25981, 8632, 13148, 21105, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1