text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestChangeTodoName(t *testing.T) { t.Run("invalid name", func(t *testing.T) { r := &MemoryRepository{} s := NewService(r) todo, err := r.addTodo("gud name", nil, 1, defaultPriority()) if err != nil { t.Fatalf("error adding todo") } err = s.ChangeTodoName(todo.userID, todo.id, "") if err == nil { t.Fatalf("got nil error expected err") } }) t.Run("valid name", func(t *testing.T) { r := &MemoryRepository{} s := NewService(r) todo, err := r.addTodo("gud name", nil, 1, defaultPriority()) if err != nil { t.Fatalf("error adding todo") } newName := "gudder name" err = s.ChangeTodoName(todo.userID, todo.id, newName) if err != nil { t.Fatalf("got error expected no err") } memoryTodo, err := r.getMemoryTodo(todo.userID, todo.id) if err != nil { t.Fatalf("unable to get memory todo") } if memoryTodo.name != newName { t.Fatalf("got wrong name: %s, want: %s", memoryTodo.name, newName) } }) }
explode_data.jsonl/21375
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 424 }
[ 2830, 3393, 4072, 24176, 675, 1155, 353, 8840, 836, 8, 1476, 3244, 16708, 445, 11808, 829, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 7000, 1669, 609, 10642, 4624, 16094, 197, 1903, 1669, 1532, 1860, 2601, 692, 197, 3244, 6004, 11,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_ToSliceUnthreadsafe(t *testing.T) { s := makeUnsafeSet([]int{1, 2, 3}) setAsSlice := s.ToSlice() if len(setAsSlice) != s.Cardinality() { t.Errorf("Set length is incorrect: %v", len(setAsSlice)) } for _, i := range setAsSlice { if !s.Contains(i) { t.Errorf("Set is missing element: %v", i) } } }
explode_data.jsonl/214
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 38346, 33236, 1806, 4528, 18675, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 1281, 78770, 1649, 10556, 396, 90, 16, 11, 220, 17, 11, 220, 18, 3518, 8196, 2121, 33236, 1669, 274, 3274, 33236, 741, 743, 2422, 14171, 2121, 3323...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestReconcileEnableAutoTLSHTTPDisabled(t *testing.T) { table := TableTest{{ Name: "check that Route is correctly updated when Certificate is not ready", Objects: []runtime.Object{ Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), cfg("default", "config", WithConfigGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), // MakeCertificates will create a certificate with DNS name "*.test-ns.example.com" which is not the host name // needed by the input Route. &netv1alpha1.Certificate{ ObjectMeta: metav1.ObjectMeta{ Name: "route-12-34", Namespace: "default", Labels: map[string]string{ serving.RouteLabelKey: "becomes-ready", }, OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")))}, Annotations: map[string]string{ networking.CertificateClassAnnotationKey: network.CertManagerCertificateClassName, }, }, Spec: netv1alpha1.CertificateSpec{ DNSNames: []string{"abc.test.example.com"}, }, Status: notReadyCertStatus(), }, }, WantCreates: []runtime.Object{ ingressWithTLS( Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), &traffic.Config{ RevisionTargets: traffic.RevisionTargets{{ TrafficTarget: v1.TrafficTarget{ ConfigurationName: "config", RevisionName: "config-00001", Percent: ptr.Int64(100), LatestRevision: ptr.Bool(true), }, }}, Targets: map[string]traffic.RevisionTargets{ traffic.DefaultTarget: {{ TrafficTarget: v1.TrafficTarget{ // Use the Revision name from the config. 
RevisionName: "config-00001", Percent: ptr.Int64(100), }, ServiceName: "mcd", Active: true, }}, }, }, nil, nil, ), simpleK8sService( Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), WithExternalName("becomes-ready.default.example.com"), ), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: certificateWithStatus(resources.MakeCertificates(Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), map[string]string{"becomes-ready.default.example.com": ""}, network.CertManagerCertificateClassName)[0], notReadyCertStatus()), }}, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34"), // Populated by reconciliation when all traffic has been assigned. WithAddress, WithInitRouteConditions, MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic( v1.TrafficTarget{ RevisionName: "config-00001", Percent: ptr.Int64(100), LatestRevision: ptr.Bool(true), }), MarkCertificateNotReady, MarkIngressNotConfigured, // The certificate is not ready. But we still want to have HTTPS URL. 
WithHTTPSDomain), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), Eventf(corev1.EventTypeNormal, "Updated", "Updated Spec for Certificate %s/%s", "default", "route-12-34"), Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), }, Key: "default/becomes-ready", }} table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { cfg := ReconcilerTestConfig(true) cfg.Network.HTTPProtocol = network.HTTPDisabled r := &Reconciler{ kubeclient: kubeclient.Get(ctx), client: servingclient.Get(ctx), netclient: networkingclient.Get(ctx), configurationLister: listers.GetConfigurationLister(), revisionLister: listers.GetRevisionLister(), serviceLister: listers.GetK8sServiceLister(), ingressLister: listers.GetIngressLister(), certificateLister: listers.GetCertificateLister(), tracker: &NullTracker{}, clock: FakeClock{Time: fakeCurTime}, } return routereconciler.NewReconciler(ctx, logging.FromContext(ctx), servingclient.Get(ctx), listers.GetRouteLister(), controller.GetEventRecorder(ctx), r, controller.Options{ConfigStore: &testConfigStore{config: cfg}}) })) }
explode_data.jsonl/7816
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1844 }
[ 2830, 3393, 693, 40446, 457, 11084, 13253, 45439, 9230, 25907, 1155, 353, 8840, 836, 8, 341, 26481, 1669, 6633, 2271, 90, 515, 197, 21297, 25, 330, 2028, 429, 9572, 374, 12440, 6049, 979, 31402, 374, 537, 5527, 756, 197, 197, 11543, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMergeWalletStore(t *testing.T) { nft.RegisterAction(nft.DefaultActions...) w1 := wsFromGenesisFile(t, "./testdata/genesis.json") w2 := wsFromFile(t, "./testdata/wallets.json") expected := WalletStore{ Wallets: []cash.GenesisAccount{ { Address: toWeaveAddress(t, "3AFCDAB4CFBF066E959D139251C8F0EE91E99D5A"), Set: cash.Set{ Coins: []*coin.Coin{ { Ticker: "CASH", Whole: 123456789, Fractional: 5555555, }, }, }, }, { Address: toWeaveAddress(t, "12AFFBF6012FD2DF21416582DC80CBF1EFDF2460"), Set: cash.Set{ Coins: []*coin.Coin{ { Ticker: "CASH", Whole: 987654321, Fractional: 5555555, }, }, }, }, { Address: toWeaveAddress(t, "CE5D5A5CA8C7D545D7756D3677234D81622BA297"), Set: cash.Set{ Coins: []*coin.Coin{ { Ticker: "IOV", Whole: 123456789, Fractional: 5555555, }, }, }, }, { Address: toWeaveAddress(t, "D4821FD051696273D09E1FBAD0EBE5B5060787A7"), Set: cash.Set{ Coins: []*coin.Coin{ { Ticker: "IOV", Whole: 123456789, Fractional: 5555555, }, }, }, }, }, } actual := MergeWalletStore(w1, w2) assert.EqualValues(t, expected, actual, ToString(expected), ToString(actual)) }
explode_data.jsonl/11363
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 788 }
[ 2830, 3393, 52096, 38259, 6093, 1155, 353, 8840, 836, 8, 341, 9038, 723, 19983, 2512, 1445, 723, 13275, 12948, 31218, 6692, 16, 1669, 17624, 3830, 84652, 1703, 1155, 11, 5924, 92425, 14, 77894, 4323, 1138, 6692, 17, 1669, 17624, 43633, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCopyUnstableJSON(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() env2, cleanup2 := withTestEnvironment(t) defer cleanup2() // contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink` datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz") rtest.SetupTarTestFixture(t, env.base, datafile) testRunInit(t, env2.gopts) testRunCopy(t, env.gopts, env2.gopts) testRunCheck(t, env2.gopts) copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts) rtest.Assert(t, 1 == len(copiedSnapshotIDs), "still expected %v snapshot, found %v", 1, len(copiedSnapshotIDs)) }
explode_data.jsonl/43552
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 259 }
[ 2830, 3393, 12106, 1806, 27992, 5370, 1155, 353, 8840, 836, 8, 341, 57538, 11, 21290, 1669, 448, 2271, 12723, 1155, 340, 16867, 21290, 741, 57538, 17, 11, 21290, 17, 1669, 448, 2271, 12723, 1155, 340, 16867, 21290, 17, 2822, 197, 322, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateSimpleGraphWithNode(t *testing.T) { g := dot.NewGraph("Test") expected := "digraph Test {\n}\n" if fmt.Sprint(g) != expected { t.Errorf("'%s' != '%s'", fmt.Sprint(g), expected) } g.SetType(dot.GRAPH) expected = "graph Test {\n}\n" if fmt.Sprint(g) != expected { t.Errorf("'%s' != '%s'", fmt.Sprint(g), expected) } g.SetType(dot.DIGRAPH) node := dot.NewNode("legend") node.Set("shape", "box") g.AddNode(node) node.Set("label", "value with spaces") node = dot.NewNode("html") node.Set("shape", "plain") node.Set("label", "<<B>bold</B>>") g.AddNode(node) expected = "digraph Test {\nlegend [label=\"value with spaces\", shape=box];\nhtml [label=<<B>bold</B>>, shape=plain];\n}\n" if fmt.Sprint(g) != expected { t.Errorf("'%s' != '%s'", fmt.Sprint(g), expected) } }
explode_data.jsonl/407
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 347 }
[ 2830, 3393, 4021, 16374, 11212, 2354, 1955, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 12756, 7121, 11212, 445, 2271, 5130, 42400, 1669, 330, 44861, 1935, 3393, 28152, 77, 11035, 77, 698, 743, 8879, 808, 1350, 3268, 8, 961, 3601, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestCT_CacheHierarchiesConstructor(t *testing.T) { v := sml.NewCT_CacheHierarchies() if v == nil { t.Errorf("sml.NewCT_CacheHierarchies must return a non-nil value") } if err := v.Validate(); err != nil { t.Errorf("newly constructed sml.CT_CacheHierarchies should validate: %s", err) } }
explode_data.jsonl/28611
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 1162, 920, 1777, 74909, 1113, 550, 13288, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 274, 1014, 7121, 1162, 920, 1777, 74909, 1113, 550, 741, 743, 348, 621, 2092, 341, 197, 3244, 13080, 445, 82, 1014, 7121, 1162, 920, 1777,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUploadOrderSingleBufferedReader(t *testing.T) { s, ops, _ := loggingSvc(emptyList) mgr := s3manager.NewUploaderWithClient(s) resp, err := mgr.Upload(&s3manager.UploadInput{ Bucket: aws.String("Bucket"), Key: aws.String("Key"), Body: &sizedReader{size: 1024 * 1024 * 2}, }) if err != nil { t.Errorf("Expected no error but received %v", err) } if e, a := []string{"PutObject"}, *ops; !reflect.DeepEqual(e, a) { t.Errorf("Expected %v, but received %v", e, a) } if len(resp.Location) == 0 { t.Error("Expected a value in Location but received empty string") } if len(resp.UploadID) > 0 { t.Errorf("Expected empty string but received %q", resp.UploadID) } }
explode_data.jsonl/55648
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 272 }
[ 2830, 3393, 13844, 4431, 10888, 4095, 17120, 1155, 353, 8840, 836, 8, 341, 1903, 11, 27132, 11, 716, 1669, 8392, 92766, 24216, 852, 340, 2109, 901, 1669, 274, 18, 13297, 7121, 67574, 2354, 2959, 1141, 340, 34653, 11, 1848, 1669, 57897, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestInitApp(t *testing.T) { app = nil initApp() if app == nil { t.Errorf("app should not be nil") } MiddlewareStack = []negroni.Handler{} }
explode_data.jsonl/51055
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 62 }
[ 2830, 3393, 3803, 2164, 1155, 353, 8840, 836, 8, 341, 28236, 284, 2092, 198, 28248, 2164, 741, 743, 906, 621, 2092, 341, 197, 3244, 13080, 445, 676, 1265, 537, 387, 2092, 1138, 197, 532, 9209, 11603, 4336, 284, 3056, 28775, 2248, 72, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestReject(t *testing.T) { policyID := "policyID" m, err := policies.NewManagerImpl("test", providerMap(), &cb.ConfigGroup{ Policies: map[string]*cb.ConfigPolicy{ policyID: {Policy: rejectAllPolicy}, }, }) assert.NoError(t, err) assert.NotNil(t, m) policy, ok := m.GetPolicy(policyID) assert.True(t, ok, "Should have found policy which was just added, but did not") err = policy.Evaluate([]*protoutil.SignedData{{Identity: []byte("identity"), Data: []byte("data"), Signature: []byte("sig")}}) assert.Error(t, err, "Should have errored evaluating an rejectAll policy") }
explode_data.jsonl/51381
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 212 }
[ 2830, 3393, 78413, 1155, 353, 8840, 836, 8, 341, 3223, 8018, 915, 1669, 330, 34790, 915, 698, 2109, 11, 1848, 1669, 10186, 7121, 2043, 9673, 445, 1944, 497, 9109, 2227, 1507, 609, 7221, 10753, 2808, 515, 197, 10025, 42038, 25, 2415, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTerraformHttpExample(t *testing.T) { t.Parallel() // A unique ID we can use to namespace resources so we don't clash with anything already in the AWS account or // tests running in parallel uniqueID := random.UniqueId() // Give this EC2 Instance and other resources in the Terraform code a name with a unique ID so it doesn't clash // with anything else in the AWS account. //instanceName := fmt.Sprintf("terratest-http-example-%s", uniqueID) // Specify the text the EC2 Instance will return when we make HTTP requests to it. instanceText := fmt.Sprintf("Hello, %s!", uniqueID) // Pick a random AWS region to test in. This helps ensure your code works in all regions. //awsRegion := aws.GetRandomStableRegion(t, nil, nil) terraformOptions := &terraform.Options{ // The path to where our Terraform code is located TerraformDir: "../terraform-sysage", // Variables to pass to our Terraform code using -var options Vars: map[string]interface{}{ "nsxt_t0": "PKS-provisioned-t0-router", "nsxt_edgecluster": "edge-cluster", "nsxt_tz_overlay": "TZ-internal-overlay", }, } // At the end of the test, run `terraform destroy` to clean up any resources that were created defer terraform.Destroy(t, terraformOptions) // This will run `terraform init` and `terraform apply` and fail the test if there are any errors terraform.InitAndApply(t, terraformOptions) // Run `terraform output` to get the value of an output variable instanceURL := terraform.Output(t, terraformOptions, "instance_url") // Setup a TLS configuration to submit with the helper, a blank struct is acceptable tlsConfig := tls.Config{} // It can take a minute or so for the Instance to boot up, so retry a few times maxRetries := 30 timeBetweenRetries := 5 * time.Second // Verify that we get back a 200 OK http_helper.Htt // Verify that we get back a 200 OK with the expected instanceText //http_helper.HttpGetWithRetry(t, instanceURL, &tlsConfig, 200, instanceText, maxRetries, timeBetweenRetries) }
explode_data.jsonl/23318
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 636 }
[ 2830, 3393, 51, 13886, 627, 2905, 13314, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 322, 362, 4911, 3034, 582, 646, 990, 311, 4473, 4963, 773, 582, 1513, 944, 39903, 448, 4113, 2669, 304, 279, 23245, 2692, 476, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRpcStream_writeStreamNext(t *testing.T) { t.Run("test", func(t *testing.T) { assert := base.NewAssert(t) testRange := getTestRange(streamPosBody, 2*streamBlockSize, 16, 16, 61) for _, i := range testRange { bytes := make([]byte, i) dataStream := NewStream() for n := 0; n < i; n++ { bytes[n] = byte(n) } dataStream.WriteBytes(bytes) // invalid code bugStream0 := NewStream() bugStream0.PutBytes([]byte{13}) // length overflow bugStream1 := NewStream() bugStream1.PutBytes([]byte{65, 6, 0, 0, 0}) for j := streamPosBody; j < streamBlockSize+20; j++ { stream := NewStream() stream.SetWritePos(j) dataStream.SetReadPos(streamPosBody) // dataStream assert(stream.writeStreamNext(dataStream)).IsTrue() assert(dataStream.GetReadPos()).Equals(dataStream.GetWritePos()) assert(stream.GetWritePos()). Equals(dataStream.GetWritePos() + j - streamPosBody) // bugStream0 assert(stream.writeStreamNext(bugStream0)).IsFalse() assert(bugStream0.GetReadPos()).Equals(streamPosBody) assert(stream.GetWritePos()). Equals(dataStream.GetWritePos() + j - streamPosBody) // bugStream1 assert(stream.writeStreamNext(bugStream1)).IsFalse() assert(bugStream1.GetReadPos()).Equals(streamPosBody) assert(stream.GetWritePos()). Equals(dataStream.GetWritePos() + j - streamPosBody) stream.Release() } } }) }
explode_data.jsonl/21198
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 582 }
[ 2830, 3393, 60248, 3027, 9165, 3027, 5847, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 1944, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 6948, 1669, 2331, 7121, 8534, 1155, 340, 197, 18185, 6046, 1669, 633, 2271, 6046, 20574, 48...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestStringEmpty(t *testing.T) { tests := []struct { msg string result bool }{ { result: true, }, { "data", false, }, } for _, c := range tests { got := IsStringEmpty(c.msg) assert.Equal(t, c.result, got, "result must match") } }
explode_data.jsonl/13332
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 124 }
[ 2830, 3393, 703, 3522, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 21169, 262, 914, 198, 197, 9559, 1807, 198, 197, 59403, 197, 197, 515, 298, 9559, 25, 830, 345, 197, 197, 15766, 197, 197, 515, 298, 197, 97115,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTrackerInit(t *testing.T) { assert := assert.New(t) tracker := InitTracker( RequireEmitter(InitEmitter( RequireCollectorUri("com.acme"), OptionDbName("/home/vagrant/test.db"), )), OptionSubject(InitSubject()), OptionNamespace("namespace"), OptionAppId("app-id"), OptionPlatform("mob"), OptionBase64Encode(false), ) // Assert the option builders assert.NotNil(tracker) assert.NotNil(tracker.Emitter) assert.NotNil(tracker.Subject) assert.Equal("namespace", tracker.Namespace) assert.Equal("app-id", tracker.AppId) assert.Equal("mob", tracker.Platform) assert.Equal(false, tracker.Base64Encode) // Assert defaults tracker = InitTracker( RequireEmitter(InitEmitter( RequireCollectorUri("com.acme"), OptionDbName("/home/vagrant/test.db"), )), ) assert.NotNil(tracker) assert.NotNil(tracker.Emitter) assert.Nil(tracker.Subject) assert.Equal("", tracker.Namespace) assert.Equal("", tracker.AppId) assert.Equal("srv", tracker.Platform) assert.Equal(true, tracker.Base64Encode) // Assert the set functions tracker.SetSubject(InitSubject()) tracker.SetEmitter(InitEmitter( RequireCollectorUri("com.new"), OptionDbName("/home/vagrant/test.db"), )) tracker.SetNamespace("some-namespace") tracker.SetAppId("some-app-id") tracker.SetPlatform("web") tracker.SetBase64Encode(false) assert.NotNil(tracker.Emitter) assert.NotNil(tracker.Subject) assert.Equal("some-namespace", tracker.Namespace) assert.Equal("some-app-id", tracker.AppId) assert.Equal("web", tracker.Platform) assert.Equal(false, tracker.Base64Encode) // Assert panic for no emitter set defer func() { if err := recover(); err != nil { assert.Equal("FATAL: Emitter cannot be nil.", err) } }() tracker = InitTracker() }
explode_data.jsonl/37648
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 677 }
[ 2830, 3393, 31133, 3803, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 340, 25583, 9683, 1669, 15690, 31133, 1006, 197, 197, 17959, 21971, 7, 3803, 21971, 1006, 298, 197, 17959, 53694, 13899, 445, 874, 15399, 2660, 4461, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMarshal_ChallengeReply(t *testing.T) { t.Parallel() bytes, _ := getResponseReference().Marshal() assert.Equal(t, testChallengeReplyFromInitiator, hex.EncodeToString(bytes), "Marshalling did not yield the expected result.") }
explode_data.jsonl/54235
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 78 }
[ 2830, 3393, 55438, 27588, 15832, 20841, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 70326, 11, 716, 1669, 633, 2582, 8856, 1005, 55438, 741, 6948, 12808, 1155, 11, 1273, 62078, 20841, 3830, 3803, 36122, 11, 12371, 50217, 5870, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestImagePixels(t *testing.T) { img0, img, err := openEbitenImage() if err != nil { t.Fatal(err) return } if got := img0.Bounds().Size(); got != img.Bounds().Size() { t.Fatalf("img size: got %d; want %d", got, img.Bounds().Size()) } w, h := img0.Bounds().Size().X, img0.Bounds().Size().Y // Check out of range part w2, h2 := emath.NextPowerOf2Int(w), emath.NextPowerOf2Int(h) for j := -100; j < h2+100; j++ { for i := -100; i < w2+100; i++ { got := img0.At(i, j) want := color.RGBAModel.Convert(img.At(i, j)) if got != want { t.Errorf("img0 At(%d, %d): got %#v; want %#v", i, j, got, want) } } } }
explode_data.jsonl/10886
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 306 }
[ 2830, 3393, 1906, 38529, 1155, 353, 8840, 836, 8, 341, 39162, 15, 11, 4964, 11, 1848, 1669, 1787, 36, 4489, 268, 1906, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 853, 198, 197, 630, 743, 2684, 1669, 4964, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestMemory_race(t *testing.T) { m, err := NewWithDefault() require.NoError(t, err) upca, err := upca.NewWithDefault("../upstreamca-memory/pkg/_test_data/keys/private_key.pem", "../upstreamca-memory/pkg/_test_data/keys/cert.pem") require.NoError(t, err) generateCsrResp, err := m.GenerateCsr(&ca.GenerateCsrRequest{}) require.NoError(t, err) submitCSRResp, err := upca.SubmitCSR(&upstreamca.SubmitCSRRequest{Csr: generateCsrResp.Csr}) require.NoError(t, err) wcsr := createWorkloadCSR(t, "spiffe://localhost") testutil.RaceTest(t, func(t *testing.T) { m.GenerateCsr(&ca.GenerateCsrRequest{}) m.LoadCertificate(&ca.LoadCertificateRequest{SignedIntermediateCert: submitCSRResp.Cert}) m.FetchCertificate(&ca.FetchCertificateRequest{}) m.SignCsr(&ca.SignCsrRequest{Csr: wcsr}) }) }
explode_data.jsonl/73859
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 319 }
[ 2830, 3393, 10642, 91317, 1155, 353, 8840, 836, 8, 341, 2109, 11, 1848, 1669, 1532, 2354, 3675, 741, 17957, 35699, 1155, 11, 1848, 692, 59810, 924, 11, 1848, 1669, 705, 924, 7121, 2354, 3675, 17409, 454, 4027, 924, 64096, 22523, 19632, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestJsonpb(t *testing.T) { r := &Reply{Msg: []byte("OK")} b, err := PBToJSON(r) assert.Nil(t, err) assert.Equal(t, b, []byte(`{"isOk":false,"msg":"0x4f4b"}`)) var newreply Reply err = JSONToPB(b, &newreply) assert.Nil(t, err) assert.Equal(t, r, &newreply) }
explode_data.jsonl/58323
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 131 }
[ 2830, 3393, 5014, 16650, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 609, 20841, 90, 6611, 25, 3056, 3782, 445, 3925, 42132, 2233, 11, 1848, 1669, 30934, 1249, 5370, 2601, 340, 6948, 59678, 1155, 11, 1848, 340, 6948, 12808, 1155, 11, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMegaChecks(t *testing.T) { Convey("Should pass megachecks", t, func() { cmd := exec.Command("megacheck") res, _ := cmd.Output() So(string(res[:]), ShouldBeEmpty) }) }
explode_data.jsonl/25052
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 73 }
[ 2830, 3393, 44, 11188, 49820, 1155, 353, 8840, 836, 8, 1476, 93070, 5617, 445, 14996, 1494, 18740, 1777, 14553, 497, 259, 11, 2915, 368, 1476, 197, 25920, 1669, 3883, 12714, 445, 70276, 1777, 377, 1138, 197, 10202, 11, 716, 1669, 5439, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestPrepareCacheNow(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() orgEnable := core.PreparedPlanCacheEnabled() defer core.SetPreparedPlanCache(orgEnable) core.SetPreparedPlanCache(true) se, err := session.CreateSession4TestWithOpt(store, &session.Opt{ PreparedPlanCache: kvcache.NewSimpleLRUCache(100, 0.1, math.MaxUint64), }) require.NoError(t, err) tk := testkit.NewTestKitWithSession(t, store, se) tk.MustExec("use test") tk.MustExec(`prepare stmt1 from "select now(), current_timestamp(), utc_timestamp(), unix_timestamp(), sleep(0.1), now(), current_timestamp(), utc_timestamp(), unix_timestamp()"`) // When executing one statement at the first time, we don't usTestPrepareCacheDeferredFunctione cache, so we need to execute it at least twice to test the cache. _ = tk.MustQuery("execute stmt1").Rows() rs := tk.MustQuery("execute stmt1").Rows() require.Equal(t, rs[0][5].(string), rs[0][0].(string)) require.Equal(t, rs[0][6].(string), rs[0][1].(string)) require.Equal(t, rs[0][7].(string), rs[0][2].(string)) require.Equal(t, rs[0][8].(string), rs[0][3].(string)) }
explode_data.jsonl/5500
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 421 }
[ 2830, 3393, 50590, 8233, 7039, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1273, 8226, 7251, 11571, 6093, 1155, 340, 16867, 4240, 741, 87625, 11084, 1669, 6200, 28770, 7212, 20485, 8233, 5462, 741, 16867, 6200, 4202, 4703, 7212, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWorkerMaintenanceRefillLowContractFunds(t *testing.T) { if testing.Short() { t.SkipNow() } t.Parallel() deps := &dependencies.DependencyDisableWorker{} wt, err := newWorkerTesterCustomDependency(t.Name(), deps, modules.ProdDependencies) if err != nil { t.Fatal(err) } defer func() { err := wt.Close() if err != nil { t.Fatal(err) } }() // allow for a large balance on the host. is := wt.host.InternalSettings() is.MaxEphemeralAccountBalance = types.UplocoinPrecision.Mul64(math.MaxUint64) err = wt.host.SetInternalSettings(is) if err != nil { t.Fatal(err) } w := wt.worker // fetch a pricetable. w.staticUpdatePriceTable() // balance should be 0 right now. w.staticAccount.mu.Lock() accountBalance := w.staticAccount.balance w.staticAccount.mu.Unlock() if !accountBalance.IsZero() { t.Fatal("balance should be zero at beginning of test") } // check remaining balance on contract. contract, ok := w.renter.hostContractor.ContractByPublicKey(wt.staticHostPubKey) if !ok { t.Fatal("contract not found") } funds := contract.RenterFunds // set the target to the balance. w.staticBalanceTarget = funds // trigger a refill. w.managedRefillAccount() // check if the balance increased. w.staticAccount.mu.Lock() accountBalance = w.staticAccount.balance w.staticAccount.mu.Unlock() expectedBalance := funds.Sub(wt.staticPriceTable().staticPriceTable.FundAccountCost) if !accountBalance.Equals(expectedBalance) { t.Fatalf("expected balance %v but got %v", accountBalance, expectedBalance) } }
explode_data.jsonl/42607
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 560 }
[ 2830, 3393, 21936, 92735, 3945, 483, 24187, 14067, 37, 42950, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 7039, 741, 197, 532, 3244, 41288, 7957, 2822, 58351, 1690, 1669, 609, 54274, 49918, 25479, 21936, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRoleDeleteForbidden(t *testing.T) { h := newHelper(t) u := h.repoMakeRole() h.apiInit(). Delete(fmt.Sprintf("/roles/%d", u.ID)). Header("Accept", "application/json"). Expect(t). Status(http.StatusOK). Assert(helpers.AssertError("not allowed to delete this role")). End() }
explode_data.jsonl/8343
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 121 }
[ 2830, 3393, 9030, 6435, 69115, 1155, 353, 8840, 836, 8, 341, 9598, 1669, 501, 5511, 1155, 340, 10676, 1669, 305, 46169, 8078, 9030, 2822, 9598, 6183, 3803, 25829, 197, 96672, 28197, 17305, 4283, 14643, 12627, 67, 497, 575, 9910, 39467, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestWalletSvrWsNtfns exercises the wallet-server websocket notifications:
// for each notification type it checks that (1) the static constructor
// marshals to the exact expected JSON-RPC bytes, (2) the generic NewCmd path
// produces the same bytes, and (3) unmarshalling round-trips back to the
// expected concrete notification struct.
func TestWalletSvrWsNtfns(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name         string
		newNtfn      func() (interface{}, error) // generic NewCmd-based construction
		staticNtfn   func() interface{}          // type-safe static constructor
		marshalled   string                      // exact expected JSON-RPC encoding
		unmarshalled interface{}                 // expected decoded notification
	}{
		{
			name: "accountbalance",
			newNtfn: func() (interface{}, error) {
				return btcjson.NewCmd("accountbalance", "acct", 1.25, true)
			},
			staticNtfn: func() interface{} {
				return btcjson.NewAccountBalanceNtfn("acct", 1.25, true)
			},
			marshalled: `{"jsonrpc":"1.0","method":"accountbalance","params":["acct",1.25,true],"id":null}`,
			unmarshalled: &btcjson.AccountBalanceNtfn{
				Account:   "acct",
				Balance:   1.25,
				Confirmed: true,
			},
		},
		{
			name: "btcdconnected",
			newNtfn: func() (interface{}, error) {
				return btcjson.NewCmd("btcdconnected", true)
			},
			staticNtfn: func() interface{} {
				return btcjson.NewBtcdConnectedNtfn(true)
			},
			marshalled: `{"jsonrpc":"1.0","method":"btcdconnected","params":[true],"id":null}`,
			unmarshalled: &btcjson.BtcdConnectedNtfn{
				Connected: true,
			},
		},
		{
			name: "walletlockstate",
			newNtfn: func() (interface{}, error) {
				return btcjson.NewCmd("walletlockstate", true)
			},
			staticNtfn: func() interface{} {
				return btcjson.NewWalletLockStateNtfn(true)
			},
			marshalled: `{"jsonrpc":"1.0","method":"walletlockstate","params":[true],"id":null}`,
			unmarshalled: &btcjson.WalletLockStateNtfn{
				Locked: true,
			},
		},
		{
			name: "newtx",
			newNtfn: func() (interface{}, error) {
				// The transaction details are passed as a pre-encoded JSON string.
				return btcjson.NewCmd("newtx", "acct", `{"account":"acct","address":"1Address","category":"send","amount":1.5,"bip125-replaceable":"unknown","fee":0.0001,"confirmations":1,"trusted":true,"txid":"456","walletconflicts":[],"time":12345678,"timereceived":12345876,"vout":789,"otheraccount":"otheracct"}`)
			},
			staticNtfn: func() interface{} {
				result := btcjson.ListTransactionsResult{
					Abandoned:         false,
					Account:           "acct",
					Address:           "1Address",
					BIP125Replaceable: "unknown",
					Category:          "send",
					Amount:            1.5,
					Fee:               btcjson.Float64(0.0001),
					Confirmations:     1,
					TxID:              "456",
					WalletConflicts:   []string{},
					Time:              12345678,
					TimeReceived:      12345876,
					Trusted:           true,
					Vout:              789,
					OtherAccount:      "otheracct",
				}
				return btcjson.NewNewTxNtfn("acct", result)
			},
			marshalled: `{"jsonrpc":"1.0","method":"newtx","params":["acct",{"abandoned":false,"account":"acct","address":"1Address","amount":1.5,"bip125-replaceable":"unknown","category":"send","confirmations":1,"fee":0.0001,"time":12345678,"timereceived":12345876,"trusted":true,"txid":"456","vout":789,"walletconflicts":[],"otheraccount":"otheracct"}],"id":null}`,
			unmarshalled: &btcjson.NewTxNtfn{
				Account: "acct",
				Details: btcjson.ListTransactionsResult{
					Abandoned:         false,
					Account:           "acct",
					Address:           "1Address",
					BIP125Replaceable: "unknown",
					Category:          "send",
					Amount:            1.5,
					Fee:               btcjson.Float64(0.0001),
					Confirmations:     1,
					TxID:              "456",
					WalletConflicts:   []string{},
					Time:              12345678,
					TimeReceived:      12345876,
					Trusted:           true,
					Vout:              789,
					OtherAccount:      "otheracct",
				},
			},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Marshal the notification as created by the new static
		// creation function. The ID is nil for notifications.
		marshalled, err := btcjson.MarshalCmd(nil, test.staticNtfn())
		if err != nil {
			t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
				test.name, err)
			continue
		}

		if !bytes.Equal(marshalled, []byte(test.marshalled)) {
			t.Errorf("Test #%d (%s) unexpected marshalled data - "+
				"got %s, want %s", i, test.name, marshalled,
				test.marshalled)
			continue
		}

		// Ensure the notification is created without error via the
		// generic new notification creation function.
		cmd, err := test.newNtfn()
		if err != nil {
			t.Errorf("Test #%d (%s) unexpected NewCmd error: %v ",
				i, test.name, err)
		}

		// Marshal the notification as created by the generic new
		// notification creation function. The ID is nil for
		// notifications.
		marshalled, err = btcjson.MarshalCmd(nil, cmd)
		if err != nil {
			t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
				test.name, err)
			continue
		}

		if !bytes.Equal(marshalled, []byte(test.marshalled)) {
			t.Errorf("Test #%d (%s) unexpected marshalled data - "+
				"got %s, want %s", i, test.name, marshalled,
				test.marshalled)
			continue
		}

		// Round-trip: decode the wire bytes back into a Request, then
		// into the concrete notification struct, and compare deeply.
		var request btcjson.Request
		if err := json.Unmarshal(marshalled, &request); err != nil {
			t.Errorf("Test #%d (%s) unexpected error while "+
				"unmarshalling JSON-RPC request: %v", i,
				test.name, err)
			continue
		}

		cmd, err = btcjson.UnmarshalCmd(&request)
		if err != nil {
			t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i,
				test.name, err)
			continue
		}

		if !reflect.DeepEqual(cmd, test.unmarshalled) {
			t.Errorf("Test #%d (%s) unexpected unmarshalled command "+
				"- got %s, want %s", i, test.name,
				fmt.Sprintf("(%T) %+[1]v", cmd),
				fmt.Sprintf("(%T) %+[1]v\n", test.unmarshalled))
			continue
		}
	}
}
explode_data.jsonl/17063
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2503 }
[ 2830, 3393, 38259, 50, 18920, 74733, 45, 8935, 4412, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78216, 1669, 3056, 1235, 341, 197, 11609, 260, 914, 198, 197, 8638, 45, 83, 8822, 414, 2915, 368, 320, 4970, 22655, 1465, 340,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestProjectDeletion creates an AppProject directly through the Kubernetes
// client, deletes it via the "proj delete" CLI command, then verifies the
// project is gone server-side and that a resource-deleted event was recorded.
func TestProjectDeletion(t *testing.T) {
	fixture.EnsureCleanState(t)

	// Unique per-run name so repeated executions don't collide.
	projectName := "proj-" + strconv.FormatInt(time.Now().Unix(), 10)
	proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Create(
		context.Background(), &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: projectName}}, metav1.CreateOptions{})
	assert.NoError(t, err)

	// Delete through the CLI rather than the API client.
	_, err = fixture.RunCli("proj", "delete", projectName)
	assert.NoError(t, err)

	// The project must no longer exist (NotFound, not some other error).
	_, err = fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.ArgoCDNamespace).Get(context.Background(), projectName, metav1.GetOptions{})
	assert.True(t, errors.IsNotFound(err))

	assertProjHasEvent(t, proj, "delete", argo.EventReasonResourceDeleted)
}
explode_data.jsonl/58439
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 266 }
[ 2830, 3393, 7849, 1912, 52625, 1155, 353, 8840, 836, 8, 341, 1166, 12735, 22834, 19098, 27529, 1397, 1155, 692, 72470, 675, 1669, 330, 30386, 27651, 488, 33317, 9978, 1072, 9730, 13244, 1005, 55832, 1507, 220, 16, 15, 340, 197, 30386, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConfigMarketplaceDefaults(t *testing.T) { t.Parallel() t.Run("no marketplace url", func(t *testing.T) { c := Config{} c.SetDefaults() require.True(t, *c.PluginSettings.EnableMarketplace) require.Equal(t, PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL, *c.PluginSettings.MarketplaceUrl) }) t.Run("old marketplace url", func(t *testing.T) { c := Config{} c.SetDefaults() *c.PluginSettings.MarketplaceUrl = PLUGIN_SETTINGS_OLD_MARKETPLACE_URL c.SetDefaults() require.True(t, *c.PluginSettings.EnableMarketplace) require.Equal(t, PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL, *c.PluginSettings.MarketplaceUrl) }) t.Run("custom marketplace url", func(t *testing.T) { c := Config{} c.SetDefaults() *c.PluginSettings.MarketplaceUrl = "https://marketplace.example.com" c.SetDefaults() require.True(t, *c.PluginSettings.EnableMarketplace) require.Equal(t, "https://marketplace.example.com", *c.PluginSettings.MarketplaceUrl) }) }
explode_data.jsonl/50703
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 377 }
[ 2830, 3393, 2648, 38822, 2007, 16273, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 3244, 16708, 445, 2152, 29533, 2515, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 1444, 1669, 5532, 16094, 197, 1444, 4202, 16273, 2822, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestExecutor covers the executor's extension points against a minimal
// executable schema: invocation order of operation/response/root-field/field
// middleware, operation mutators, propagation of parse errors into
// AroundResponses, and the query-document cache.
func TestExecutor(t *testing.T) {
	exec := testexecutor.New()

	t.Run("calls query on executable schema", func(t *testing.T) {
		resp := query(exec, "", "{name}")
		assert.Equal(t, `{"name":"test"}`, string(resp.Data))
	})

	t.Run("invokes operation middleware in order", func(t *testing.T) {
		var calls []string
		// Registration order must equal invocation order.
		exec.AroundOperations(func(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
			calls = append(calls, "first")
			return next(ctx)
		})
		exec.AroundOperations(func(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
			calls = append(calls, "second")
			return next(ctx)
		})

		resp := query(exec, "", "{name}")
		assert.Equal(t, `{"name":"test"}`, string(resp.Data))
		assert.Equal(t, []string{"first", "second"}, calls)
	})

	t.Run("invokes response middleware in order", func(t *testing.T) {
		var calls []string
		exec.AroundResponses(func(ctx context.Context, next graphql.ResponseHandler) *graphql.Response {
			calls = append(calls, "first")
			return next(ctx)
		})
		exec.AroundResponses(func(ctx context.Context, next graphql.ResponseHandler) *graphql.Response {
			calls = append(calls, "second")
			return next(ctx)
		})

		resp := query(exec, "", "{name}")
		assert.Equal(t, `{"name":"test"}`, string(resp.Data))
		assert.Equal(t, []string{"first", "second"}, calls)
	})

	t.Run("invokes root field middleware in order", func(t *testing.T) {
		var calls []string
		exec.AroundRootFields(func(ctx context.Context, next graphql.RootResolver) graphql.Marshaler {
			calls = append(calls, "first")
			return next(ctx)
		})
		exec.AroundRootFields(func(ctx context.Context, next graphql.RootResolver) graphql.Marshaler {
			calls = append(calls, "second")
			return next(ctx)
		})

		resp := query(exec, "", "{name}")
		assert.Equal(t, `{"name":"test"}`, string(resp.Data))
		assert.Equal(t, []string{"first", "second"}, calls)
	})

	t.Run("invokes field middleware in order", func(t *testing.T) {
		var calls []string
		exec.AroundFields(func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {
			calls = append(calls, "first")
			return next(ctx)
		})
		exec.AroundFields(func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {
			calls = append(calls, "second")
			return next(ctx)
		})

		resp := query(exec, "", "{name}")
		assert.Equal(t, `{"name":"test"}`, string(resp.Data))
		assert.Equal(t, []string{"first", "second"}, calls)
	})

	t.Run("invokes operation mutators", func(t *testing.T) {
		var calls []string
		// Param mutators run before context mutators (see assertion below).
		exec.Use(&testParamMutator{
			Mutate: func(ctx context.Context, req *graphql.RawParams) *gqlerror.Error {
				calls = append(calls, "param")
				return nil
			},
		})
		exec.Use(&testCtxMutator{
			Mutate: func(ctx context.Context, rc *graphql.OperationContext) *gqlerror.Error {
				calls = append(calls, "context")
				return nil
			},
		})

		resp := query(exec, "", "{name}")
		assert.Equal(t, `{"name":"test"}`, string(resp.Data))
		assert.Equal(t, []string{"param", "context"}, calls)
	})

	t.Run("get query parse error in AroundResponses", func(t *testing.T) {
		// The parse error must be visible both via the context and on the
		// response object inside response middleware.
		var errors1 gqlerror.List
		var errors2 gqlerror.List
		exec.AroundResponses(func(ctx context.Context, next graphql.ResponseHandler) *graphql.Response {
			resp := next(ctx)
			errors1 = graphql.GetErrors(ctx)
			errors2 = resp.Errors
			return resp
		})

		resp := query(exec, "", "invalid")
		assert.Equal(t, "", string(resp.Data))
		assert.Equal(t, 1, len(resp.Errors))
		assert.Equal(t, 1, len(errors1))
		assert.Equal(t, 1, len(errors2))
	})

	t.Run("query caching", func(t *testing.T) {
		ctx := context.Background()
		cache := &graphql.MapCache{}
		exec.SetQueryCache(cache)
		qry := `query Foo {name}`

		t.Run("cache miss populates cache", func(t *testing.T) {
			resp := query(exec, "Foo", qry)
			assert.Equal(t, `{"name":"test"}`, string(resp.Data))

			cacheDoc, ok := cache.Get(ctx, qry)
			require.True(t, ok)
			require.Equal(t, "Foo", cacheDoc.(*ast.QueryDocument).Operations[0].Name)
		})

		t.Run("cache hits use document from cache", func(t *testing.T) {
			// Seed the cache with a document whose operation name ("Bar")
			// differs from the raw query text; a hit must use the cached doc.
			doc, err := parser.ParseQuery(&ast.Source{Input: `query Bar {name}`})
			require.Nil(t, err)
			cache.Add(ctx, qry, doc)

			resp := query(exec, "Bar", qry)
			assert.Equal(t, `{"name":"test"}`, string(resp.Data))

			cacheDoc, ok := cache.Get(ctx, qry)
			require.True(t, ok)
			require.Equal(t, "Bar", cacheDoc.(*ast.QueryDocument).Operations[0].Name)
		})
	})
}
explode_data.jsonl/79116
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1752 }
[ 2830, 3393, 25255, 1155, 353, 8840, 836, 8, 341, 67328, 1669, 1273, 80787, 7121, 2822, 3244, 16708, 445, 44620, 3239, 389, 32156, 10802, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 34653, 1669, 3239, 46896, 11, 7342, 13868, 606, 14451...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFontProp_MakeValid(t *testing.T) { cases := []struct { name string signatureProp *props.Font assert func(t *testing.T, prop *props.Font) }{ { "When family is not defined, should define arial", &props.Font{ Family: "", }, func(t *testing.T, prop *props.Font) { assert.Equal(t, prop.Family, consts.Arial) }, }, { "When style is not defined, should define normal", &props.Font{ Style: "", }, func(t *testing.T, prop *props.Font) { assert.Equal(t, prop.Style, consts.Bold) }, }, { "When size is zero, should define 10.0", &props.Font{ Size: 0.0, }, func(t *testing.T, prop *props.Font) { assert.Equal(t, prop.Size, 8.0) }, }, } for _, c := range cases { // Act c.signatureProp.MakeValid() // Assert c.assert(t, c.signatureProp) } }
explode_data.jsonl/68878
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 416 }
[ 2830, 3393, 5447, 2008, 1245, 726, 4088, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 11609, 688, 914, 198, 197, 69054, 1568, 2008, 353, 4761, 4356, 198, 197, 6948, 286, 2915, 1155, 353, 8840, 836, 11, 2004, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWorkflowGetInitialTaskTemplate(t *testing.T) { workflow := &Workflow{ TaskTemplates: []TaskTemplate{ TaskTemplate{ TaskName: "First", Initial: false, }, TaskTemplate{ TaskName: "Second", Initial: true, }, }, } initialTT := workflow.GetInitialTaskTemplate() assert.Equal(t, &workflow.TaskTemplates[1], initialTT) }
explode_data.jsonl/19598
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 145 }
[ 2830, 3393, 62768, 1949, 6341, 6262, 7275, 1155, 353, 8840, 836, 8, 341, 197, 56249, 1669, 609, 62768, 515, 197, 81153, 51195, 25, 3056, 6262, 7275, 515, 298, 81153, 7275, 515, 571, 81153, 675, 25, 330, 5338, 756, 571, 197, 6341, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCAloadUsersTable walks loadUsersTable through its main paths: CA
// construction failing on an invalid identity (negative MaxEnrollments),
// a valid registration, a duplicate registration (which must not error),
// and a database error after the DB has been closed and its file removed.
// NOTE: the test mutates the package-level cfg and relies on strict
// statement ordering; do not reorder the steps.
func TestCAloadUsersTable(t *testing.T) {
	testDirClean(t)
	cfg = CAConfig{}
	// Negative MaxEnrollments makes the identity invalid, so newCA must fail.
	u := &CAConfigIdentity{Name: "a", MaxEnrollments: -10}
	cfg.Registry = CAConfigRegistry{Identities: []CAConfigIdentity{*u}, MaxEnrollments: 10}
	ca, err := newCA(configFile, &cfg, &srv, false)
	t.Log("ca.newCA error: ", err)
	if err == nil {
		t.Error("ca.newCA should have failed")
	}

	// Chase down all error paths using duplicate entries
	i := make([]interface{}, 3)
	i[1] = []string{"", "root", "root"}
	cfg.Affiliations = make(map[string]interface{}, 3)
	cfg.Affiliations["a"] = i

	// Valid registration: start from a fresh database file.
	err = os.Remove(testdir + dbname)
	if err != nil {
		t.Fatalf("Remove failed: %s", err)
	}
	u = &CAConfigIdentity{Name: "a", MaxEnrollments: 10}
	cfg.Registry = CAConfigRegistry{Identities: []CAConfigIdentity{*u}, MaxEnrollments: 10}
	ca, err = newCA(configFile, &cfg, &srv, false)
	if err != nil {
		t.Fatal("newCA FAILED", err)
	}
	u = &CAConfigIdentity{Name: "a", MaxEnrollments: 10}
	ca.Config.Registry = CAConfigRegistry{Identities: []CAConfigIdentity{*u}, MaxEnrollments: 10}
	err = ca.loadUsersTable()
	if err != nil {
		t.Error("ca.loadUsersTable failed ", err)
	}

	// Duplicate registration of the same identity is a non-error.
	u = &CAConfigIdentity{Name: "a", MaxEnrollments: 10}
	ca.Config.Registry = CAConfigRegistry{Identities: []CAConfigIdentity{*u}, MaxEnrollments: 10}
	err = ca.loadUsersTable()
	if err != nil {
		t.Error("ca.loadUsersTable error path should have succeeded: ", err)
	}

	// Database error (db is closed): registering a new identity ("b") must
	// fail once the DB is closed and its backing file removed.
	u = &CAConfigIdentity{Name: "b", MaxEnrollments: 10}
	ca.Config.Registry = CAConfigRegistry{Identities: []CAConfigIdentity{*u}, MaxEnrollments: 10}
	err = ca.closeDB()
	if err != nil {
		t.Fatalf("CloseDB failed: %s", err)
	}
	err = os.Remove(testdir + dbname)
	if err != nil {
		t.Fatalf("Remove failed: %s", err)
	}
	err = ca.loadUsersTable()
	t.Log("ca.loadUsersTable error: ", err)
	if err == nil {
		t.Error("ca.loadUsersTable should have failed due to DB error ", err)
	}

	CAclean(ca, t)
}
explode_data.jsonl/82707
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 802 }
[ 2830, 3393, 5049, 1078, 7137, 2556, 1155, 353, 8840, 836, 8, 341, 18185, 6184, 27529, 1155, 340, 50286, 284, 9183, 2648, 16094, 10676, 1669, 609, 5049, 2648, 18558, 63121, 25, 330, 64, 497, 7487, 1702, 1100, 1368, 25, 481, 16, 15, 532...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestJobRunsController_Show_Unauthenticated(t *testing.T) { t.Parallel() ethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) defer assertMocksCalled() app, cleanup := cltest.NewApplication(t, ethClient, ) defer cleanup() app.Start() resp, err := http.Get(app.Server.URL + "/v2/runs/notauthorized") assert.NoError(t, err) assert.Equal(t, http.StatusUnauthorized, resp.StatusCode, "Response should be forbidden") }
explode_data.jsonl/49860
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 12245, 73920, 2051, 79665, 40687, 57707, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 769, 2959, 11, 8358, 2060, 72577, 20960, 1669, 1185, 1944, 7121, 65390, 11571, 16056, 39076, 90206, 1155, 340, 16867, 2060, 72...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestResultOutputs_GetContentDigest(t *testing.T) { testcases := []struct { value string wantValue string wantOK bool }{ {value: "abc123", wantValue: "abc123", wantOK: true}, } for _, tc := range testcases { t.Run("existing metadata", func(t *testing.T) { outputs := OutputMetadata{} err := outputs.SetContentDigest("test1", tc.value) require.NoError(t, err, "SetContentDigest failed") generatedByBundle, ok := outputs.GetContentDigest("test1") require.Equal(t, tc.wantOK, ok, "GetGeneratedByBundle did not return the expected ok value") assert.Equal(t, tc.wantValue, generatedByBundle, "GetGeneratedByBundle did not return the expected value") }) } }
explode_data.jsonl/70427
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 258 }
[ 2830, 3393, 2077, 61438, 13614, 2762, 45217, 1155, 353, 8840, 836, 8, 341, 18185, 23910, 1669, 3056, 1235, 341, 197, 16309, 257, 914, 198, 197, 50780, 1130, 914, 198, 197, 50780, 3925, 262, 1807, 198, 197, 59403, 197, 197, 90, 957, 25...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestParseDestructor parses a resource declaration containing a destroy()
// destructor and asserts the exact AST produced, including every source
// position (offsets/lines/columns refer to the raw program string below,
// which starts with a newline and leading tabs).
func TestParseDestructor(t *testing.T) {

	t.Parallel()

	result, errs := ParseProgram(`
	    resource Test {
	        destroy() {}
	    }
	`)
	require.Empty(t, errs)

	utils.AssertEqualWithDiff(t,
		[]ast.Declaration{
			&ast.CompositeDeclaration{
				CompositeKind: common.CompositeKindResource,
				Identifier: ast.Identifier{
					Identifier: "Test",
					Pos:        ast.Position{Offset: 18, Line: 2, Column: 17},
				},
				Members: ast.NewMembers(
					[]ast.Declaration{
						// The destructor is represented as a special function
						// declaration of kind Destructor.
						&ast.SpecialFunctionDeclaration{
							Kind: common.DeclarationKindDestructor,
							FunctionDeclaration: &ast.FunctionDeclaration{
								Identifier: ast.Identifier{
									Identifier: "destroy",
									Pos:        ast.Position{Offset: 37, Line: 3, Column: 12},
								},
								ParameterList: &ast.ParameterList{
									Range: ast.Range{
										StartPos: ast.Position{Offset: 44, Line: 3, Column: 19},
										EndPos:   ast.Position{Offset: 45, Line: 3, Column: 20},
									},
								},
								FunctionBlock: &ast.FunctionBlock{
									Block: &ast.Block{
										Range: ast.Range{
											StartPos: ast.Position{Offset: 47, Line: 3, Column: 22},
											EndPos:   ast.Position{Offset: 48, Line: 3, Column: 23},
										},
									},
								},
								StartPos: ast.Position{Offset: 37, Line: 3, Column: 12},
							},
						},
					},
				),
				Range: ast.Range{
					StartPos: ast.Position{Offset: 9, Line: 2, Column: 8},
					EndPos:   ast.Position{Offset: 58, Line: 4, Column: 8},
				},
			},
		},
		result.Declarations(),
	)
}
explode_data.jsonl/35996
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 769 }
[ 2830, 3393, 14463, 84961, 1155, 353, 8840, 836, 8, 1476, 3244, 41288, 7957, 2822, 9559, 11, 70817, 1669, 14775, 10690, 61528, 286, 5101, 3393, 341, 310, 6921, 368, 5613, 286, 456, 197, 24183, 17957, 11180, 1155, 11, 70817, 692, 80206, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCanvas_Tappable drives the mobile canvas's touch handling against a
// touchableLabel: a tapDown/tapUp pair must deliver down and up events, and a
// tapDown followed by a tapMove that drags outside the widget's bounds must
// cancel the touch (content.cancel).
func TestCanvas_Tappable(t *testing.T) {
	content := &touchableLabel{Label: widget.NewLabel("Hi\nHi\nHi")}
	content.ExtendBaseWidget(content)
	c := NewCanvas().(*mobileCanvas)
	c.SetContent(content)
	// Canvas is wider (36) than the widget (24), leaving space to drag out of
	// the widget's area below.
	c.resize(fyne.NewSize(36, 24))
	content.Resize(fyne.NewSize(24, 24))

	// Touch inside the widget: the down event must be delivered.
	c.tapDown(fyne.NewPos(15, 15), 0)
	assert.True(t, content.down)

	// Lifting at the same point delivers the up event; the tap/secondary/
	// double/drag callbacks are not under test here, hence empty.
	c.tapUp(fyne.NewPos(15, 15), 0, func(wid fyne.Tappable, ev *fyne.PointEvent) {
	}, func(wid fyne.SecondaryTappable, ev *fyne.PointEvent) {
	}, func(wid fyne.DoubleTappable, ev *fyne.PointEvent) {
	}, func(wid fyne.Draggable) {
	})
	assert.True(t, content.up)

	// Press again, then move to x=35 — outside the 24-wide widget — which
	// must cancel the in-progress touch on the widget.
	c.tapDown(fyne.NewPos(15, 15), 0)
	c.tapMove(fyne.NewPos(35, 15), 0, func(wid fyne.Draggable, ev *fyne.DragEvent) {
		wid.Dragged(ev)
	})
	assert.True(t, content.cancel)
}
explode_data.jsonl/43164
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 338 }
[ 2830, 3393, 18226, 1139, 86384, 1155, 353, 8840, 836, 8, 341, 27751, 1669, 609, 22020, 480, 2476, 90, 2476, 25, 9086, 7121, 2476, 445, 13048, 1699, 13048, 1699, 13048, 42132, 27751, 16146, 408, 3978, 4548, 15063, 340, 1444, 1669, 1532, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMonorailSearch mocks the Monorail v3 SearchIssues endpoint and checks
// that Search returns both issues and the open/unassigned/untriaged counts,
// with and without the UnassignedIsUntriaged query option.
func TestMonorailSearch(t *testing.T) {
	unittest.SmallTest(t)
	ctx := context.Background()

	mc := &MonorailQueryConfig{
		Instance: "skia",
		Query:    "test-query",
		Client:   "Skia",
	}

	// Exact request body the client is expected to POST.
	reqBody := []byte(fmt.Sprintf(`{"projects": ["projects/%s"], "query": "%s", "page_token": ""}`, mc.Instance, mc.Query))
	issue1 := "123"
	issue2 := "456"
	respBody := []byte(fmt.Sprintf(`{"issues":[{"name": "%s"},{"name": "%s"}],"nextPageToken":""}`, issue1, issue2))
	// Monorail API prepends chars to prevent XSS.
	respBody = append([]byte("abcd\n"), respBody...)

	r := mux.NewRouter()
	md := mockhttpclient.MockPostDialogueWithResponseCode("application/json", reqBody, respBody, http.StatusOK)
	r.Schemes("https").Host("api-dot-monorail-prod.appspot.com").Methods("POST").Path("/prpc/monorail.v3.Issues/SearchIssues").Handler(md)
	httpClient := mockhttpclient.NewMuxClient(r)

	testToken := oauth2.Token{
		AccessToken: "access-token",
	}
	m := monorail{
		token:       &testToken,
		httpClient:  httpClient,
		openIssues:  bugs.InitOpenIssues(),
		queryConfig: mc,
	}

	// Default config: both issues open and unassigned, none untriaged.
	issues, countsData, err := m.Search(ctx)
	require.NoError(t, err)
	require.Equal(t, 2, len(issues))
	require.Equal(t, issue1, issues[0].Id)
	require.Equal(t, issue2, issues[1].Id)
	require.Equal(t, 2, countsData.OpenCount)
	require.Equal(t, 2, countsData.UnassignedCount)
	require.Equal(t, 0, countsData.UntriagedCount)

	// Set UnassignedIsUntriaged and assert: unassigned issues now also count
	// as untriaged.
	mc.UnassignedIsUntriaged = true
	issues, countsData, err = m.Search(ctx)
	require.NoError(t, err)
	require.Equal(t, 2, len(issues))
	require.Equal(t, issue1, issues[0].Id)
	require.Equal(t, issue2, issues[1].Id)
	require.Equal(t, 2, countsData.OpenCount)
	require.Equal(t, 2, countsData.UnassignedCount)
	require.Equal(t, 2, countsData.UntriagedCount)
}
explode_data.jsonl/60123
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 705 }
[ 2830, 3393, 11095, 269, 604, 5890, 1155, 353, 8840, 836, 8, 341, 20479, 14267, 90183, 2271, 1155, 340, 20985, 1669, 2266, 19047, 2822, 97662, 1669, 609, 11095, 269, 604, 2859, 2648, 515, 197, 197, 2523, 25, 330, 4886, 685, 756, 197, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestShouldIgnoreAutoGeneratedAttributes(t *testing.T) { modifiedScantype := scanType.DeepCopy() modifiedScantype.ResourceVersion = "ajbsdiavof1t2hvasjhdvaj" assert.Equal( t, HashScanType(scanType), HashScanType(*modifiedScantype), "Should ignore auto generated attributes", ) }
explode_data.jsonl/24501
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 14996, 12497, 13253, 15741, 10516, 1155, 353, 8840, 836, 8, 1476, 42228, 1870, 3326, 517, 499, 1669, 8569, 929, 55602, 12106, 2822, 42228, 1870, 3326, 517, 499, 20766, 5637, 284, 330, 1630, 1279, 8579, 402, 1055, 16, 83, 17,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateOrUpdateNeverRateLimiter(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() vmCreateOrUpdateErr := &retry.Error{ RawError: fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "VMCreateOrUpdate"), Retriable: true, } armClient := mockarmclient.NewMockInterface(ctrl) vmClient := getTestVMClientWithNeverRateLimiter(armClient) testVM := getTestVM("vm1") rerr := vmClient.CreateOrUpdate(context.TODO(), "rg", "vm1", testVM, "test") assert.NotNil(t, rerr) assert.Equal(t, vmCreateOrUpdateErr, rerr) }
explode_data.jsonl/16703
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 215 }
[ 2830, 3393, 4021, 56059, 26155, 11564, 43, 17700, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 54879, 4021, 56059, 7747, 1669, 609, 44848, 6141, 515, 197, 11143, 672, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestExpect100ContinueAfterHandlerWrites sends a request with
// "Expect: 100-continue" to a handler that writes and flushes a response
// before ever reading the body. The client must still receive both handler
// writes intact; the handler's body read (which might force a 100-continue
// status) is gated on doRead so the test controls its timing.
func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
	const msg = "Hello"
	const msg2 = "World"

	doRead := make(chan bool, 1)
	defer close(doRead) // fallback cleanup

	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		// Write and flush the first message before touching the body.
		io.WriteString(w, msg)
		w.(http.Flusher).Flush()

		// Do a read, which might force a 100-continue status to be sent.
		<-doRead
		r.Body.Read(make([]byte, 10))

		io.WriteString(w, msg2)

	}, optOnlyServer)
	defer st.Close()

	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
	defer tr.CloseIdleConnections()

	// A large, never-ending body ensures the client actually waits on the
	// 100-continue handshake rather than sending everything up front.
	req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20))
	req.Header.Set("Expect", "100-continue")
	res, err := tr.RoundTrip(req)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()

	// First message must arrive before the handler reads the body.
	buf := make([]byte, len(msg))
	if _, err := io.ReadFull(res.Body, buf); err != nil {
		t.Fatal(err)
	}
	if string(buf) != msg {
		t.Fatalf("msg = %q; want %q", buf, msg)
	}

	// Unblock the handler's body read, then expect the second message.
	doRead <- true

	if _, err := io.ReadFull(res.Body, buf); err != nil {
		t.Fatal(err)
	}
	if string(buf) != msg2 {
		t.Fatalf("second msg = %q; want %q", buf, msg2)
	}
}
explode_data.jsonl/71700
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 469 }
[ 2830, 3393, 17536, 16, 15, 15, 23526, 6025, 3050, 93638, 1155, 353, 8840, 836, 8, 341, 4777, 3750, 284, 330, 9707, 698, 4777, 3750, 17, 284, 330, 10134, 1837, 19935, 4418, 1669, 1281, 35190, 1807, 11, 220, 16, 340, 16867, 3265, 66405,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDeleteFileShareDBEntry(t *testing.T) { var fileshare = &model.FileShareSpec{ BaseModel: &model.BaseModel{ Id: "bd5b12a8-a101-11e7-941e-d77981b584d8", }, Status: model.FileShareAvailable, ProfileId: "3769855c-a102-11e7-b772-17b880d2f537", PoolId: "3762355c-a102-11e7-b772-17b880d2f537", } var in = &model.FileShareSpec{ BaseModel: &model.BaseModel{ Id: "bd5b12a8-a101-11e7-941e-d77981b584d8", }, Status: model.FileShareInUse, ProfileId: "3769855c-a102-11e7-b772-17b880d2f537", PoolId: "3762355c-a102-11e7-b772-17b880d2f537", } t.Run("FileShare to be deleted should not be in-use", func(t *testing.T) { fileshare.Status = model.FileShareInUse mockClient := new(dbtest.Client) mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil) mockClient.On("ListFileShareAclsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil) mockClient.On("UpdateFileShare", context.NewAdminContext(), in).Return(nil, nil) mockClient.On("DeleteFileShare", context.NewAdminContext(), fileshare.Id).Return(nil) db.C = mockClient err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare) expectedError := fmt.Sprintf("only the fileshare with the status available, error, errorDeleting, can be deleted, the fileshare status is %s", in.Status) assertTestResult(t, err.Error(), expectedError) }) var sampleSnapshots = []*model.FileShareSnapshotSpec{&SampleShareSnapshots[0]} t.Run("FileShare should not be deleted if it has dependent snapshots", func(t *testing.T) { //in.Status = model.FileShareAvailable fileshare.Status = model.FileShareAvailable mockClient := new(dbtest.Client) mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(sampleSnapshots, nil) db.C = mockClient err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare) expectedError := fmt.Sprintf("file share %s can not be deleted, because it still has snapshots", in.Id) assertTestResult(t, err.Error(), expectedError) }) var sampleAcls = 
[]*model.FileShareAclSpec{&SampleFileSharesAcl[2]} t.Run("FileShare should not be deleted if it has dependent acls", func(t *testing.T) { //in.Status = model.FileShareAvailable fileshare.Status = model.FileShareAvailable mockClient := new(dbtest.Client) mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil) mockClient.On("ListFileShareAclsByShareId", context.NewAdminContext(), fileshare.Id).Return(sampleAcls, nil) db.C = mockClient err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare) expectedError := fmt.Sprintf("file share %s can not be deleted, because it still has acls", in.Id) assertTestResult(t, err.Error(), expectedError) }) t.Run("FileShare deletion when it is available", func(t *testing.T) { in.Status = model.FileShareDeleting //fileshare.Status = model.FileShareAvailable mockClient := new(dbtest.Client) mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil) mockClient.On("ListFileShareAclsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil) mockClient.On("UpdateFileShare", context.NewAdminContext(), in).Return(nil, nil) mockClient.On("DeleteFileShare", context.NewAdminContext(), fileshare.Id).Return(nil) db.C = mockClient err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare) if err != nil { t.Errorf("failed to delete fileshare, err is %v\n", err) } }) }
explode_data.jsonl/29981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1283 }
[ 2830, 3393, 6435, 1703, 12115, 3506, 5874, 1155, 353, 8840, 836, 8, 341, 2405, 1034, 19368, 284, 609, 2528, 8576, 12115, 8327, 515, 197, 66732, 1712, 25, 609, 2528, 13018, 1712, 515, 298, 67211, 25, 330, 8940, 20, 65, 16, 17, 64, 23...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateQueryingWithHandler(t *testing.T) { keptnEvent := createKeptnEvent("sockshop", "dev", "carts") dh, url, teardown := createQueryingWithHandler(t, keptnEvent, nil) defer teardown() assert.EqualValues(t, createDynatraceCredentials(t, url), dh.dtClient.Credentials()) assert.EqualValues(t, keptnEvent, dh.eventData) }
explode_data.jsonl/34366
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 4021, 2859, 287, 2354, 3050, 1155, 353, 8840, 836, 8, 341, 197, 97920, 77, 1556, 1669, 1855, 6608, 417, 77, 1556, 445, 13199, 8675, 497, 330, 3583, 497, 330, 66, 7038, 1138, 2698, 71, 11, 2515, 11, 49304, 1669, 1855, 285...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConfigCertAndKey(t *testing.T) { certFile, err := ioutil.TempFile(os.TempDir(), "kubeadm-external-etcd-test-certfile") if err != nil { t.Errorf( "failed configCertAndKey:\n\texpected: succeed creating temp CertFile file\n\tactual:%v", err, ) } defer os.Remove(certFile.Name()) if err := ioutil.WriteFile(certFile.Name(), []byte(externalEtcdCertFileContent), 0644); err != nil { t.Errorf( "failed configCertAndKey:\n\texpected: succeed writing contents to temp CertFile file %s\n\tactual:%v", certFile.Name(), err, ) } keyFile, err := ioutil.TempFile(os.TempDir(), "kubeadm-external-etcd-test-keyfile") if err != nil { t.Errorf( "failed configCertAndKey:\n\texpected: succeed creating temp KeyFile file\n\tactual:%v", err, ) } defer os.Remove(keyFile.Name()) if err := ioutil.WriteFile(keyFile.Name(), []byte(externalEtcdKeyFileContent), 0644); err != nil { t.Errorf( "failed configCertAndKey:\n\texpected: succeed writing contents to temp KeyFile file %s\n\tactual:%v", keyFile.Name(), err, ) } c := ExternalEtcdVersionCheck{Etcd: kubeadmapi.Etcd{ CertFile: certFile.Name(), KeyFile: keyFile.Name(), }} config, err := c.configCertAndKey(nil) if err != nil { t.Errorf( "failed configCertAndKey:\n\texpected: has no error\n\tactual:%v", err, ) } if config.Certificates == nil { t.Errorf( "failed configCertAndKey:\n\texpected: Certificates not equal to nil\n\tactual:%v", config.Certificates, ) } }
explode_data.jsonl/20510
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 620 }
[ 2830, 3393, 2648, 36934, 3036, 1592, 1155, 353, 8840, 836, 8, 341, 1444, 529, 1703, 11, 1848, 1669, 43144, 65009, 1703, 9638, 65009, 6184, 1507, 330, 74, 392, 3149, 76, 12, 20921, 96010, 4385, 16839, 61034, 1192, 1138, 743, 1848, 961, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestMakeLogicSigBasic(t *testing.T) { // basic checks and contracts without delegation var program []byte var args [][]byte var sk ed25519.PrivateKey var pk MultisigAccount // check empty LogicSig lsig, err := MakeLogicSig(program, args, sk, pk) require.Error(t, err) require.Equal(t, types.LogicSig{}, lsig) require.True(t, lsig.Blank()) program = []byte{1, 32, 1, 1, 34} programHash := "6Z3C3LDVWGMX23BMSYMANACQOSINPFIRF77H7N3AWJZYV6OH6GWTJKVMXY" contractSender, err := types.DecodeAddress(programHash) require.NoError(t, err) lsig, err = MakeLogicSig(program, args, sk, pk) require.NoError(t, err) require.Equal(t, program, lsig.Logic) require.Equal(t, args, lsig.Args) require.Equal(t, types.Signature{}, lsig.Sig) require.True(t, lsig.Msig.Blank()) verified := VerifyLogicSig(lsig, contractSender) require.True(t, verified) require.Equal(t, LogicSigAddress(lsig), contractSender) // check arguments args = make([][]byte, 2) args[0] = []byte{1, 2, 3} args[1] = []byte{4, 5, 6} lsig, err = MakeLogicSig(program, args, sk, pk) require.NoError(t, err) require.Equal(t, program, lsig.Logic) require.Equal(t, args, lsig.Args) require.Equal(t, types.Signature{}, lsig.Sig) require.True(t, lsig.Msig.Blank()) verified = VerifyLogicSig(lsig, contractSender) require.True(t, verified) // check serialization var lsig1 types.LogicSig encoded := msgpack.Encode(lsig) err = msgpack.Decode(encoded, &lsig1) require.NoError(t, err) require.Equal(t, lsig, lsig1) // check invalid program fails programMod := make([]byte, len(program)) copy(programMod[:], program) programMod[0] = 128 lsig, err = MakeLogicSig(programMod, args, sk, pk) require.Error(t, err) }
explode_data.jsonl/2158
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 709 }
[ 2830, 3393, 8078, 26751, 47246, 15944, 1155, 353, 8840, 836, 8, 341, 197, 322, 6770, 12341, 323, 17080, 2041, 45261, 198, 2405, 2025, 3056, 3782, 198, 2405, 2827, 52931, 3782, 198, 2405, 1901, 1578, 17, 20, 20, 16, 24, 87738, 1592, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestErrBadConnReconnect(t *testing.T) { db := newTestDB(t, "foo") defer closeDB(t, db) exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool") simulateBadConn := func(name string, hook *func() bool, op func() error) { broken, retried := false, false numOpen := db.numOpen // simulate a broken connection on the first try *hook = func() bool { if !broken { broken = true return true } retried = true return false } if err := op(); err != nil { t.Errorf(name+": %v", err) return } if !broken || !retried { t.Error(name + ": Failed to simulate broken connection") } *hook = nil if numOpen != db.numOpen { t.Errorf(name+": leaked %d connection(s)!", db.numOpen-numOpen) numOpen = db.numOpen } } // db.Exec dbExec := func() error { _, err := db.Exec("INSERT|t1|name=?,age=?,dead=?", "Gordon", 3, true) return err } simulateBadConn("db.Exec prepare", &hookPrepareBadConn, dbExec) simulateBadConn("db.Exec exec", &hookExecBadConn, dbExec) // db.Query dbQuery := func() error { rows, err := db.Query("SELECT|t1|age,name|") if err == nil { err = rows.Close() } return err } simulateBadConn("db.Query prepare", &hookPrepareBadConn, dbQuery) simulateBadConn("db.Query query", &hookQueryBadConn, dbQuery) // db.Prepare simulateBadConn("db.Prepare", &hookPrepareBadConn, func() error { stmt, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?") if err != nil { return err } stmt.Close() return nil }) // Provide a way to force a re-prepare of a statement on next execution forcePrepare := func(stmt *Stmt) { stmt.css = nil } // stmt.Exec stmt1, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?") if err != nil { t.Fatalf("prepare: %v", err) } defer stmt1.Close() // make sure we must prepare the stmt first forcePrepare(stmt1) stmtExec := func() error { _, err := stmt1.Exec("Gopher", 3, false) return err } simulateBadConn("stmt.Exec prepare", &hookPrepareBadConn, stmtExec) simulateBadConn("stmt.Exec exec", &hookExecBadConn, stmtExec) // stmt.Query stmt2, err := db.Prepare("SELECT|t1|age,name|") if err 
!= nil { t.Fatalf("prepare: %v", err) } defer stmt2.Close() // make sure we must prepare the stmt first forcePrepare(stmt2) stmtQuery := func() error { rows, err := stmt2.Query() if err == nil { err = rows.Close() } return err } simulateBadConn("stmt.Query prepare", &hookPrepareBadConn, stmtQuery) simulateBadConn("stmt.Query exec", &hookQueryBadConn, stmtQuery) }
explode_data.jsonl/16020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1034 }
[ 2830, 3393, 7747, 17082, 9701, 693, 6459, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 501, 2271, 3506, 1155, 11, 330, 7975, 1138, 16867, 3265, 3506, 1155, 11, 2927, 340, 67328, 1155, 11, 2927, 11, 330, 22599, 91, 83, 16, 91, 606, 28,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCSClientAPIs(t *testing.T) { for _, st := range testStores { st := st t.Run(st.name, func(t *testing.T) { t.Parallel() defer endTest(t, st) s := startTest(t, st) defer s.Close() // Delete client that does not exist storeDeleteClient(t, s, "client1") // Delete a client before adding it storeDeleteClient(t, s, "client2") // Adding it after the delete storeAddClient(t, s, "client2", "hbInbox") // Adding it another time should not return an error storeAddClient(t, s, "client2", "hbInbox") // Add a client storeAddClient(t, s, "client3", "hbInbox") // Add a client then.. storeAddClient(t, s, "client4", "hbInbox") // Delete it. storeDeleteClient(t, s, "client4") if st.recoverable { // Restart the store s.Close() s, state := testReOpenStore(t, st, nil) defer s.Close() if state == nil { t.Fatal("Expected state to be recovered") } if len(state.Clients) != 2 { t.Fatalf("Expected 2 clients to be recovered, got %v", len(state.Clients)) } for _, c := range state.Clients { if c.ID != "client2" && c.ID != "client3" { t.Fatalf("Unexpected recovered client: %v", c.ID) } } } }) } }
explode_data.jsonl/28298
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 544 }
[ 2830, 3393, 6412, 2959, 7082, 82, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 357, 1669, 2088, 1273, 69026, 341, 197, 18388, 1669, 357, 198, 197, 3244, 16708, 5895, 2644, 11, 2915, 1155, 353, 8840, 836, 8, 341, 298, 3244, 41288, 7957, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestAuthServer_LoginAccessToken(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() _, mockClients, cleanup := testutils.CreateTestAPIEnv(t) defer cleanup() ctx := CreateTestContext() mockClients.MockAuth.EXPECT().Login(gomock.Any(), &authpb.LoginRequest{ AccessToken: "test-token", }). Return(&authpb.LoginReply{ Token: "auth-token", ExpiresAt: 10, }, nil) authServer := &controllers.AuthServer{mockClients.MockAuth} resp, err := authServer.Login(ctx, &cloudpb.LoginRequest{ AccessToken: "test-token", }) require.NoError(t, err) assert.Equal(t, &cloudpb.LoginReply{ Token: "auth-token", ExpiresAt: 10, }, resp) }
explode_data.jsonl/37839
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 270 }
[ 2830, 3393, 5087, 5475, 79232, 37649, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 197, 6878, 7860, 47174, 11, 21290, 1669, 1273, 6031, 7251, 2271, 7082, 14359, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEncodedLen(t *testing.T) { for _, tt := range []struct { enc *Encoding n int want int }{ {RawStdEncoding, 0, 0}, {RawStdEncoding, 1, 2}, {RawStdEncoding, 2, 3}, {RawStdEncoding, 3, 4}, {RawStdEncoding, 7, 10}, {StdEncoding, 0, 0}, {StdEncoding, 1, 4}, {StdEncoding, 2, 4}, {StdEncoding, 3, 4}, {StdEncoding, 4, 8}, {StdEncoding, 7, 12}, } { if got := tt.enc.EncodedLen(tt.n); got != tt.want { t.Errorf("EncodedLen(%d): got %d, want %d", tt.n, got, tt.want) } } }
explode_data.jsonl/35056
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 272 }
[ 2830, 3393, 46795, 11271, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 3056, 1235, 341, 197, 197, 954, 220, 353, 14690, 198, 197, 9038, 262, 526, 198, 197, 50780, 526, 198, 197, 59403, 197, 197, 90, 20015, 22748, 14690...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCRDStatusSubresourceAction(t *testing.T) { actions := ` discovery.lua: | actions = {} actions["update-spec"] = {["disabled"] = false} actions["update-status"] = {["disabled"] = false} actions["update-both"] = {["disabled"] = false} return actions definitions: - name: update-both action.lua: | obj.spec = {} obj.spec.foo = "update-both" obj.status = {} obj.status.bar = "update-both" return obj - name: update-spec action.lua: | obj.spec = {} obj.spec.foo = "update-spec" return obj - name: update-status action.lua: | obj.status = {} obj.status.bar = "update-status" return obj ` Given(t). Path("crd-subresource"). And(func() { SetResourceOverrides(map[string]ResourceOverride{ "argoproj.io/StatusSubResource": { Actions: actions, }, "argoproj.io/NonStatusSubResource": { Actions: actions, }, }) }). When().CreateApp().Sync().Then(). Expect(OperationPhaseIs(OperationSucceeded)).Expect(SyncStatusIs(SyncStatusCodeSynced)). When(). Refresh(RefreshTypeNormal). Then(). // tests resource actions on a CRD using status subresource And(func(app *Application) { _, err := RunCli("app", "actions", "run", app.Name, "--kind", "StatusSubResource", "update-both") assert.NoError(t, err) text := FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.spec.foo}")).(string) assert.Equal(t, "update-both", text) text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.status.bar}")).(string) assert.Equal(t, "update-both", text) _, err = RunCli("app", "actions", "run", app.Name, "--kind", "StatusSubResource", "update-spec") assert.NoError(t, err) text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.spec.foo}")).(string) assert.Equal(t, "update-spec", text) _, err = RunCli("app", "actions", "run", app.Name, "--kind", 
"StatusSubResource", "update-status") assert.NoError(t, err) text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.status.bar}")).(string) assert.Equal(t, "update-status", text) }). // tests resource actions on a CRD *not* using status subresource And(func(app *Application) { _, err := RunCli("app", "actions", "run", app.Name, "--kind", "NonStatusSubResource", "update-both") assert.NoError(t, err) text := FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.spec.foo}")).(string) assert.Equal(t, "update-both", text) text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.status.bar}")).(string) assert.Equal(t, "update-both", text) _, err = RunCli("app", "actions", "run", app.Name, "--kind", "NonStatusSubResource", "update-spec") assert.NoError(t, err) text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.spec.foo}")).(string) assert.Equal(t, "update-spec", text) _, err = RunCli("app", "actions", "run", app.Name, "--kind", "NonStatusSubResource", "update-status") assert.NoError(t, err) text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.status.bar}")).(string) assert.Equal(t, "update-status", text) }) }
explode_data.jsonl/35665
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1485 }
[ 2830, 3393, 8973, 35, 2522, 3136, 9233, 2512, 1155, 353, 8840, 836, 8, 341, 197, 4020, 1669, 22074, 4243, 7449, 37011, 25, 9248, 220, 6168, 284, 5613, 220, 6168, 1183, 2386, 57794, 1341, 284, 314, 1183, 11978, 1341, 284, 895, 532, 220...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTimeoutHandlerRaceHeaderTimeout(t *testing.T) { defer afterTest(t) sendHi := make(chan bool, 1) writeErrors := make(chan error, 1) sayHi := HandlerFunc(func(w ResponseWriter, r *Request) { w.Header().Set("Content-Type", "text/plain") <-sendHi _, werr := w.Write([]byte("hi")) writeErrors <- werr }) timeout := make(chan time.Time, 1) // write to this to force timeouts cst := newClientServerTest(t, h1Mode, NewTestTimeoutHandler(sayHi, timeout)) defer cst.close() // Succeed without timing out: sendHi <- true res, err := cst.c.Get(cst.ts.URL) if err != nil { t.Error(err) } if g, e := res.StatusCode, StatusOK; g != e { t.Errorf("got res.StatusCode %d; expected %d", g, e) } body, _ := ioutil.ReadAll(res.Body) if g, e := string(body), "hi"; g != e { t.Errorf("got body %q; expected %q", g, e) } if g := <-writeErrors; g != nil { t.Errorf("got unexpected Write error on first request: %v", g) } // Times out: timeout <- time.Time{} res, err = cst.c.Get(cst.ts.URL) if err != nil { t.Error(err) } if g, e := res.StatusCode, StatusServiceUnavailable; g != e { t.Errorf("got res.StatusCode %d; expected %d", g, e) } body, _ = ioutil.ReadAll(res.Body) if !strings.Contains(string(body), "<title>Timeout</title>") { t.Errorf("expected timeout body; got %q", string(body)) } // Now make the previously-timed out handler speak again, // which verifies the panic is handled: sendHi <- true if g, e := <-writeErrors, ErrHandlerTimeout; g != e { t.Errorf("expected Write error of %v; got %v", e, g) } }
explode_data.jsonl/22425
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 620 }
[ 2830, 3393, 7636, 3050, 55991, 4047, 7636, 1155, 353, 8840, 836, 8, 341, 16867, 1283, 2271, 1155, 340, 32817, 13048, 1669, 1281, 35190, 1807, 11, 220, 16, 340, 24945, 13877, 1669, 1281, 35190, 1465, 11, 220, 16, 340, 1903, 352, 13048, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAddUserToTeamByTeamId(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() t.Run("add user", func(t *testing.T) { user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, _ := th.App.CreateUser(&user) err := th.App.AddUserToTeamByTeamId(th.BasicTeam.Id, ruser) require.Nil(t, err, "Should add user to the team") }) t.Run("block user", func(t *testing.T) { th.BasicTeam.AllowedDomains = "example.com" _, err := th.App.UpdateTeam(th.BasicTeam) require.Nil(t, err, "Should update the team") user := model.User{Email: strings.ToLower(model.NewId()) + "test@invalid.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, _ := th.App.CreateUser(&user) defer th.App.PermanentDeleteUser(&user) err = th.App.AddUserToTeamByTeamId(th.BasicTeam.Id, ruser) require.NotNil(t, err, "Should not add restricted user") require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") }) }
explode_data.jsonl/30272
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 436 }
[ 2830, 3393, 2212, 1474, 1249, 14597, 1359, 14597, 764, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 2822, 3244, 16708, 445, 718, 1196, 497, 2915, 1155, 353, 8840, 836, 8, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_storeTaskSpec(t *testing.T) { ctx := context.Background() tr := tb.TaskRun("foo", tb.TaskRunSpec(tb.TaskRunTaskRef("foo-task"))) ts := tb.Task("some-task", tb.TaskSpec(tb.TaskDescription("foo-task"))).Spec ts1 := tb.Task("some-task", tb.TaskSpec(tb.TaskDescription("foo-task"))).Spec want := ts.DeepCopy() // The first time we set it, it should get copied. if err := storeTaskSpec(ctx, tr, &ts); err != nil { t.Errorf("storeTaskSpec() error = %v", err) } if d := cmp.Diff(tr.Status.TaskSpec, want); d != "" { t.Fatalf(diff.PrintWantGot(d)) } // The next time, it should not get overwritten if err := storeTaskSpec(ctx, tr, &ts1); err != nil { t.Errorf("storeTaskSpec() error = %v", err) } if d := cmp.Diff(tr.Status.TaskSpec, want); d != "" { t.Fatalf(diff.PrintWantGot(d)) } }
explode_data.jsonl/60099
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 325 }
[ 2830, 3393, 14809, 6262, 8327, 1155, 353, 8840, 836, 8, 1476, 20985, 1669, 2266, 19047, 741, 25583, 1669, 16363, 28258, 6727, 445, 7975, 497, 16363, 28258, 6727, 8327, 61414, 28258, 6727, 6262, 3945, 445, 7975, 52579, 2761, 692, 57441, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestRESTList(t *testing.T) { tests := []struct { name string appliedToGroups []*types.AppliedToGroup labelSelector labels.Selector expectedObj runtime.Object }{ { name: "label selector selecting nothing", appliedToGroups: []*types.AppliedToGroup{ { Name: "foo", }, }, labelSelector: labels.Nothing(), expectedObj: &controlplane.AppliedToGroupList{}, }, { name: "label selector selecting everything", appliedToGroups: []*types.AppliedToGroup{ { Name: "foo", }, }, labelSelector: labels.Everything(), expectedObj: &controlplane.AppliedToGroupList{ Items: []controlplane.AppliedToGroup{ { ObjectMeta: v1.ObjectMeta{ Name: "foo", }, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { storage := store.NewAppliedToGroupStore() for _, obj := range tt.appliedToGroups { storage.Create(obj) } r := NewREST(storage) actualObj, err := r.List(context.TODO(), &internalversion.ListOptions{LabelSelector: tt.labelSelector}) assert.NoError(t, err) assert.ElementsMatch(t, tt.expectedObj.(*controlplane.AppliedToGroupList).Items, actualObj.(*controlplane.AppliedToGroupList).Items) }) } }
explode_data.jsonl/62399
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 552 }
[ 2830, 3393, 38307, 852, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 310, 914, 198, 197, 69898, 3440, 1249, 22173, 29838, 9242, 22829, 3440, 1249, 2808, 198, 197, 29277, 5877, 256, 9201, 14752, 269, 198, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTryContinueInFinally(t *testing.T) { const SCRIPT = ` var c3 = 0, fin3 = 0; while (c3 < 2) { try { throw "ex1"; } catch(er1) { c3 += 1; } finally { fin3 = 1; continue; } fin3 = 0; } fin3; ` testScript1(SCRIPT, intToValue(1), t) }
explode_data.jsonl/75225
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 166 }
[ 2830, 3393, 21453, 23526, 641, 23949, 1155, 353, 8840, 836, 8, 341, 4777, 53679, 284, 22074, 2405, 272, 18, 284, 220, 15, 11, 1875, 18, 284, 220, 15, 280, 5217, 320, 66, 18, 366, 220, 17, 8, 341, 19271, 6799, 341, 15287, 9581, 330...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIntArray_Chunk(t *testing.T) { gtest.C(t, func(t *gtest.T) { a1 := []int{1, 2, 3, 4, 5} array1 := garray.NewIntArrayFrom(a1) chunks := array1.Chunk(2) t.Assert(len(chunks), 3) t.Assert(chunks[0], []int{1, 2}) t.Assert(chunks[1], []int{3, 4}) t.Assert(chunks[2], []int{5}) t.Assert(array1.Chunk(0), nil) }) gtest.C(t, func(t *gtest.T) { a1 := []int{1, 2, 3, 4, 5} array1 := garray.NewIntArrayFrom(a1) chunks := array1.Chunk(3) t.Assert(len(chunks), 2) t.Assert(chunks[0], []int{1, 2, 3}) t.Assert(chunks[1], []int{4, 5}) t.Assert(array1.Chunk(0), nil) }) gtest.C(t, func(t *gtest.T) { a1 := []int{1, 2, 3, 4, 5, 6} array1 := garray.NewIntArrayFrom(a1) chunks := array1.Chunk(2) t.Assert(len(chunks), 3) t.Assert(chunks[0], []int{1, 2}) t.Assert(chunks[1], []int{3, 4}) t.Assert(chunks[2], []int{5, 6}) t.Assert(array1.Chunk(0), nil) }) gtest.C(t, func(t *gtest.T) { a1 := []int{1, 2, 3, 4, 5, 6} array1 := garray.NewIntArrayFrom(a1) chunks := array1.Chunk(3) t.Assert(len(chunks), 2) t.Assert(chunks[0], []int{1, 2, 3}) t.Assert(chunks[1], []int{4, 5, 6}) t.Assert(array1.Chunk(0), nil) }) }
explode_data.jsonl/47606
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 645 }
[ 2830, 3393, 95338, 27588, 3122, 1155, 353, 8840, 836, 8, 341, 3174, 1944, 727, 1155, 11, 2915, 1155, 353, 82038, 836, 8, 341, 197, 11323, 16, 1669, 3056, 396, 90, 16, 11, 220, 17, 11, 220, 18, 11, 220, 19, 11, 220, 20, 532, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRenderPumpStartScript(t *testing.T) { tests := []struct { name string scheme string clusterName string LogLevel string Namespace string clusterDomain string result string }{ { name: "basic", scheme: "http", clusterName: "demo", LogLevel: "INFO", Namespace: "demo-ns", clusterDomain: "", result: `set -euo pipefail /pump \ -pd-urls=http://demo-pd:2379 \ -L=INFO \ -advertise-addr=` + "`" + `echo ${HOSTNAME}` + "`" + `.demo-pump:8250 \ -config=/etc/pump/pump.toml \ -data-dir=/data \ -log-file= if [ $? == 0 ]; then echo $(date -u +"[%Y/%m/%d %H:%M:%S.%3N %:z]") "pump offline, please delete my pod" tail -f /dev/null fi`, }, { name: "basic with cluster domain", scheme: "http", clusterName: "demo", LogLevel: "INFO", Namespace: "demo-ns", clusterDomain: "demo.com", result: ` pd_url="http://demo-pd:2379" encoded_domain_url=$(echo $pd_url | base64 | tr "\n" " " | sed "s/ //g") discovery_url="demo-discovery.demo-ns.svc.demo.com:10261" until result=$(wget -qO- -T 3 http://${discovery_url}/verify/${encoded_domain_url} 2>/dev/null); do echo "waiting for the verification of PD endpoints ..." sleep $((RANDOM % 5)) done pd_url=$result set -euo pipefail /pump \ -pd-urls=$pd_url \ -L=INFO \ -advertise-addr=` + "`" + `echo ${HOSTNAME}` + "`" + `.demo-pump.demo-ns.svc.demo.com:8250 \ -config=/etc/pump/pump.toml \ -data-dir=/data \ -log-file= if [ $? == 0 ]; then echo $(date -u +"[%Y/%m/%d %H:%M:%S.%3N %:z]") "pump offline, please delete my pod" tail -f /dev/null fi`, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { model := PumpStartScriptModel{ Scheme: tt.scheme, ClusterName: tt.clusterName, LogLevel: tt.LogLevel, Namespace: tt.Namespace, ClusterDomain: tt.clusterDomain, } script, err := RenderPumpStartScript(&model) if err != nil { t.Fatal(err) } if diff := cmp.Diff(tt.result, script); diff != "" { t.Errorf("unexpected (-want, +got): %s", diff) } }) } }
explode_data.jsonl/62184
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1063 }
[ 2830, 3393, 6750, 47, 1510, 3479, 5910, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 688, 914, 198, 197, 1903, 8058, 286, 914, 198, 197, 197, 18855, 675, 256, 914, 198, 197, 24201, 4449, 414, 914, 198, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStream_PutBytes(t *testing.T) { t.Run("test", func(t *testing.T) { assert := base.NewAssert(t) testRange := getTestRange(streamPosBody, 3*streamBlockSize, 10, 10, 93) for _, i := range testRange { for _, n := range testRange { stream := NewStream() stream.SetWritePos(i) bytes := make([]byte, n) for z := 0; z < n; z++ { bytes[z] = byte(z) } stream.PutBytes(bytes) assert(stream.GetBuffer()[i:]).Equals(bytes) stream.Release() } } }) }
explode_data.jsonl/21202
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 226 }
[ 2830, 3393, 3027, 1088, 332, 7078, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 1944, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 6948, 1669, 2331, 7121, 8534, 1155, 340, 197, 18185, 6046, 1669, 633, 2271, 6046, 20574, 4859, 5444...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestDeleteVolume is a table-driven test for controllerService.DeleteVolume
// backed by a gomock cloud. Cases covered:
//   - "success normal": DeleteDisk succeeds and an empty response is returned;
//   - "success invalid volume id": DeleteDisk reports ErrNotFound, which the
//     service treats as success (delete is idempotent);
//   - "fail delete disk": an arbitrary DeleteDisk failure must surface as a
//     gRPC status with codes.Internal and a nil response.
func TestDeleteVolume(t *testing.T) { testCases := []struct { name string testFunc func(t *testing.T) }{ { name: "success normal", testFunc: func(t *testing.T) { req := &csi.DeleteVolumeRequest{ VolumeId: "vol-test", } expResp := &csi.DeleteVolumeResponse{} ctx := context.Background() mockCtl := gomock.NewController(t) defer mockCtl.Finish() mockCloud := mocks.NewMockCloud(mockCtl) mockCloud.EXPECT().DeleteDisk(gomock.Eq(ctx), gomock.Eq(req.VolumeId)).Return(true, nil) awsDriver := controllerService{ cloud: mockCloud, driverOptions: &DriverOptions{}, } resp, err := awsDriver.DeleteVolume(ctx, req) if err != nil { srvErr, ok := status.FromError(err) if !ok { t.Fatalf("Could not get error status code from error: %v", srvErr) } t.Fatalf("Unexpected error: %v", srvErr.Code()) } if !reflect.DeepEqual(resp, expResp) { t.Fatalf("Expected resp to be %+v, got: %+v", expResp, resp) } }, }, { name: "success invalid volume id", testFunc: func(t *testing.T) { req := &csi.DeleteVolumeRequest{ VolumeId: "invalid-volume-name", } expResp := &csi.DeleteVolumeResponse{} ctx := context.Background() mockCtl := gomock.NewController(t) defer mockCtl.Finish() mockCloud := mocks.NewMockCloud(mockCtl) mockCloud.EXPECT().DeleteDisk(gomock.Eq(ctx), gomock.Eq(req.VolumeId)).Return(false, cloud.ErrNotFound) awsDriver := controllerService{ cloud: mockCloud, driverOptions: &DriverOptions{}, } resp, err := awsDriver.DeleteVolume(ctx, req) if err != nil { srvErr, ok := status.FromError(err) if !ok { t.Fatalf("Could not get error status code from error: %v", srvErr) } t.Fatalf("Unexpected error: %v", srvErr.Code()) } if !reflect.DeepEqual(resp, expResp) { t.Fatalf("Expected resp to be %+v, got: %+v", expResp, resp) } }, }, { name: "fail delete disk", testFunc: func(t *testing.T) { req := &csi.DeleteVolumeRequest{ VolumeId: "test-vol", } ctx := context.Background() mockCtl := gomock.NewController(t) defer mockCtl.Finish() mockCloud := mocks.NewMockCloud(mockCtl) 
mockCloud.EXPECT().DeleteDisk(gomock.Eq(ctx), gomock.Eq(req.VolumeId)).Return(false, fmt.Errorf("DeleteDisk could not delete volume")) awsDriver := controllerService{ cloud: mockCloud, driverOptions: &DriverOptions{}, } resp, err := awsDriver.DeleteVolume(ctx, req) if err != nil { srvErr, ok := status.FromError(err) if !ok { t.Fatalf("Could not get error status code from error: %v", srvErr) } if srvErr.Code() != codes.Internal { t.Fatalf("Unexpected error: %v", srvErr.Code()) } } else { t.Fatalf("Expected error, got nil") } if resp != nil { t.Fatalf("Expected resp to be nil, got: %+v", resp) } }, }, } for _, tc := range testCases { t.Run(tc.name, tc.testFunc) } }
explode_data.jsonl/61518
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1420 }
[ 2830, 3393, 6435, 18902, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 18185, 9626, 2915, 1155, 353, 8840, 836, 340, 197, 59403, 197, 197, 515, 298, 11609, 25, 330, 5630, 4622, 756,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestAPISIXJsonSchemaValidator_Route_checkRemoteAddr validates the
// remote_addr / remote_addrs fields of a route against the "main.route"
// JSON schema. It covers plain IPv4, CIDR, IPv6, and malformed addresses
// (including an empty string inside remote_addrs), asserting the exact
// schema-validation error text for the failing cases.
// Each case unmarshals the JSON into entity.Route before validation, so a
// nil wantValidateErr means the route must pass cleanly.
func TestAPISIXJsonSchemaValidator_Route_checkRemoteAddr(t *testing.T) { tests := []struct { caseDesc string giveContent string wantNewErr error wantValidateErr error }{ { caseDesc: "correct remote_addr", giveContent: `{ "id": "1", "name": "route1", "uri": "/*", "upstream": { "nodes": [{ "host": "127.0.0.1", "port": 8080, "weight": 1 }], "type": "roundrobin" }, "remote_addr": "127.0.0.1" }`, }, { caseDesc: "correct remote_addr (CIDR)", giveContent: `{ "id": "1", "name": "route1", "uri": "/*", "upstream": { "nodes": [{ "host": "127.0.0.1", "port": 8080, "weight": 1 }], "type": "roundrobin" }, "remote_addr": "192.168.1.0/24" }`, }, { caseDesc: "invalid remote_addr", giveContent: `{ "id": "1", "name": "route1", "uri": "/*", "upstream": { "nodes": [{ "host": "127.0.0.1", "port": 8080, "weight": 1 }], "type": "roundrobin" }, "remote_addr": "127.0.0." }`, wantValidateErr: fmt.Errorf("schema validate failed: remote_addr: Must validate at least one schema (anyOf)\nremote_addr: Does not match format 'ipv4'"), }, { caseDesc: "correct remote_addrs", giveContent: `{ "id": "1", "name": "route1", "uri": "/*", "upstream": { "nodes": [{ "host": "127.0.0.1", "port": 8080, "weight": 1 }], "type": "roundrobin" }, "remote_addrs": ["127.0.0.1", "192.0.0.0/8", "::1", "fe80::1/64"] }`, }, { caseDesc: "invalid remote_addrs", giveContent: `{ "id": "1", "name": "route1", "uri": "/*", "upstream": { "nodes": [{ "host": "127.0.0.1", "port": 8080, "weight": 1 }], "type": "roundrobin" }, "remote_addrs": ["127.0.0.", "192.0.0.0/128", "::1"] }`, wantValidateErr: fmt.Errorf("schema validate failed: remote_addrs.0: Must validate at least one schema (anyOf)\nremote_addrs.0: Does not match format 'ipv4'\nremote_addrs.1: Must validate at least one schema (anyOf)\nremote_addrs.1: Does not match format 'ipv4'"), }, { caseDesc: "invalid remote_addrs (an empty string item)", giveContent: `{ "id": "1", "name": "route1", "uri": "/*", "upstream": { "nodes": [{ "host": "127.0.0.1", "port": 8080, "weight": 1 
}], "type": "roundrobin" }, "remote_addrs": [""] }`, wantValidateErr: fmt.Errorf("schema validate failed: remote_addrs.0: Must validate at least one schema (anyOf)\nremote_addrs.0: Does not match format 'ipv4'"), }, } // todo: add a test case for "remote_addr": "" for _, tc := range tests { validator, err := NewAPISIXJsonSchemaValidator("main.route") if err != nil { assert.Equal(t, tc.wantNewErr, err, tc.caseDesc) continue } route := &entity.Route{} err = json.Unmarshal([]byte(tc.giveContent), route) assert.Nil(t, err, tc.caseDesc) err = validator.Validate(route) if tc.wantValidateErr == nil { assert.Equal(t, nil, err, tc.caseDesc) continue } assert.Equal(t, tc.wantValidateErr, err, tc.caseDesc) } }
explode_data.jsonl/50252
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1646 }
[ 2830, 3393, 2537, 1637, 5396, 5014, 8632, 14256, 2568, 2133, 7200, 24703, 13986, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 2722, 11065, 286, 914, 198, 197, 3174, 533, 2762, 257, 914, 198, 197, 50780, 3564, 7747, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestTensorSerializationErrors(t *testing.T) { // String tensors cannot be serialized t1, err := NewTensor("abcd") if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) if n, err := t1.WriteContentsTo(buf); n != 0 || err == nil || buf.Len() != 0 { t.Errorf("Got (%v, %v, %v) want (0, <non-nil>, 0)", n, err, buf.Len()) } // Should fail to read a truncated value. if t1, err = NewTensor(int8(8)); err != nil { t.Fatal(err) } n, err := t1.WriteContentsTo(buf) if err != nil { t.Fatal(err) } r := bytes.NewReader(buf.Bytes()[:n-1]) if _, err = ReadTensor(t1.DataType(), t1.Shape(), r); err == nil { t.Error("ReadTensor should have failed if the tensor content was truncated") } }
explode_data.jsonl/45868
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 296 }
[ 2830, 3393, 25336, 35865, 13877, 1155, 353, 8840, 836, 8, 341, 197, 322, 923, 77087, 4157, 387, 32916, 198, 3244, 16, 11, 1848, 1669, 1532, 25336, 445, 68644, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 263...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
// TestGetCertificate_failedAttempt verifies that after a failed certificate
// issuance attempt, the Manager removes its pending state for the cert key.
// It points the ACME client at a test server that always returns 400,
// temporarily overrides the package-level hooks createCertRetryAfter and
// testDidRemoveState (restored via defer), and waits up to 5s for the
// removal callback before asserting that man.state no longer holds the key.
func TestGetCertificate_failedAttempt(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) })) defer ts.Close() d := createCertRetryAfter f := testDidRemoveState defer func() { createCertRetryAfter = d testDidRemoveState = f }() createCertRetryAfter = 0 done := make(chan struct{}) testDidRemoveState = func(ck certKey) { if ck != exampleCertKey { t.Errorf("testDidRemoveState: domain = %v; want %v", ck, exampleCertKey) } close(done) } man := &Manager{ Prompt: AcceptTOS, Client: &acme.Client{ DirectoryURL: ts.URL, }, } defer man.stopRenew() hello := clientHelloInfo(exampleDomain, true) if _, err := man.GetCertificate(hello); err == nil { t.Error("GetCertificate: err is nil") } select { case <-time.After(5 * time.Second): t.Errorf("took too long to remove the %q state", exampleCertKey) case <-done: man.stateMu.Lock() defer man.stateMu.Unlock() if v, exist := man.state[exampleCertKey]; exist { t.Errorf("state exists for %v: %+v", exampleCertKey, v) } } }
explode_data.jsonl/65049
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 437 }
[ 2830, 3393, 1949, 33202, 35060, 47052, 1155, 353, 8840, 836, 8, 341, 57441, 1669, 54320, 70334, 7121, 5475, 19886, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 6692, 69794, 19886, 69497, 340, 197, 44194, 16867, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestJobRunsController_Update_Success resumes a job run that is pending on
// a bridge by PATCHing /v2/runs/:id with the bridge's incoming token, for
// both a normal and an archived job. It then waits for the run to complete
// and asserts the run result equals the value posted in the request body.
func TestJobRunsController_Update_Success(t *testing.T) { t.Parallel() ethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) defer assertMocksCalled() app, cleanup := cltest.NewApplication(t, ethClient, ) defer cleanup() app.Start() tests := []struct { name string archived bool }{ {"normal_job", false}, {"archived_job", true}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { bta, bt := cltest.NewBridgeType(t, test.name) require.NoError(t, app.Store.CreateBridgeType(bt)) j := cltest.NewJobWithWebInitiator() j.Tasks = []models.TaskSpec{{Type: bt.Name}} require.NoError(t, app.Store.CreateJob(&j)) jr := cltest.NewJobRunPendingBridge(j) require.NoError(t, app.Store.CreateJobRun(&jr)) if test.archived { require.NoError(t, app.Store.ArchiveJob(j.ID)) } // resume run body := fmt.Sprintf(`{"id":"%v","data":{"result": "100"}}`, jr.ID.String()) headers := map[string]string{"Authorization": "Bearer " + bta.IncomingToken} url := app.Config.ClientNodeURL() + "/v2/runs/" + jr.ID.String() resp, cleanup := cltest.UnauthenticatedPatch(t, url, bytes.NewBufferString(body), headers) defer cleanup() require.Equal(t, http.StatusOK, resp.StatusCode, "Response should be successful") var respJobRun presenters.JobRun assert.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &respJobRun)) require.Equal(t, jr.ID, respJobRun.ID) jr = cltest.WaitForJobRunToComplete(t, app.Store, jr) value := cltest.MustResultString(t, jr.Result) assert.Equal(t, "100", value) }) } }
explode_data.jsonl/49851
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 660 }
[ 2830, 3393, 12245, 73920, 2051, 47393, 87161, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 769, 2959, 11, 8358, 2060, 72577, 20960, 1669, 1185, 1944, 7121, 65390, 11571, 16056, 39076, 90206, 1155, 340, 16867, 2060, 72577, 20...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestFieldContains(t *testing.T) { validate := New() type StringTest struct { Foo string `validate:"fieldcontains=Bar"` Bar string } stringTest := &StringTest{ Foo: "foobar", Bar: "bar", } errs := validate.Struct(stringTest) Equal(t, errs, nil) stringTest = &StringTest{ Foo: "foo", Bar: "bar", } errs = validate.Struct(stringTest) NotEqual(t, errs, nil) AssertError(t, errs, "StringTest.Foo", "StringTest.Foo", "Foo", "Foo", "fieldcontains") errs = validate.VarWithValue("foo", "bar", "fieldcontains") NotEqual(t, errs, nil) AssertError(t, errs, "", "", "", "", "fieldcontains") errs = validate.VarWithValue("bar", "foobarfoo", "fieldcontains") NotEqual(t, errs, nil) AssertError(t, errs, "", "", "", "", "fieldcontains") errs = validate.VarWithValue("foobarfoo", "bar", "fieldcontains") Equal(t, errs, nil) type StringTestMissingField struct { Foo string `validate:"fieldcontains=Bar"` } stringTestMissingField := &StringTestMissingField{ Foo: "foo", } errs = validate.Struct(stringTestMissingField) NotEqual(t, errs, nil) AssertError(t, errs, "StringTestMissingField.Foo", "StringTestMissingField.Foo", "Foo", "Foo", "fieldcontains") }
explode_data.jsonl/77299
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 476 }
[ 2830, 3393, 1877, 23805, 1155, 353, 8840, 836, 8, 341, 197, 7067, 1669, 1532, 2822, 13158, 923, 2271, 2036, 341, 197, 12727, 2624, 914, 1565, 7067, 2974, 2566, 13372, 28, 3428, 8805, 197, 197, 3428, 914, 198, 197, 630, 11357, 2271, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestParseResourceReturnType parses a function declaration whose return
// type is resource-annotated (`@X`) and asserts the exact AST produced,
// including byte offsets, line, and column for every node. The position
// values are tied to the precise source text passed to ParseProgram, which
// is why the fixture is left byte-identical.
func TestParseResourceReturnType(t *testing.T) { t.Parallel() result, errs := ParseProgram(` fun test(): @X {} `) require.Empty(t, errs) utils.AssertEqualWithDiff(t, []ast.Declaration{ &ast.FunctionDeclaration{ Identifier: ast.Identifier{ Identifier: "test", Pos: ast.Position{Offset: 13, Line: 2, Column: 12}, }, ParameterList: &ast.ParameterList{ Range: ast.Range{ StartPos: ast.Position{Offset: 17, Line: 2, Column: 16}, EndPos: ast.Position{Offset: 18, Line: 2, Column: 17}, }, }, ReturnTypeAnnotation: &ast.TypeAnnotation{ IsResource: true, Type: &ast.NominalType{ Identifier: ast.Identifier{ Identifier: "X", Pos: ast.Position{Offset: 22, Line: 2, Column: 21}, }, }, StartPos: ast.Position{Offset: 21, Line: 2, Column: 20}, }, FunctionBlock: &ast.FunctionBlock{ Block: &ast.Block{ Range: ast.Range{ StartPos: ast.Position{Offset: 24, Line: 2, Column: 23}, EndPos: ast.Position{Offset: 25, Line: 2, Column: 24}, }, }, }, StartPos: ast.Position{Offset: 9, Line: 2, Column: 8}, }, }, result.Declarations(), ) }
explode_data.jsonl/35991
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 566 }
[ 2830, 3393, 14463, 4783, 84362, 1155, 353, 8840, 836, 8, 1476, 3244, 41288, 7957, 2822, 9559, 11, 70817, 1669, 14775, 10690, 61528, 286, 2464, 1273, 4555, 569, 55, 5613, 197, 24183, 17957, 11180, 1155, 11, 70817, 692, 80206, 11711, 2993, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMaterializerNotASelect(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ Workflow: "workflow", SourceKeyspace: "sourceks", TargetKeyspace: "targetks", TableSettings: []*vtctldatapb.TableMaterializeSettings{{ TargetTable: "t1", SourceExpression: "update t1 set val=1", CreateDdl: "t1ddl", }}, } env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) err := env.wr.Materialize(context.Background(), ms) require.EqualError(t, err, "unrecognized statement: update t1 set val=1") }
explode_data.jsonl/61880
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 258 }
[ 2830, 3393, 13415, 3135, 2623, 32, 3379, 1155, 353, 8840, 836, 8, 341, 47691, 1669, 609, 9708, 302, 507, 266, 391, 65, 44253, 551, 6086, 515, 197, 197, 62768, 25, 981, 330, 56249, 756, 197, 197, 3608, 8850, 1306, 25, 330, 2427, 2787...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_filterArray(t *testing.T) { as := assert.New(t) t.Run("filterArray - success", func(t *testing.T) { t.Run("int", func(t *testing.T) { resp, err := lambda. New([]int{0, 1, 2, 3}). FilterList(func(idx int, obj interface{}) bool { return obj.(int)%2 == 0 }). ToIntSlice() as.Nil(err) as.Equal([]int{0, 2}, resp) }) }) t.Run("filterArray - fail", func(t *testing.T) { _, err := lambda. New(123). FilterList(func(idx int, obj interface{}) bool { return true }). ToIntSlice() as.NotNil(err) as.Equal("123(int) can't convert to []interface", err.Error()) }) t.Run("filterArray - pre-fail", func(t *testing.T) { _, err := lambda. New(123). MapList(func(idx int, obj interface{}) interface{} { return obj }). FilterList(func(idx int, obj interface{}) bool { return true }). ToIntSlice() as.NotNil(err) as.Equal("123(int) can't convert to []interface", err.Error()) }) t.Run("filterArray - not-change-self", func(t *testing.T) { req := lambda.New([]int{0, 1, 2}) req.FilterList(func(idx int, obj interface{}) bool { return obj.(int)%2 == 0 }) res, err := req.ToIntSlice() as.Nil(err) as.Equal([]int{0, 1, 2}, res) }) }
explode_data.jsonl/2715
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 562 }
[ 2830, 3393, 8727, 1857, 1155, 353, 8840, 836, 8, 341, 60451, 1669, 2060, 7121, 1155, 692, 3244, 16708, 445, 5315, 1857, 481, 2393, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 3244, 16708, 445, 396, 497, 2915, 1155, 353, 8840, 836, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestToContainerStatsZeroCoresGeneratesError(t *testing.T) { libcontainerStats := libcontainer.ContainerStats{ CgroupStats: &cgroups.Stats{ CpuStats: cgroups.CpuStats{ CpuUsage: cgroups.CpuUsage{ TotalUsage: 100, }, }, }, } _, err := toContainerStats(libcontainerStats) if err == nil { t.Error("Expected error converting container stats with empty PercpuUsage") } }
explode_data.jsonl/400
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 149 }
[ 2830, 3393, 1249, 4502, 16635, 17999, 34, 4589, 5531, 973, 1454, 1155, 353, 8840, 836, 8, 341, 93459, 3586, 16635, 1669, 3051, 3586, 33672, 16635, 515, 197, 6258, 4074, 16635, 25, 609, 66, 16753, 7758, 1862, 515, 298, 6258, 5584, 16635,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSetUserStatusUnauthenticated(t *testing.T) { r := getRouter(false) r.GET("/", SetUserStatus(), func(c *gin.Context) { // as the token cookie was not set, the "is_logged_in" should have been set // to false by the SetUserStatus middleware loggedInInterface, exists := c.Get("is_logged_in") if exists && loggedInInterface.(bool) { t.Fail() } }) // Create a response recorder w := httptest.NewRecorder() // Create a request to send to the above route (without any cookies) req, _ := http.NewRequest("GET", "/", nil) // Create the service and process the above request. r.ServeHTTP(w, req) }
explode_data.jsonl/57776
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 213 }
[ 2830, 3393, 1649, 1474, 2522, 1806, 57707, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 633, 9523, 3576, 340, 7000, 17410, 35460, 2573, 1474, 2522, 1507, 2915, 1337, 353, 8163, 9328, 8, 341, 197, 197, 322, 438, 279, 3950, 12544, 572, 537...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestAccSnapshot_fromExistingSnapshot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "second") r := SnapshotResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fromExistingSnapshot(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, }) }
explode_data.jsonl/78020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 140 }
[ 2830, 3393, 14603, 15009, 5673, 53067, 15009, 1155, 353, 8840, 836, 8, 341, 8924, 1669, 25505, 25212, 83920, 1155, 11, 330, 1370, 324, 4195, 53265, 497, 330, 5569, 1138, 7000, 1669, 68697, 4783, 31483, 8924, 20766, 2271, 1155, 11, 435, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestConsumer_Transaction is an approval test of jaeger-format trace data
// conversion. Each case feeds consumerdata.TraceData into the Consumer with
// a reporter that marshals every produced transaction.Event /
// model_error.Event to JSON and compares it against the approved golden
// file named "transaction_<case>_<index>" (or "transaction_error_...").
// Cases cover a full span with attributes/time events, server-kind request
// spans, component-only spans, custom attributes, and spans without
// attributes carrying an error status.
func TestConsumer_Transaction(t *testing.T) { for _, tc := range []struct { name string td consumerdata.TraceData }{ {name: "jaeger_full", td: consumerdata.TraceData{SourceFormat: "jaeger", Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, Spans: []*tracepb.Span{{ TraceId: []byte("FFx0"), SpanId: []byte("AAFF"), StartTime: testStartTime(), EndTime: testEndTime(), Name: testTruncatableString("HTTP GET"), ChildSpanCount: testIntToWrappersUint32(10), SameProcessAsParentSpan: testBoolToWrappersBool(true), Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ "error": testAttributeBoolValue(true), "bool.a": testAttributeBoolValue(true), "double.a": testAttributeDoubleValue(14.65), "int.a": testAttributeIntValue(148), "span.kind": testAttributeStringValue("http request"), "http.method": testAttributeStringValue("get"), "http.url": testAttributeStringValue("http://foo.bar.com?a=12"), "http.status_code": testAttributeStringValue("400"), "http.protocol": testAttributeStringValue("HTTP/1.1"), "type": testAttributeStringValue("http_request"), "component": testAttributeStringValue("foo"), "string.a.b": testAttributeStringValue("some note"), }}, TimeEvents: testTimeEvents(), }}}}, {name: "jaeger_type_request", td: consumerdata.TraceData{SourceFormat: "jaeger", Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, Spans: []*tracepb.Span{{ ParentSpanId: []byte("abcd"), Kind: tracepb.Span_SERVER, StartTime: testStartTime(), Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ "http.status_code": testAttributeIntValue(200), "http.protocol": testAttributeStringValue("HTTP"), "http.path": testAttributeStringValue("http://foo.bar.com?a=12"), }}}}}}, {name: "jaeger_type_request_result", td: consumerdata.TraceData{SourceFormat: "jaeger", Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, Spans: []*tracepb.Span{{ 
ParentSpanId: []byte("abcd"), Kind: tracepb.Span_SERVER, StartTime: testStartTime(), Status: &tracepb.Status{Code: 200}, Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ "http.url": testAttributeStringValue("localhost:8080"), }}}}}}, {name: "jaeger_type_component", td: consumerdata.TraceData{SourceFormat: "jaeger", Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, Spans: []*tracepb.Span{{ Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ "component": testAttributeStringValue("amqp"), }}}}}}, {name: "jaeger_custom", td: consumerdata.TraceData{SourceFormat: "jaeger", Spans: []*tracepb.Span{{Attributes: &tracepb.Span_Attributes{ AttributeMap: map[string]*tracepb.AttributeValue{ "a.b": testAttributeStringValue("foo")}}}}, Node: &commonpb.Node{ Identifier: &commonpb.ProcessIdentifier{}, LibraryInfo: &commonpb.LibraryInfo{}, ServiceInfo: &commonpb.ServiceInfo{}, }}}, {name: "jaeger_no_attrs", td: consumerdata.TraceData{SourceFormat: "jaeger", Node: &commonpb.Node{Identifier: &commonpb.ProcessIdentifier{HostName: "host-abc"}}, Spans: []*tracepb.Span{{ Kind: tracepb.Span_SERVER, StartTime: testStartTime(), EndTime: testEndTime(), Attributes: &tracepb.Span_Attributes{AttributeMap: map[string]*tracepb.AttributeValue{ "error": testAttributeBoolValue(true), }}, Status: &tracepb.Status{Code: 500}}}}}, } { t.Run(tc.name, func(t *testing.T) { reporter := func(ctx context.Context, req publish.PendingReq) error { require.True(t, len(req.Transformables) >= 1) for i, transformable := range req.Transformables { switch data := transformable.(type) { case *transaction.Event: tr, err := json.Marshal(data) require.NoError(t, err) approvals.AssertApproveResult(t, file(fmt.Sprintf("transaction_%s_%d", tc.name, i)), tr) case *model_error.Event: e, err := json.Marshal(data) require.NoError(t, err) approvals.AssertApproveResult(t, file(fmt.Sprintf("transaction_error_%s_%d", tc.name, 
i)), e) } } return nil } require.NoError(t, (&Consumer{Reporter: reporter}).ConsumeTraceData(context.Background(), tc.td)) }) } }
explode_data.jsonl/33583
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2118 }
[ 2830, 3393, 29968, 34932, 1311, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17130, 1669, 2088, 3056, 1235, 341, 197, 11609, 914, 198, 197, 76373, 256, 11502, 691, 46920, 1043, 198, 197, 59403, 197, 197, 47006, 25, 330, 5580, 1878, 16372, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestPrimitivePutString(t *testing.T) { client := newPrimitiveClient() var c *string a, b, c := "goodrequest", "", nil result, err := client.PutString(context.Background(), StringWrapper{Field: &a, Empty: &b, Null: c}, nil) if err != nil { t.Fatalf("PutString: %v", err) } if s := result.RawResponse.StatusCode; s != http.StatusOK { t.Fatalf("unexpected status code %d", s) } }
explode_data.jsonl/61681
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 147 }
[ 2830, 3393, 33313, 19103, 703, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 501, 33313, 2959, 741, 2405, 272, 353, 917, 198, 11323, 11, 293, 11, 272, 1669, 330, 18536, 2035, 497, 7342, 2092, 198, 9559, 11, 1848, 1669, 2943, 39825, 703, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestFIFO_addUpdate verifies that Update replaces a queued item's value in
// place: after Add("foo", 10) then Update("foo", 15) the FIFO lists exactly
// one item with the updated value, Pop yields only the latest value (checked
// with a 50ms window that no second value arrives), and Get on the popped
// key reports the item gone.
// NOTE(review): the goroutine draining Pop runs for the life of the test
// binary — a deliberate leak that is common in test code, but it never exits.
func TestFIFO_addUpdate(t *testing.T) { f := New(testFifoObjectKeyFunc) f.Add(mkFifoObj("foo", 10)) // nolint: errcheck f.Update(mkFifoObj("foo", 15)) // nolint: errcheck if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) { t.Errorf("Expected %+v, got %+v", e, a) } if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) { t.Errorf("Expected %+v, got %+v", e, a) } got := make(chan testFifoObject, 2) go func() { for { got <- Pop(f).(testFifoObject) } }() first := <-got if e, a := 15, first.val; e != a { t.Errorf("Didn't get updated value (%v), got %v", e, a) } select { case unexpected := <-got: t.Errorf("Got second value %v", unexpected.val) case <-time.After(50 * time.Millisecond): } _, exists, _ := f.Get(mkFifoObj("foo", "")) if exists { t.Errorf("item did not get removed") } }
explode_data.jsonl/69921
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 389 }
[ 2830, 3393, 37, 25997, 2891, 4289, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 1532, 8623, 37, 31497, 1190, 1592, 9626, 340, 1166, 1904, 1255, 74, 37, 31497, 5261, 445, 7975, 497, 220, 16, 15, 593, 262, 442, 308, 337, 396, 25, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_DrawingObjects_DeleteDrawingObjectWithoutNodePath(t *testing.T) { config := ReadConfiguration(t) client, ctx := PrepareTest(t, config) remoteDataFolder := remoteBaseTestDataFolder + "/DocumentElements/DrawingObjectss" localFile := "Common/test_multi_pages.docx" remoteFileName := "TestDeleteDrawingObjectWithoutNodePath.docx" UploadNextFileToStorage(t, ctx, client, GetLocalFile(localFile), remoteDataFolder + "/" + remoteFileName) options := map[string]interface{}{ "folder": remoteDataFolder, } _, err := client.WordsApi.DeleteDrawingObject(ctx, remoteFileName, int32(0), options) if err != nil { t.Error(err) } }
explode_data.jsonl/60058
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 246 }
[ 2830, 3393, 1557, 1696, 11543, 57418, 37437, 1190, 26040, 1955, 1820, 1155, 353, 8840, 836, 8, 341, 262, 2193, 1669, 4457, 7688, 1155, 340, 262, 2943, 11, 5635, 1669, 31166, 2271, 1155, 11, 2193, 340, 262, 8699, 1043, 13682, 1669, 8699,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestInstallRelease_WrongKubeVersion(t *testing.T) { c := helm.NewContext() rs := rsFixture() // TODO: Refactor this into a mock. req := &services.InstallReleaseRequest{ Chart: &chart.Chart{ Metadata: &chart.Metadata{Name: "hello", KubeVersion: ">=5.0.0"}, Templates: []*chart.Template{ {Name: "templates/hello", Data: []byte("hello: world")}, {Name: "templates/hooks", Data: []byte(manifestWithHook)}, }, }, } _, err := rs.InstallRelease(c, req) if err == nil { t.Fatalf("Expected to fail because of wrong version") } expect := "Chart requires kubernetesVersion" if !strings.Contains(err.Error(), expect) { t.Errorf("Expected %q to contain %q", err.Error(), expect) } }
explode_data.jsonl/45615
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 276 }
[ 2830, 3393, 24690, 16077, 2763, 14347, 42, 3760, 5637, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 33765, 7121, 1972, 741, 41231, 1669, 10036, 18930, 2822, 197, 322, 5343, 25, 8550, 5621, 419, 1119, 264, 7860, 624, 24395, 1669, 609, 12779...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSSH_UnknownDroplet(t *testing.T) { withTestClient(t, func(config *CmdConfig, tm *tcMocks) { tm.droplets.EXPECT().List().Return(testDropletList, nil) config.Args = append(config.Args, "missing") err := RunSSH(config) assert.EqualError(t, err, "Could not find Droplet") }) }
explode_data.jsonl/6477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 113 }
[ 2830, 3393, 62419, 62, 13790, 35, 299, 10819, 1155, 353, 8840, 836, 8, 341, 46948, 2271, 2959, 1155, 11, 2915, 8754, 353, 15613, 2648, 11, 17333, 353, 10413, 72577, 8, 341, 197, 3244, 76, 950, 299, 89492, 22402, 7285, 1005, 852, 1005,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWatchListFromZeroIndex(t *testing.T) { codec := latest.Codec pod := &api.Pod{JSONBase: api.JSONBase{ID: "foo"}} fakeClient := NewFakeEtcdClient(t) fakeClient.Data["/some/key"] = EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Dir: true, Nodes: etcd.Nodes{ &etcd.Node{ Value: runtime.EncodeOrDie(codec, pod), CreatedIndex: 1, ModifiedIndex: 1, Nodes: etcd.Nodes{}, }, &etcd.Node{ Value: runtime.EncodeOrDie(codec, pod), CreatedIndex: 2, ModifiedIndex: 2, Nodes: etcd.Nodes{}, }, }, }, Action: "get", EtcdIndex: 3, }, } h := EtcdHelper{fakeClient, codec, versioner} watching, err := h.WatchList("/some/key", 0, Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } // the existing node is detected and the index set event, open := <-watching.ResultChan() if !open { t.Fatalf("unexpected channel close") } for i := 0; i < 2; i++ { if e, a := watch.Added, event.Type; e != a { t.Errorf("Expected %v, got %v", e, a) } actualPod, ok := event.Object.(*api.Pod) if !ok { t.Fatalf("expected a pod, got %#v", event.Object) } if actualPod.ResourceVersion != 1 { t.Errorf("Expected pod with resource version %d, Got %#v", 1, actualPod) } pod.ResourceVersion = 1 if e, a := pod, event.Object; !reflect.DeepEqual(e, a) { t.Errorf("Expected %v, got %v", e, a) } } fakeClient.WaitForWatchCompletion() watching.Stop() }
explode_data.jsonl/40981
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 711 }
[ 2830, 3393, 14247, 852, 3830, 17999, 1552, 1155, 353, 8840, 836, 8, 341, 43343, 66, 1669, 5535, 20274, 66, 198, 3223, 347, 1669, 609, 2068, 88823, 90, 5370, 3978, 25, 6330, 18009, 3978, 90, 915, 25, 330, 7975, 9207, 630, 1166, 726, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestRemoteDivergedStatus(t *testing.T) { output := `On branch chore/test Your branch and 'origin/chore/test' have diverged, and have 1 and 1 different commits each, respectively. (use "git pull" to merge the remote branch into yours) nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, DIVERGED) { t.Errorf("Should be diverged status") } }
explode_data.jsonl/14065
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 24703, 35, 1524, 3556, 2522, 1155, 353, 8840, 836, 8, 341, 21170, 1669, 1565, 1925, 8870, 49571, 12697, 198, 7771, 8870, 323, 364, 8611, 21284, 460, 12697, 6, 614, 36341, 3556, 345, 437, 614, 220, 16, 323, 220, 16, 2155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetCapacity(t *testing.T) { d, _ := NewFakeDriver(t) req := csi.GetCapacityRequest{} resp, err := d.GetCapacity(context.Background(), &req) assert.Nil(t, resp) if !reflect.DeepEqual(err, status.Error(codes.Unimplemented, "")) { t.Errorf("Unexpected error: %v", err) } }
explode_data.jsonl/59394
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 116 }
[ 2830, 3393, 1949, 29392, 1155, 353, 8840, 836, 8, 341, 2698, 11, 716, 1669, 1532, 52317, 11349, 1155, 340, 24395, 1669, 272, 6321, 2234, 29392, 1900, 16094, 34653, 11, 1848, 1669, 294, 2234, 29392, 5378, 19047, 1507, 609, 2958, 340, 694...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMetricVec(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) testMetricVec(t, vec) }
explode_data.jsonl/14622
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 54310, 10050, 1155, 353, 8840, 836, 8, 341, 40213, 1669, 1532, 38, 19392, 10050, 1006, 197, 9600, 19392, 43451, 515, 298, 21297, 25, 330, 1944, 756, 298, 197, 12689, 25, 330, 8653, 1717, 756, 197, 197, 1583, 197, 197, 1294...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestConverterFlushesBatches(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Reset batch size setting upon test completion. defer row.TestingSetDatumRowConverterBatchSize(0)() // Helper to generate test name. testName := func(format roachpb.IOFileFormat, batchSize int) string { switch batchSize { case 0: return fmt.Sprintf("%s-default-batch-size", format.Format) case 1: return fmt.Sprintf("%s-always-flush", format.Format) default: return fmt.Sprintf("%s-flush-%d-records", format.Format, batchSize) } } ctx := context.Background() evalCtx := tree.MakeTestingEvalContext(nil) tests := []testSpec{ newTestSpec(t, csvFormat(), "testdata/csv/data-0"), newTestSpec(t, mysqlDumpFormat(), "testdata/mysqldump/simple.sql"), newTestSpec(t, pgDumpFormat(), "testdata/pgdump/simple.sql"), newTestSpec(t, avroFormat(t, roachpb.AvroOptions_OCF), "testdata/avro/simple.ocf"), } const endBatchSize = -1 for _, testCase := range tests { expectedNumRecords := 0 expectedNumBatches := 0 converterSpec := testCase.getConverterSpec() // Run multiple tests, increasing batch size until it exceeds the // total number of records. When batch size is 0, we run converters // with the default batch size, and use that run to figure out the // expected number of records and batches for the subsequent run. 
for batchSize := 0; batchSize != endBatchSize; { t.Run(testName(testCase.format, batchSize), func(t *testing.T) { if batchSize > 0 { row.TestingSetDatumRowConverterBatchSize(batchSize) } kvCh := make(chan row.KVBatch, batchSize) conv, err := makeInputConverter(ctx, converterSpec, &evalCtx, kvCh) if err != nil { t.Fatalf("makeInputConverter() error = %v", err) } group := ctxgroup.WithContext(ctx) group.Go(func() error { defer close(kvCh) return conv.readFiles(ctx, testCase.inputs, nil, converterSpec.Format, externalStorageFactory, security.RootUser) }) lastBatch := 0 testNumRecords := 0 testNumBatches := 0 // Read from the channel; we expect batches of testCase.batchSize // size, with the exception of the last batch. for batch := range kvCh { if batchSize > 0 { assert.True(t, lastBatch == 0 || lastBatch == batchSize) } lastBatch = len(batch.KVs) testNumRecords += lastBatch testNumBatches++ } if err := group.Wait(); err != nil { t.Fatalf("Conversion failed: %v", err) } if batchSize == 0 { expectedNumRecords = testNumRecords // Next batch: flush every record. batchSize = 1 expectedNumBatches = expectedNumRecords } else if batchSize > expectedNumRecords { // Done with this test case. batchSize = endBatchSize return } else { // Number of records and batches ought to be correct. assert.Equal(t, expectedNumRecords, testNumRecords) assert.Equal(t, expectedNumBatches, testNumBatches) // Progressively increase the batch size. batchSize += (batchSize << 2) expectedNumBatches = int(math.Ceil(float64(expectedNumRecords) / float64(batchSize))) } }) } } }
explode_data.jsonl/23766
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1234 }
[ 2830, 3393, 14920, 46874, 288, 33, 9118, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 741, 16867, 1487, 77940, 1155, 568, 7925, 1155, 340, 197, 322, 16932, 7162, 1379, 6243, 5193, 1273, 9755, 624, 16867, 2802,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCreatingSubscriptionCloseCacheFirst(t *testing.T) { cache := newSyncMapCache(t) // create a subscription s, err := cache.NewSubscription("ITB-1101") if err != nil { t.Fatalf("failed to create subscription: %s", err) } closeCache(t, cache) s.Stop() }
explode_data.jsonl/35586
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 96 }
[ 2830, 3393, 24973, 33402, 7925, 8233, 5338, 1155, 353, 8840, 836, 8, 341, 52680, 1669, 501, 12154, 2227, 8233, 1155, 692, 197, 322, 1855, 264, 15142, 198, 1903, 11, 1848, 1669, 6500, 7121, 33402, 445, 952, 33, 12, 16, 16, 15, 16, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSigPool(t *testing.T) { blockHashs := make(map[int32]*common.Hash) sigPool := NewSigPool() sigArray := make([]*asiutil.BlockSign, 0) for i := 1; i < 101; i++ { descs := sigPool.MiningDescs(int32(i)) size := common.BlockSignDepth if i <= 10 { size = i - 1 } if len(descs) != size { t.Error("MiningDescs size error") } msg := protos.MsgBlockSign{ BlockHeight: int32(i), BlockHash: common.HexToHash(strconv.Itoa(i)), Signer: common.HexToAddress(strconv.Itoa(i)), } blockSign := asiutil.NewBlockSign(&msg) blockHashs[msg.BlockHeight] = &msg.BlockHash sigArray = append(sigArray, blockSign) err := sigPool.ProcessSig(blockSign) if err != nil { t.Error("ProcessSig error: ", err) } fetchSign, err := sigPool.FetchSignature(sigArray[i-1].Hash()) if err != nil { t.Error("FetchSignature error: ", err) return } if fetchSign.Hash() != blockSign.Hash() { t.Error("FetchSignature height error: ") } } }
explode_data.jsonl/53801
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 430 }
[ 2830, 3393, 47246, 10551, 1155, 353, 8840, 836, 8, 1476, 47996, 6370, 82, 1669, 1281, 9147, 18640, 18, 17, 8465, 5464, 15103, 340, 84841, 10551, 1669, 1532, 47246, 10551, 741, 84841, 1857, 1669, 1281, 85288, 10215, 1314, 28477, 7264, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestGetUndeliveredEvents(t *testing.T) { common.SetUpMockConfig() defer func() { err := common.TruncateDB(common.OnDisk) if err != nil { t.Fatalf("error: %v", err) } }() eventByte := []byte(`event`) if cerr := SaveUndeliveredEvents("destination", eventByte); cerr != nil { t.Errorf("Error while making save undelivered events : %v\n", cerr.Error()) } eventData, err := GetUndeliveredEvents("destination") assert.Nil(t, err, "error should be nil") assert.Equal(t, string(eventData), eventData, "there should be event data") }
explode_data.jsonl/49420
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 205 }
[ 2830, 3393, 1949, 19957, 301, 43056, 7900, 1155, 353, 8840, 836, 8, 341, 83825, 4202, 2324, 11571, 2648, 741, 16867, 2915, 368, 341, 197, 9859, 1669, 4185, 8240, 26900, 3506, 57802, 8071, 47583, 340, 197, 743, 1848, 961, 2092, 341, 298,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestContainer_getContainer(t *testing.T) { x := Container{ Image: "my-image", VolumeMounts: []corev1.VolumeMount{{Name: "my-vm"}}, Command: []string{"my-cmd"}, Args: []string{"my-args"}, Env: []corev1.EnvVar{{Name: "my-envvar"}}, Resources: corev1.ResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ "cpu": resource.MustParse("2"), }, }, } c := x.getContainer(getContainerReq{}) assert.Equal(t, x.Image, c.Image) assert.Contains(t, c.VolumeMounts, c.VolumeMounts[0]) assert.Equal(t, x.Command, c.Command) assert.Equal(t, x.Args, c.Args) assert.Equal(t, x.Env, c.Env) assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c.Resources) }
explode_data.jsonl/72929
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 348 }
[ 2830, 3393, 4502, 3062, 4502, 1155, 353, 8840, 836, 8, 341, 10225, 1669, 9678, 515, 197, 53397, 25, 286, 330, 2408, 13746, 756, 197, 17446, 4661, 16284, 82, 25, 3056, 98645, 16, 79106, 16284, 2979, 675, 25, 330, 2408, 12, 7338, 48085,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInnodbLockWaitTimeout(t *testing.T) { store, clean := createMockStoreAndSetup(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists tk") tk.MustExec("create table tk (c1 int primary key, c2 int)") tk.MustExec("insert into tk values(1,1),(2,2),(3,3),(4,4),(5,5)") // tk set global tk.MustExec("set global innodb_lock_wait_timeout = 3") tk.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 50")) tk2 := testkit.NewTestKit(t, store) tk2.MustExec("use test") tk2.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 3")) tk2.MustExec("set innodb_lock_wait_timeout = 2") tk2.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 2")) // to check whether it will set to innodb_lock_wait_timeout to max value tk2.MustExec("set innodb_lock_wait_timeout = 3602") tk2.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 3600")) tk2.MustExec("set innodb_lock_wait_timeout = 2") tk3 := testkit.NewTestKit(t, store) tk3.MustExec("use test") tk3.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 3")) tk3.MustExec("set innodb_lock_wait_timeout = 1") tk3.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 1")) tk2.MustExec("set @@autocommit = 0") tk3.MustExec("set @@autocommit = 0") tk4 := testkit.NewTestKit(t, store) tk4.MustExec("use test") tk4.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 3")) tk4.MustExec("set @@autocommit = 0") // tk2 lock c1 = 1 tk2.MustExec("begin pessimistic") tk2.MustExec("select * from tk where c1 = 1 for update") // lock succ c1 = 1 // Parallel the blocking tests to accelerate CI. 
var wg sync.WaitGroup wg.Add(2) timeoutErrCh := make(chan error, 2) go func() { defer wg.Done() // tk3 try lock c1 = 1 timeout 1sec tk3.MustExec("begin pessimistic") _, err := tk3.Exec("select * from tk where c1 = 1 for update") timeoutErrCh <- err tk3.MustExec("commit") }() go func() { defer wg.Done() // tk5 try lock c1 = 1 timeout 2sec tk5 := testkit.NewTestKit(t, store) tk5.MustExec("use test") tk5.MustExec("set innodb_lock_wait_timeout = 2") tk5.MustExec("begin pessimistic") _, err := tk5.Exec("update tk set c2 = c2 - 1 where c1 = 1") timeoutErrCh <- err tk5.MustExec("rollback") }() timeoutErr := <-timeoutErrCh require.Error(t, timeoutErr) require.Equal(t, storeerr.ErrLockWaitTimeout.Error(), timeoutErr.Error()) timeoutErr = <-timeoutErrCh require.Error(t, timeoutErr) require.Equal(t, storeerr.ErrLockWaitTimeout.Error(), timeoutErr.Error()) // tk4 lock c1 = 2 tk4.MustExec("begin pessimistic") tk4.MustExec("update tk set c2 = c2 + 1 where c1 = 2") // lock succ c1 = 2 by update tk2.MustExec("set innodb_lock_wait_timeout = 1") tk2.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 1")) start := time.Now() _, err := tk2.Exec("delete from tk where c1 = 2") require.GreaterOrEqual(t, time.Since(start), 1000*time.Millisecond) require.Less(t, time.Since(start), 3000*time.Millisecond) // unit test diff should not be too big require.Equal(t, storeerr.ErrLockWaitTimeout.Error(), err.Error()) tk4.MustExec("commit") tk.MustQuery(`show variables like "innodb_lock_wait_timeout"`).Check(testkit.Rows("innodb_lock_wait_timeout 50")) tk.MustQuery(`select * from tk where c1 = 2`).Check(testkit.Rows("2 3")) // tk4 update commit work, tk2 delete should be rollbacked // test stmtRollBack caused by timeout but not the whole transaction tk2.MustExec("update tk set c2 = c2 + 2 where c1 = 2") // tk2 lock succ c1 = 2 by update tk2.MustQuery(`select * from tk where c1 = 2`).Check(testkit.Rows("2 5")) // tk2 update c2 succ 
tk3.MustExec("begin pessimistic") tk3.MustExec("select * from tk where c1 = 3 for update") // tk3 lock c1 = 3 succ start = time.Now() _, err = tk2.Exec("delete from tk where c1 = 3") // tk2 tries to lock c1 = 3 fail, this delete should be rollback, but previous update should be keeped require.GreaterOrEqual(t, time.Since(start), 1000*time.Millisecond) require.Less(t, time.Since(start), 3000*time.Millisecond) // unit test diff should not be too big require.Equal(t, storeerr.ErrLockWaitTimeout.Error(), err.Error()) tk2.MustExec("commit") tk3.MustExec("commit") tk.MustQuery(`select * from tk where c1 = 1`).Check(testkit.Rows("1 1")) tk.MustQuery(`select * from tk where c1 = 2`).Check(testkit.Rows("2 5")) // tk2 update succ tk.MustQuery(`select * from tk where c1 = 3`).Check(testkit.Rows("3 3")) // tk2 delete should fail tk.MustQuery(`select * from tk where c1 = 4`).Check(testkit.Rows("4 4")) tk.MustQuery(`select * from tk where c1 = 5`).Check(testkit.Rows("5 5")) // clean tk.MustExec("drop table if exists tk") tk4.MustExec("commit") wg.Wait() }
explode_data.jsonl/12467
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1998 }
[ 2830, 3393, 641, 77, 16853, 11989, 14190, 7636, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1855, 11571, 6093, 3036, 21821, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3553, 340, 3244, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestObjectCreateAbort(t *testing.T) { ctx := context.Background() c, rollback := makeConnectionWithContainer(t) defer rollback() out, err := c.ObjectCreate(ctx, CONTAINER, OBJECT2, true, "", "", nil) if err != nil { t.Fatal(err) } defer func() { _ = c.ObjectDelete(ctx, CONTAINER, OBJECT2) // Ignore error }() expectedContents := "foo" _, err = out.Write([]byte(expectedContents)) if err != nil { t.Error(err) } errAbort := fmt.Errorf("abort") err = out.CloseWithError(errAbort) if err != nil { t.Errorf("Unexpected error %#v", err) } _, err = c.ObjectGetString(ctx, CONTAINER, OBJECT2) if err != swift.ObjectNotFound { t.Errorf("Unexpected error: %#v", err) } }
explode_data.jsonl/12681
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 280 }
[ 2830, 3393, 1190, 4021, 85891, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 1444, 11, 60414, 1669, 1281, 4526, 2354, 4502, 1155, 340, 16867, 60414, 2822, 13967, 11, 1848, 1669, 272, 8348, 4021, 7502, 11, 16120, 34521, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_MemoryStore_write(t *testing.T) { m := NewMemoryStore() m.Open() pm := newPublishMsg(QOS_ONE, "/a/b/c", []byte{0xBE, 0xEF, 0xED}) pm.setMsgId(91) key := ibound_mid2key(pm.MsgId()) m.Put(key, pm) if len(m.messages) != 1 { t.Fatalf("message not in store") } }
explode_data.jsonl/37295
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 132 }
[ 2830, 3393, 1245, 4731, 6093, 9165, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 1532, 10642, 6093, 741, 2109, 12953, 2822, 86511, 1669, 501, 50145, 6611, 6253, 3126, 34727, 11, 3521, 64, 3470, 2899, 497, 3056, 3782, 90, 15, 85449, 11, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestResourceNames(t *testing.T) { testCases := map[string]struct { a api.ResourceList expected []api.ResourceName }{ "empty": { a: api.ResourceList{}, expected: []api.ResourceName{}, }, "values": { a: api.ResourceList{ api.ResourceCPU: resource.MustParse("100m"), api.ResourceMemory: resource.MustParse("1Gi"), }, expected: []api.ResourceName{api.ResourceMemory, api.ResourceCPU}, }, } for testName, testCase := range testCases { actualSet := ToSet(ResourceNames(testCase.a)) expectedSet := ToSet(testCase.expected) if !actualSet.Equal(expectedSet) { t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) } } }
explode_data.jsonl/59927
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 286 }
[ 2830, 3393, 4783, 7980, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 11323, 286, 6330, 20766, 852, 198, 197, 42400, 3056, 2068, 20766, 675, 198, 197, 59403, 197, 197, 1, 3194, 788, 341, 298, 11323...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRunNdt7(t *testing.T) { if testing.Short() { t.Skip("skip test in short mode") } sess := newSessionForTesting(t) defer sess.Close() builder, err := sess.NewExperimentBuilder("ndt7") if err != nil { t.Fatal(err) } if !builder.Interruptible() { t.Fatal("ndt7 not marked as interruptible") } runexperimentflow(t, builder.NewExperiment(), "") }
explode_data.jsonl/26309
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 147 }
[ 2830, 3393, 6727, 45, 8047, 22, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 20599, 1273, 304, 2805, 3856, 1138, 197, 532, 1903, 433, 1669, 501, 5283, 2461, 16451, 1155, 340, 16867, 21875, 10421, 74...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestWithLogger(t *testing.T) { t.Parallel() Convey("Given a logger and dialer", t, func() { dialer := &mockDialerStruct{} Convey("When Dial is called with logger", func() { c, _ := mockDial(dialer, WithLogger(dialer.logger)) Convey("Then the client logger should be set", func() { So(c.logger, ShouldResemble, dialer.logger) }) }) }) }
explode_data.jsonl/53402
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 2354, 7395, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 93070, 5617, 445, 22043, 264, 5925, 323, 27860, 261, 497, 259, 11, 2915, 368, 341, 197, 2698, 530, 261, 1669, 609, 16712, 35, 530, 261, 9422, 16094, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSecWebSocketAccept(t *testing.T) { nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") accept, err := getNonceAccept(nonce) if err != nil { t.Errorf("getNonceAccept: returned error %v", err) return } if !bytes.Equal(expected, accept) { t.Errorf("getNonceAccept: expected %q got %q", expected, accept) } }
explode_data.jsonl/53434
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 164 }
[ 2830, 3393, 8430, 61238, 16646, 1155, 353, 8840, 836, 8, 341, 197, 39593, 1669, 3056, 3782, 445, 67, 38, 17958, 40, 43165, 49039, 59272, 82, 57, 16680, 392, 17, 20, 73, 57, 48, 418, 1138, 42400, 1669, 3056, 3782, 445, 82, 18, 79, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestWebRequestEventInterfaces(t *testing.T) { assert := assert.New(t) ee := NewHTTPRequestEvent(&http.Request{Host: "test.com", URL: &url.URL{}}).WithHeadings("heading").WithLabel("foo", "bar") eventProvider, isEvent := MarshalEvent(ee) assert.True(isEvent) assert.Equal(HTTPRequest, eventProvider.Flag()) assert.False(eventProvider.Timestamp().IsZero()) headingProvider, isHeadingProvider := MarshalEventHeadings(ee) assert.True(isHeadingProvider) assert.Equal([]string{"heading"}, headingProvider.Headings()) metaProvider, isMetaProvider := MarshalEventMetaProvider(ee) assert.True(isMetaProvider) assert.Equal("bar", metaProvider.Labels()["foo"]) }
explode_data.jsonl/8485
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 213 }
[ 2830, 3393, 46295, 1556, 41066, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 197, 2127, 1669, 1532, 63765, 1556, 2099, 1254, 9659, 90, 9296, 25, 330, 1944, 905, 497, 5548, 25, 609, 1085, 20893, 90, 3417, 568, 2354,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPKCSKeyGeneratorFailure(t *testing.T) { dec := new(rsaDecrypterSigner) dec.privateKey = rsaTestKey generator := failingKeyGenerator{} _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator) if err != ErrCryptoFailure { t.Error("should return error on invalid algorithm") } }
explode_data.jsonl/63200
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 22242, 6412, 1592, 12561, 17507, 1155, 353, 8840, 836, 8, 341, 197, 8169, 1669, 501, 2601, 9081, 89660, 261, 7264, 261, 340, 197, 8169, 61603, 1592, 284, 68570, 2271, 1592, 198, 3174, 15312, 1669, 21394, 1592, 12561, 16094, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults()) sb.state.runConfig.Env = []string{"var1=old", "var2=fromenv"} envCommand := &instructions.EnvCommand{ Env: instructions.KeyValuePairs{ instructions.KeyValuePair{Key: "var1", Value: "val1"}, }, } err := dispatch(sb, envCommand) require.NoError(t, err) expected := []string{ "var1=val1", "var2=fromenv", } assert.Equal(t, expected, sb.state.runConfig.Env) }
explode_data.jsonl/34860
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 228 }
[ 2830, 3393, 14359, 1130, 2354, 53067, 6727, 2648, 14359, 1155, 353, 8840, 836, 8, 341, 2233, 1669, 501, 3297, 2354, 11571, 29699, 741, 24842, 1669, 501, 11283, 1900, 1883, 11, 28078, 516, 2092, 11, 501, 11066, 4117, 36944, 9147, 14032, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFloat32Between(t *testing.T) { assert := assert.New(t) var verr error var val float32 = 5.0 verr = Float32(&val).Between(1, 10)() assert.Nil(verr) verr = Float32(nil).Between(5, 10)() assert.NotNil(verr) assert.Nil(ErrValue(verr)) assert.Equal(ErrFloat32Min, ErrCause(verr)) val = 1.0 verr = Float32(&val).Between(5, 10)() assert.NotNil(verr) assert.Equal(1, ErrValue(verr)) assert.Equal(ErrFloat32Min, ErrCause(verr)) val = 5.0 verr = Float32(&val).Between(5, 10)() assert.Nil(verr) val = 10.0 verr = Float32(&val).Between(5, 10)() assert.Nil(verr) val = 11.0 verr = Float32(&val).Between(5, 10)() assert.NotNil(verr) assert.Equal(11, ErrValue(verr)) assert.Equal(ErrFloat32Max, ErrCause(verr)) }
explode_data.jsonl/11544
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 335 }
[ 2830, 3393, 5442, 18, 17, 25477, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 2405, 71467, 1465, 198, 2405, 1044, 2224, 18, 17, 284, 220, 20, 13, 15, 198, 197, 423, 81, 284, 13001, 18, 17, 2099, 831, 568, 25477...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestBuildServiceMapAddRemove drives the proxier's service map through an
// add phase (four services of different types) and a remove/update phase,
// checking the resulting map size, health-check node ports, and the set of
// stale UDP cluster IPs after deletions.
func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	ipvs := ipvstest.NewFake()
	ipset := ipsettest.NewFake(testIPSetVersion)
	fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)

	// Four services: ClusterIP, NodePort, LoadBalancer, and a LoadBalancer
	// with local external traffic policy (the only one with a health check).
	services := []*v1.Service{
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.16.55.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somesctp", "SCTP", 1236, 6321, 0)
		}),
		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
			svc.Spec.ClusterIP = "172.16.55.10"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpblah", "SCTP", 343, 676, 0)
		}),
		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.11"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpfoo", "SCTP", 8677, 30063, 7002)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.4"},
				},
			}
		}),
		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.12"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpbaz", "SCTP", 8679, 30065, 7004)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.3"},
				},
			}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.HealthCheckNodePort = 345
		}),
	}

	for i := range services {
		fp.OnServiceAdd(services[i])
	}
	result := fp.serviceMap.Update(fp.serviceChanges)
	// 12 = 3 ports per service * 4 services; each ServicePort is an entry.
	if len(fp.serviceMap) != 12 {
		t.Errorf("expected service map length 12, got %v", fp.serviceMap)
	}

	// The only-local-loadbalancer ones get added
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
	} else {
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
		}
	}

	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Remove some stuff
	// oneService is a modification of services[0] with removed first port.
	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
	})
	fp.OnServiceUpdate(services[0], oneService)
	fp.OnServiceDelete(services[1])
	fp.OnServiceDelete(services[2])
	fp.OnServiceDelete(services[3])
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
	}

	// All services but one were deleted. While you'd expect only the ClusterIPs
	// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of it's ServicePorts was deleted.
	expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
	if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.List())
	}
	for _, ip := range expectedStaleUDPServices {
		if !result.UDPStaleClusterIP.Has(ip) {
			t.Errorf("expected stale UDP service service %s", ip)
		}
	}
}
explode_data.jsonl/44363
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1968 }
[ 2830, 3393, 11066, 1860, 2227, 2212, 13021, 1155, 353, 8840, 836, 8, 341, 8230, 417, 1669, 66068, 480, 267, 477, 7121, 52317, 741, 46531, 11562, 1669, 45475, 267, 477, 7121, 52317, 741, 46531, 746, 1669, 5997, 746, 1944, 7121, 52317, 86...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCheckMassTransferWithProofs(t *testing.T) { to, path := createCheckerTestObjects(t) defer func() { to.stor.close(t) err := common.CleanTemporaryDirs(path) assert.NoError(t, err, "failed to clean test data dirs") }() entriesNum := 50 entries := generateMassTransferEntries(t, entriesNum) tx := createMassTransferWithProofs(t, entries) info := defaultCheckerInfo(t) _, err := to.tc.checkMassTransferWithProofs(tx, info) assert.Error(t, err, "checkMassTransferWithProofs did not fail prior to feature activation") assert.EqualError(t, err, "MassTransfer transaction has not been activated yet") // Activate MassTransfer. to.stor.activateFeature(t, int16(settings.MassTransfer)) _, err = to.tc.checkMassTransferWithProofs(tx, info) assert.Error(t, err, "checkMassTransferWithProofs did not fail with unissued asset") assert.EqualError(t, err, fmt.Sprintf("unknown asset %s", tx.Asset.ID.String())) to.stor.createAsset(t, testGlobal.asset0.asset.ID) _, err = to.tc.checkMassTransferWithProofs(tx, info) assert.NoError(t, err, "checkMassTransferWithProofs failed with valid massTransfer tx") // Check that smart assets are detected properly. to.stor.createSmartAsset(t, tx.Asset.ID) smartAssets, err := to.tc.checkMassTransferWithProofs(tx, info) assert.NoError(t, err) assert.Equal(t, 1, len(smartAssets)) assert.Equal(t, tx.Asset.ID, smartAssets[0]) }
explode_data.jsonl/63094
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 485 }
[ 2830, 3393, 3973, 25824, 21970, 2354, 31076, 82, 1155, 353, 8840, 836, 8, 341, 31709, 11, 1815, 1669, 1855, 35188, 2271, 11543, 1155, 692, 16867, 2915, 368, 341, 197, 31709, 1236, 269, 4653, 1155, 692, 197, 9859, 1669, 4185, 727, 2675, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetPipelineTemplate(t *testing.T) { store, manager, p := initWithPipeline(t) defer store.Close() actualTemplate, err := manager.GetPipelineTemplate(p.UUID) assert.Nil(t, err) assert.Equal(t, []byte(testWorkflow.ToStringForStore()), actualTemplate) }
explode_data.jsonl/28352
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 97 }
[ 2830, 3393, 1949, 34656, 7275, 1155, 353, 8840, 836, 8, 341, 57279, 11, 6645, 11, 281, 1669, 13864, 34656, 1155, 340, 16867, 3553, 10421, 741, 88814, 7275, 11, 1848, 1669, 6645, 2234, 34656, 7275, 1295, 39636, 340, 6948, 59678, 1155, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestUCompact_EncodeNegative(t *testing.T) { negNumber := NewUCompact(big.NewInt(-100)) var buffer = bytes.Buffer{} err := scale.NewEncoder(&buffer).Encode(negNumber) assert.Error(t, err) }
explode_data.jsonl/18412
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 75 }
[ 2830, 3393, 52, 98335, 93529, 534, 38489, 1155, 353, 8840, 836, 8, 341, 9038, 791, 2833, 1669, 1532, 52, 98335, 75616, 7121, 1072, 4080, 16, 15, 15, 1171, 2405, 4147, 284, 5820, 22622, 16094, 9859, 1669, 5452, 7121, 19921, 2099, 7573, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestInvalidJSONFields(t *testing.T) { env := NewTestVDBEnv(t) env.Cleanup("testinvalidfields_") defer env.Cleanup("testinvalidfields_") db, err := env.DBProvider.GetDBHandle("testinvalidfields") testutil.AssertNoError(t, err, "") db.Open() defer db.Close() batch := statedb.NewUpdateBatch() jsonValue1 := `{"_id":"key1","asset_name":"marble1","color":"blue","size":1,"owner":"tom"}` batch.Put("ns1", "key1", []byte(jsonValue1), version.NewHeight(1, 1)) savePoint := version.NewHeight(1, 2) err = db.ApplyUpdates(batch, savePoint) testutil.AssertError(t, err, "Invalid field _id should have thrown an error") batch = statedb.NewUpdateBatch() jsonValue1 = `{"_rev":"rev1","asset_name":"marble1","color":"blue","size":1,"owner":"tom"}` batch.Put("ns1", "key1", []byte(jsonValue1), version.NewHeight(1, 1)) savePoint = version.NewHeight(1, 2) err = db.ApplyUpdates(batch, savePoint) testutil.AssertError(t, err, "Invalid field _rev should have thrown an error") batch = statedb.NewUpdateBatch() jsonValue1 = `{"_deleted":"true","asset_name":"marble1","color":"blue","size":1,"owner":"tom"}` batch.Put("ns1", "key1", []byte(jsonValue1), version.NewHeight(1, 1)) savePoint = version.NewHeight(1, 2) err = db.ApplyUpdates(batch, savePoint) testutil.AssertError(t, err, "Invalid field _deleted should have thrown an error") batch = statedb.NewUpdateBatch() jsonValue1 = `{"~version":"v1","asset_name":"marble1","color":"blue","size":1,"owner":"tom"}` batch.Put("ns1", "key1", []byte(jsonValue1), version.NewHeight(1, 1)) savePoint = version.NewHeight(1, 2) err = db.ApplyUpdates(batch, savePoint) testutil.AssertError(t, err, "Invalid field ~version should have thrown an error") }
explode_data.jsonl/600
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 628 }
[ 2830, 3393, 7928, 5370, 8941, 1155, 353, 8840, 836, 8, 1476, 57538, 1669, 1532, 2271, 53, 3506, 14359, 1155, 340, 57538, 727, 60639, 445, 1944, 11808, 9007, 62, 1138, 16867, 6105, 727, 60639, 445, 1944, 11808, 9007, 62, 5130, 20939, 11,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUpdateQueriesPrepared(t *testing.T) { skipPreparedTests(t) var skipped []string if types.IsFormat_DOLT_1(types.Format_Default) { // skip select join for update skipped = make([]string, 0) for _, q := range queries.UpdateTests { if strings.Contains(strings.ToLower(q.WriteQuery), "join") { skipped = append(skipped, q.WriteQuery) } } } enginetest.TestUpdateQueriesPrepared(t, newDoltHarness(t).WithSkippedQueries(skipped)) }
explode_data.jsonl/38736
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 177 }
[ 2830, 3393, 4289, 55261, 4703, 7212, 1155, 353, 8840, 836, 8, 341, 1903, 13389, 4703, 7212, 18200, 1155, 340, 2405, 35157, 3056, 917, 198, 743, 4494, 4506, 4061, 1557, 35320, 62, 16, 52613, 9978, 60336, 8, 341, 197, 197, 322, 10706, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestMsgMsgMintNFTValidateBasicMethod(t *testing.T) { newMsgMintNFT := types.NewMsgMintNFT(id, denom, nftName, tokenURI, uriHash, tokenData, "", address2.String()) err := newMsgMintNFT.ValidateBasic() require.Error(t, err) newMsgMintNFT = types.NewMsgMintNFT("", denom, nftName, tokenURI, uriHash, tokenData, address.String(), address2.String()) err = newMsgMintNFT.ValidateBasic() require.Error(t, err) newMsgMintNFT = types.NewMsgMintNFT(id, "", nftName, tokenURI, uriHash, tokenData, address.String(), address2.String()) err = newMsgMintNFT.ValidateBasic() require.Error(t, err) newMsgMintNFT = types.NewMsgMintNFT(id, denom, nftName, tokenURI, uriHash, tokenData, address.String(), address2.String()) err = newMsgMintNFT.ValidateBasic() require.NoError(t, err) }
explode_data.jsonl/28176
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 303 }
[ 2830, 3393, 6611, 6611, 44, 396, 45, 3994, 17926, 15944, 3523, 1155, 353, 8840, 836, 8, 341, 8638, 6611, 44, 396, 45, 3994, 1669, 4494, 7121, 6611, 44, 396, 45, 3994, 3724, 11, 49744, 11, 308, 723, 675, 11, 3950, 10301, 11, 13071, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestBackupRestoreSubsetCreatedStats backs up two tables with created
// statistics, deletes those statistics, then restores only one table and
// checks that only the restored table's statistics come back.
func TestBackupRestoreSubsetCreatedStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numAccounts = 1
	_, _, sqlDB, _, cleanupFn := BackupRestoreTestSetup(t, singleNode, numAccounts, InitNone)
	defer cleanupFn()

	// Disable automatic stats so the only statistics present are the ones
	// this test creates explicitly.
	sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
	sqlDB.Exec(t, `CREATE TABLE data.foo (a INT)`)
	sqlDB.Exec(t, `CREATE STATISTICS foo_stats FROM data.foo`)
	sqlDB.Exec(t, `CREATE STATISTICS bank_stats FROM data.bank`)
	sqlDB.Exec(t, `BACKUP data.bank, data.foo TO $1 WITH revision_history`, LocalFoo)
	// Drop the live statistics so anything seen after RESTORE must have come
	// from the backup.
	sqlDB.Exec(t, `DELETE FROM system.table_statistics WHERE name = 'foo_stats' OR name = 'bank_stats'`)
	sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
	// Restore only data.bank (not data.foo) into the new database.
	sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH skip_missing_foreign_keys, into_db = $2`, LocalFoo, "data 2")

	// Ensure that the bank_stats have been restored, but foo_stats have not.
	sqlDB.CheckQueryResults(t,
		`SELECT name, "columnIDs", "rowCount", "distinctCount", "nullCount" FROM system.table_statistics`,
		[][]string{
			{"bank_stats", "{1}", "1", "1", "0"}, // id column
			{"bank_stats", "{2}", "1", "1", "0"}, // balance column
			{"bank_stats", "{3}", "1", "1", "0"}, // payload column
		})
}
explode_data.jsonl/57620
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 468 }
[ 2830, 3393, 56245, 56284, 70584, 11694, 16635, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 4777, 1629, 41369, 284, 220, 16, 198, 197, 6878, 8358, 5704, 3506, 11, 8358, 21290, 24911, 1669, 43438, 56284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1