text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestStruct(t *testing.T) { o := &Options{ NotifyNoData: true, NotifyAudit: false, Locked: false, NoDataTimeframe: 120, NewHostDelay: 600, RequireFullWindow: true, Silenced: map[string]int{}, } f := &Monitor{ Message: "Test message", Query: "max(last_1m):max:custom.zookeeper.isleader{env:prod} < 1", Name: "Monitor name", Options: o, Type: "metric alert", Tags: make([]string, 0), OverallState: "No Data", } m := NewStruct(f) k := m.Keys() if m.TypeOf("NIL") != "nil" { t.Errorf("TypeOf should be nil for unknown path, but got %s", m.TypeOf("NIL")) } if m.TypeOf("Monitor.Name") != "string" { t.Errorf("TypeOf should be string, but got %s", m.TypeOf("Monitor.Name")) } if m.Value("Monitor.Name") != "Monitor name" { t.Errorf("Value should be 'Monitor name', but got %s", m.Value("Monitor.Name")) } if len(k) != 21 { t.Errorf("Number of keys should be 21, but got %d", len(k)) } }
explode_data.jsonl/68477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 471 }
[ 2830, 3393, 9422, 1155, 353, 8840, 836, 8, 1476, 22229, 1669, 609, 3798, 515, 197, 197, 28962, 2753, 1043, 25, 414, 830, 345, 197, 197, 28962, 74516, 25, 981, 895, 345, 197, 197, 49010, 25, 310, 895, 345, 197, 197, 2753, 1043, 1462,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestIsOnlineDDLTableName(t *testing.T) { names := []string{ "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_gho", "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_ghc", "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_del", "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114013_new", "_84371a37_6153_11eb_9917_f875a4d24e90_20210128122816_vrepl", "_table_old", "__table_old", } for _, tableName := range names { assert.True(t, IsOnlineDDLTableName(tableName)) } irrelevantNames := []string{ "t", "_table_new", "__table_new", "_table_gho", "_table_ghc", "_table_del", "_table_vrepl", "table_old", } for _, tableName := range irrelevantNames { assert.False(t, IsOnlineDDLTableName(tableName)) } }
explode_data.jsonl/42827
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 389 }
[ 2830, 3393, 3872, 19598, 58781, 33227, 1155, 353, 8840, 836, 8, 341, 93940, 1669, 3056, 917, 515, 197, 197, 35089, 19, 68, 20, 7628, 69, 23, 15, 62, 18, 20, 19, 65, 62, 16, 16, 3065, 62, 23, 17, 4385, 761, 23, 22, 20, 64, 19, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestClient(t *testing.T) { t.Parallel() storageConfig := CreateStorageConfig("Client") var err error svc := createDynamoDB() storage := New(svc, storageConfig) err = storage.CreateSchema() assert.Nil(t, err, "%s", err) defer storage.DropSchema() client := &osin.DefaultClient{ Id: "1234", Secret: "aabbccdd", } got, err := storage.GetClient(client.Id) assert.Equal(t, ErrClientNotFound, err) assert.Nil(t, got) err = storage.CreateClient(client) assert.Nil(t, err, "%s", err) got, err = storage.GetClient(client.Id) assert.Nil(t, err, "%s", err) assert.Equal(t, client, got) err = storage.RemoveClient(client.Id) assert.Nil(t, err, "%s", err) got, err = storage.GetClient(client.Id) assert.Equal(t, ErrClientNotFound, err) assert.Nil(t, got) }
explode_data.jsonl/78079
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 317 }
[ 2830, 3393, 2959, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 16172, 2648, 1669, 4230, 5793, 2648, 445, 2959, 1138, 2405, 1848, 1465, 198, 1903, 7362, 1669, 1855, 35, 85608, 3506, 741, 197, 16172, 1669, 1532, 1141, 7362, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGover(t *testing.T) { ctx := context.WithValue(context.Background(), "name", "Mr. Meowingston") var cat Animal var timeNull time.Duration _, err := New(timeNull, cat.setName) assert.Error(t, err) gover, err := New(time.Second*5, cat.setName) assert.NoError(t, err) err = gover.Run() assert.Error(t, err) assert.Equal(t, "", cat.Name) gover.Context = ctx err = gover.Run() assert.NoError(t, err) assert.Equal(t, "Mr. Meowingston", cat.Name) }
explode_data.jsonl/63038
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 190 }
[ 2830, 3393, 38, 1975, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 26124, 1130, 5378, 19047, 1507, 330, 606, 497, 330, 12275, 13, 2157, 23184, 7720, 1138, 2405, 8251, 21292, 271, 2405, 882, 3280, 882, 33795, 198, 197, 6878, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_Problem49(t *testing.T) { qs := []question49{ question49{ para49{[]string{"eat", "tea", "tan", "ate", "nat", "bat"}}, ans49{[][]string{[]string{"ate", "eat", "tea"}, []string{"nat", "tan"}, []string{"bat"}}}, }, } fmt.Printf("------------------------Leetcode Problem 49------------------------\n") for _, q := range qs { _, p := q.ans49, q.para49 fmt.Printf("【input】:%v 【output】:%v\n", p, groupAnagrams(p.one)) } fmt.Printf("\n\n\n") }
explode_data.jsonl/28249
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 211 }
[ 2830, 3393, 16670, 10121, 19, 24, 1155, 353, 8840, 836, 8, 1476, 18534, 82, 1669, 3056, 7841, 19, 24, 4257, 197, 197, 7841, 19, 24, 515, 298, 197, 14794, 19, 24, 90, 1294, 917, 4913, 32066, 497, 330, 81594, 497, 330, 52591, 497, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMatMultiply(t *testing.T) { mat1 := NewMatWithSize(101, 102, MatTypeCV8U) mat2 := NewMatWithSize(101, 102, MatTypeCV8U) mat3 := NewMat() Multiply(mat1, mat2, &mat3) if mat3.Empty() { t.Error("TestMatMultiply dest mat3 should not be empty.") } }
explode_data.jsonl/81715
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 112 }
[ 2830, 3393, 11575, 95155, 1155, 353, 8840, 836, 8, 341, 59874, 16, 1669, 1532, 11575, 2354, 1695, 7, 16, 15, 16, 11, 220, 16, 15, 17, 11, 6867, 929, 19589, 23, 52, 340, 59874, 17, 1669, 1532, 11575, 2354, 1695, 7, 16, 15, 16, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestObservabilityConfiguration(t *testing.T) { observabilityConfigTests := []struct { name string wantErr bool wantController interface{} config *corev1.ConfigMap }{{ name: "observability configuration with all inputs", wantErr: false, wantController: &Observability{ LoggingURLTemplate: "https://logging.io", FluentdSidecarOutputConfig: "the-config", FluentdSidecarImage: "gcr.io/log-stuff/fluentd:latest", EnableVarLogCollection: true, RequestLogTemplate: `{"requestMethod": "{{.Request.Method}}"}`, RequestMetricsBackend: "stackdriver", }, config: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: ObservabilityConfigName, }, Data: map[string]string{ "logging.enable-var-log-collection": "true", "logging.fluentd-sidecar-image": "gcr.io/log-stuff/fluentd:latest", "logging.fluentd-sidecar-output-config": "the-config", "logging.revision-url-template": "https://logging.io", "logging.write-request-logs": "true", "logging.request-log-template": `{"requestMethod": "{{.Request.Method}}"}`, "metrics.request-metrics-backend-destination": "stackdriver", }, }, }, { name: "observability config with no map", wantErr: false, wantController: &Observability{ EnableVarLogCollection: false, LoggingURLTemplate: defaultLogURLTemplate, RequestLogTemplate: "", RequestMetricsBackend: "", }, config: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: ObservabilityConfigName, }, }, }, { name: "observability configuration with no side car image", wantErr: true, wantController: (*Observability)(nil), config: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: ObservabilityConfigName, }, Data: map[string]string{ "logging.enable-var-log-collection": "true", }, }, }, { name: "invalid request log template", wantErr: true, wantController: (*Observability)(nil), config: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: ObservabilityConfigName, }, Data: 
map[string]string{ "logging.request-log-template": `{{ something }}`, }, }, }} for _, tt := range observabilityConfigTests { t.Run(tt.name, func(t *testing.T) { actualController, err := NewObservabilityFromConfigMap(tt.config) if (err != nil) != tt.wantErr { t.Fatalf("Test: %q; NewObservabilityFromConfigMap() error = %v, WantErr %v", tt.name, err, tt.wantErr) } if diff := cmp.Diff(actualController, tt.wantController); diff != "" { t.Fatalf("Test: %q; want %v, but got %v", tt.name, tt.wantController, actualController) } }) } }
explode_data.jsonl/53566
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1295 }
[ 2830, 3393, 37763, 2897, 7688, 1155, 353, 8840, 836, 8, 341, 197, 22764, 2897, 2648, 18200, 1669, 3056, 1235, 341, 197, 11609, 1843, 914, 198, 197, 50780, 7747, 286, 1807, 198, 197, 50780, 2051, 3749, 16094, 197, 25873, 260, 353, 98645,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestScheduler(t *testing.T) { t.Run("Test run once", func(t *testing.T) { clusterState := testClusterState("testCluster", 1) reconRepo := reconciliation.NewInMemoryReconciliationRepository() scheduler := newScheduler(logger.NewLogger(true)) require.NoError(t, scheduler.RunOnce(clusterState, reconRepo, &SchedulerConfig{})) requiredReconciliationEntity(t, reconRepo, clusterState) }) t.Run("Test run", func(t *testing.T) { reconRepo := reconciliation.NewInMemoryReconciliationRepository() scheduler := newScheduler(logger.NewLogger(true)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() start := time.Now() clusterState := testClusterState("testCluster", 1) err := scheduler.Run(ctx, &ClusterStatusTransition{ conn: db.NewTestConnection(t), inventory: &cluster.MockInventory{ //this will cause the creation of a reconciliation for the same cluster multiple times: ClustersToReconcileResult: []*cluster.State{ clusterState, }, //simulate an updated cluster status (required when transition updates the cluster status) UpdateStatusResult: func() *cluster.State { updatedState := testClusterState("testCluster", 1) updatedState.Status.Status = model.ClusterStatusReconciling return updatedState }(), GetResult: func() *cluster.State { return clusterState }(), }, reconRepo: reconRepo, logger: logger.NewLogger(true), }, &SchedulerConfig{ InventoryWatchInterval: 250 * time.Millisecond, ClusterReconcileInterval: 100 * time.Second, ClusterQueueSize: 5, }) require.NoError(t, err) time.Sleep(500 * time.Millisecond) //give it some time to shutdown require.WithinDuration(t, start, time.Now(), 4*time.Second) requiredReconciliationEntity(t, reconRepo, clusterState) }) }
explode_data.jsonl/20371
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 682 }
[ 2830, 3393, 38878, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 2271, 1598, 3055, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 197, 18855, 1397, 1669, 1273, 28678, 1397, 445, 1944, 28678, 497, 220, 16, 340, 197, 17200, 443, 25243,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFS_Logs(t *testing.T) { t.Parallel() require := require.New(t) rpcPort := 0 c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { rpcPort = c.Ports.RPC c.Client = &testutil.ClientConfig{ Enabled: true, } }) defer s.Stop() //TODO There should be a way to connect the client to the servers in //makeClient above require.NoError(c.Agent().SetServers([]string{fmt.Sprintf("127.0.0.1:%d", rpcPort)})) index := uint64(0) testutil.WaitForResult(func() (bool, error) { nodes, qm, err := c.Nodes().List(&QueryOptions{WaitIndex: index}) if err != nil { return false, err } index = qm.LastIndex if len(nodes) != 1 { return false, fmt.Errorf("expected 1 node but found: %s", pretty.Sprint(nodes)) } if nodes[0].Status != "ready" { return false, fmt.Errorf("node not ready: %s", nodes[0].Status) } return true, nil }, func(err error) { t.Fatalf("err: %v", err) }) var input strings.Builder input.Grow(units.MB) lines := 80 * units.KB for i := 0; i < lines; i++ { fmt.Fprintf(&input, "%d\n", i) } job := &Job{ ID: helper.StringToPtr("TestFS_Logs"), Region: helper.StringToPtr("global"), Datacenters: []string{"dc1"}, Type: helper.StringToPtr("batch"), TaskGroups: []*TaskGroup{ { Name: helper.StringToPtr("TestFS_LogsGroup"), Tasks: []*Task{ { Name: "logger", Driver: "mock_driver", Config: map[string]interface{}{ "stdout_string": input.String(), }, }, }, }, }, } jobs := c.Jobs() jobResp, _, err := jobs.Register(job, nil) require.NoError(err) index = jobResp.EvalCreateIndex evals := c.Evaluations() testutil.WaitForResult(func() (bool, error) { evalResp, qm, err := evals.Info(jobResp.EvalID, &QueryOptions{WaitIndex: index}) if err != nil { return false, err } if evalResp.BlockedEval != "" { t.Fatalf("Eval blocked: %s", pretty.Sprint(evalResp)) } index = qm.LastIndex if evalResp.Status != "complete" { return false, fmt.Errorf("eval status: %v", evalResp.Status) } return true, nil }, func(err error) { t.Fatalf("err: %v", err) }) allocID := "" testutil.WaitForResult(func() (bool, 
error) { allocs, _, err := jobs.Allocations(*job.ID, true, &QueryOptions{WaitIndex: index}) if err != nil { return false, err } if len(allocs) != 1 { return false, fmt.Errorf("unexpected number of allocs: %d", len(allocs)) } if allocs[0].ClientStatus != "complete" { return false, fmt.Errorf("alloc not complete: %s", allocs[0].ClientStatus) } allocID = allocs[0].ID return true, nil }, func(err error) { t.Fatalf("err: %v", err) }) alloc, _, err := c.Allocations().Info(allocID, nil) require.NoError(err) for i := 0; i < 3; i++ { stopCh := make(chan struct{}) defer close(stopCh) frames, errors := c.AllocFS().Logs(alloc, false, "logger", "stdout", "start", 0, stopCh, nil) var result bytes.Buffer READ_FRAMES: for { select { case f := <-frames: if f == nil { break READ_FRAMES } result.Write(f.Data) case err := <-errors: // Don't Fatal here as the other assertions may // contain helpeful information. t.Errorf("Error: %v", err) } } // Check length assert.Equal(t, input.Len(), result.Len(), "file size mismatch") // Check complete ordering for i := 0; i < lines; i++ { line, err := result.ReadBytes('\n') require.NoErrorf(err, "unexpected error on line %d: %v", i, err) require.Equal(fmt.Sprintf("%d\n", i), string(line)) } } }
explode_data.jsonl/39049
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1515 }
[ 2830, 3393, 8485, 44083, 82, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 17957, 1669, 1373, 7121, 1155, 340, 7000, 3992, 7084, 1669, 220, 15, 198, 1444, 11, 274, 1669, 1281, 2959, 1155, 11, 2092, 11, 2915, 1337, 353, 1944, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGarbageCollectorSync(t *testing.T) { serverResources := []*metav1.APIResourceList{ { GroupVersion: "v1", APIResources: []metav1.APIResource{ {Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}}, }, }, } fakeDiscoveryClient := &fakeServerResources{ PreferredResources: serverResources, Error: nil, Lock: sync.Mutex{}, InterfaceUsedCount: 0, } testHandler := &fakeActionHandler{ response: map[string]FakeResponse{ "GET" + "/api/v1/pods": { 200, []byte("{}"), }, }, } srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP) defer srv.Close() clientConfig.ContentConfig.NegotiatedSerializer = nil client, err := kubernetes.NewForConfig(clientConfig) if err != nil { t.Fatal(err) } rm := &testRESTMapper{legacyscheme.Registry.RESTMapper()} metaOnlyClientPool := dynamic.NewClientPool(clientConfig, rm, dynamic.LegacyAPIPathResolverFunc) clientPool := dynamic.NewClientPool(clientConfig, rm, dynamic.LegacyAPIPathResolverFunc) podResource := map[schema.GroupVersionResource]struct{}{ {Group: "", Version: "v1", Resource: "pods"}: {}, } sharedInformers := informers.NewSharedInformerFactory(client, 0) alwaysStarted := make(chan struct{}) close(alwaysStarted) gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted) if err != nil { t.Fatal(err) } stopCh := make(chan struct{}) defer close(stopCh) go gc.Run(1, stopCh) go gc.Sync(fakeDiscoveryClient, 10*time.Millisecond, stopCh) // Wait until the sync discovers the initial resources fmt.Printf("Test output") time.Sleep(1 * time.Second) err = expectSyncNotBlocked(fakeDiscoveryClient) if err != nil { t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err) } // Simulate the discovery client returning an error fakeDiscoveryClient.setPreferredResources(nil) fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()")) // Wait 
until sync discovers the change time.Sleep(1 * time.Second) // Remove the error from being returned and see if the garbage collector sync is still working fakeDiscoveryClient.setPreferredResources(serverResources) fakeDiscoveryClient.setError(nil) err = expectSyncNotBlocked(fakeDiscoveryClient) if err != nil { t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err) } }
explode_data.jsonl/62008
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 904 }
[ 2830, 3393, 43930, 20652, 53694, 12154, 1155, 353, 8840, 836, 8, 341, 41057, 11277, 1669, 29838, 4059, 402, 16, 24922, 4783, 852, 515, 197, 197, 515, 298, 197, 2808, 5637, 25, 330, 85, 16, 756, 298, 197, 7082, 11277, 25, 3056, 4059, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestDeadlockBreakpoint(t *testing.T) { skipOn(t, "upstream issue - https://github.com/golang/go/issues/29322", "pie") deadlockBp := proc.FatalThrow if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 11) { deadlockBp = proc.UnrecoveredPanic } withTestProcess("testdeadlock", t, func(p *proc.Target, fixture protest.Fixture) { assertNoError(p.Continue(), t, "Continue()") bp := p.CurrentThread().Breakpoint() if bp.Breakpoint == nil || bp.Name != deadlockBp { t.Fatalf("did not stop at deadlock breakpoint %v", bp) } }) }
explode_data.jsonl/56328
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 209 }
[ 2830, 3393, 28320, 1023, 22524, 2768, 1155, 353, 8840, 836, 8, 341, 1903, 13389, 1925, 1155, 11, 330, 454, 4027, 4265, 481, 3703, 1110, 5204, 905, 4846, 37287, 25525, 38745, 14, 17, 24, 18, 17, 17, 497, 330, 20283, 1138, 197, 33754, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_Pagination_PrevNext_NextPostLinks(t *testing.T) { doc := testutil.CreateHTML() body := dom.QuerySelector(doc, "body") root := testutil.CreateDiv(0) dom.AppendChild(body, root) anchor := testutil.CreateAnchor("page2", "next post") dom.AppendChild(root, anchor) assertDefaultDocumentNextLink(t, doc, nil) }
explode_data.jsonl/10836
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 118 }
[ 2830, 3393, 1088, 10353, 1088, 7282, 5847, 1604, 427, 4133, 24089, 1155, 353, 8840, 836, 8, 341, 59536, 1669, 1273, 1314, 7251, 5835, 741, 35402, 1669, 4719, 15685, 5877, 19153, 11, 330, 2599, 5130, 33698, 1669, 1273, 1314, 7251, 12509, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_HealthCheckNodePortWhenTerminating(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) fp.servicesSynced = true // fp.endpointsSynced = true fp.endpointSlicesSynced = true serviceName := "svc1" namespaceName := "ns1" fp.OnServiceAdd(&v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName}, Spec: v1.ServiceSpec{ ClusterIP: "172.20.1.1", Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}}, }, }) tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), Namespace: namespaceName, Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ Name: utilpointer.StringPtr(""), Port: utilpointer.Int32Ptr(80), Protocol: &tcpProtocol, }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)}, NodeName: utilpointer.StringPtr(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)}, NodeName: utilpointer.StringPtr(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)}, }, { // not ready endpoints should be ignored Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)}, NodeName: utilpointer.StringPtr(testHostname), }}, } fp.OnEndpointSliceAdd(endpointSlice) result := fp.endpointsMap.Update(fp.endpointsChanges) if len(result.HCEndpointsLocalIPSize) != 1 { t.Errorf("unexpected number of health check node ports, expected 1 but got: %d", len(result.HCEndpointsLocalIPSize)) } // set 
all endpoints to terminating endpointSliceTerminating := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), Namespace: namespaceName, Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ Name: utilpointer.StringPtr(""), Port: utilpointer.Int32Ptr(80), Protocol: &tcpProtocol, }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ Ready: utilpointer.BoolPtr(false), Serving: utilpointer.BoolPtr(true), Terminating: utilpointer.BoolPtr(false), }, NodeName: utilpointer.StringPtr(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ Ready: utilpointer.BoolPtr(false), Serving: utilpointer.BoolPtr(true), Terminating: utilpointer.BoolPtr(true), }, NodeName: utilpointer.StringPtr(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ Ready: utilpointer.BoolPtr(false), Serving: utilpointer.BoolPtr(true), Terminating: utilpointer.BoolPtr(true), }, NodeName: utilpointer.StringPtr(testHostname), }, { // not ready endpoints should be ignored Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ Ready: utilpointer.BoolPtr(false), Serving: utilpointer.BoolPtr(false), Terminating: utilpointer.BoolPtr(true), }, NodeName: utilpointer.StringPtr(testHostname), }}, } fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating) result = fp.endpointsMap.Update(fp.endpointsChanges) if len(result.HCEndpointsLocalIPSize) != 0 { t.Errorf("unexpected number of health check node ports, expected 0 but got: %d", len(result.HCEndpointsLocalIPSize)) } }
explode_data.jsonl/44378
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1633 }
[ 2830, 3393, 62, 14542, 3973, 1955, 7084, 4498, 21209, 64283, 1155, 353, 8840, 836, 8, 341, 8230, 417, 1669, 66068, 480, 267, 477, 7121, 52317, 741, 46531, 11562, 1669, 45475, 267, 477, 7121, 52317, 741, 46531, 746, 1669, 5997, 746, 1944...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestBindConditional(t *testing.T) { env := environment(map[string]interface{}{ "a": NewOutputType(BoolType), "b": NewPromiseType(BoolType), }) scope := env.scope() cases := []exprTestCase{ {x: "true ? 0 : 1", t: NumberType}, {x: "true ? 0 : false", t: NewUnionType(NumberType, BoolType)}, {x: "true ? a : b", t: NewOutputType(BoolType)}, // Lifted operations {x: "a ? 0 : 1", t: NewOutputType(NumberType)}, {x: "b ? 0 : 1", t: NewPromiseType(NumberType)}, {x: "a ? 0 : false", t: NewOutputType(NewUnionType(NumberType, BoolType))}, {x: "b ? 0 : false", t: NewPromiseType(NewUnionType(NumberType, BoolType))}, {x: "a ? a : b", t: NewOutputType(BoolType)}, {x: "b ? b : b", t: NewPromiseType(BoolType)}, } for _, c := range cases { t.Run(c.x, func(t *testing.T) { expr, diags := BindExpressionText(c.x, scope, hcl.Pos{}) assert.Len(t, diags, 0) assert.Equal(t, c.t, expr.Type()) _, ok := expr.(*ConditionalExpression) assert.True(t, ok) assert.Equal(t, c.x, fmt.Sprintf("%v", expr)) }) } }
explode_data.jsonl/42564
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 465 }
[ 2830, 3393, 9950, 79233, 1155, 353, 8840, 836, 8, 341, 57538, 1669, 4573, 9147, 14032, 31344, 67066, 197, 197, 56693, 788, 1532, 5097, 929, 7, 11233, 929, 1326, 197, 197, 1, 65, 788, 1532, 21041, 929, 7, 11233, 929, 1326, 197, 3518, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQueuedRetry_ThrottleError(t *testing.T) { qCfg := CreateDefaultQueueSettings() qCfg.NumConsumers = 1 rCfg := CreateDefaultRetrySettings() rCfg.InitialInterval = 10 * time.Millisecond be := newBaseExporter(defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) }) mockR := newMockRequest(context.Background(), 2, NewThrottleRetry(errors.New("throttle error"), 100*time.Millisecond)) start := time.Now() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. droppedItems, err := be.sender.send(mockR) require.NoError(t, err) assert.Equal(t, 0, droppedItems) }) ocs.awaitAsyncProcessing() // The initial backoff is 10ms, but because of the throttle this should wait at least 100ms. assert.True(t, 100*time.Millisecond < time.Since(start)) mockR.checkNumRequests(t, 2) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) require.Zero(t, be.qrSender.queue.Size()) }
explode_data.jsonl/46010
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 453 }
[ 2830, 3393, 25776, 3260, 51560, 62, 1001, 27535, 1454, 1155, 353, 8840, 836, 8, 341, 18534, 42467, 1669, 4230, 3675, 7554, 6086, 741, 18534, 42467, 39847, 41966, 388, 284, 220, 16, 198, 7000, 42467, 1669, 4230, 3675, 51560, 6086, 741, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMuxSubroutesBasic(t *testing.T) { hIndex := HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte("index")) }) hArticlesList := HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte("articles-list")) }) hSearchArticles := HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte("search-articles")) }) hGetArticle := HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte(fmt.Sprintf("get-article:%s", URLParam(rc, "id")))) }) hSyncArticle := HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte(fmt.Sprintf("sync-article:%s", URLParam(rc, "id")))) }) r := NewRouter() // var rr1, rr2 *Mux r.Get("/", hIndex) r.Route("/articles", func(r Router) { // rr1 = r.(*Mux) r.Get("/", hArticlesList) r.Get("/search", hSearchArticles) r.Route("/{id}", func(r Router) { // rr2 = r.(*Mux) r.Get("/", hGetArticle) r.Get("/sync", hSyncArticle) }) }) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // debugPrintTree(0, 0, r.tree, 0) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // debugPrintTree(0, 0, rr1.tree, 0) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") // debugPrintTree(0, 0, rr2.tree, 0) // log.Println("~~~~~~~~~") // log.Println("~~~~~~~~~") ts := NewTestServer(r) defer ts.Close() var body, expected string _, body = testRequest(t, ts, "GET", "/", nil) expected = "index" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles", nil) expected = "articles-list" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/search", nil) expected = "search-articles" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/123", nil) 
expected = "get-article:123" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } _, body = testRequest(t, ts, "GET", "/articles/123/sync", nil) expected = "sync-article:123" if body != expected { t.Fatalf("expected:%s got:%s", expected, body) } }
explode_data.jsonl/47952
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 970 }
[ 2830, 3393, 44, 2200, 3136, 19794, 15944, 1155, 353, 8840, 836, 8, 341, 9598, 1552, 1669, 19954, 9626, 18552, 7502, 2266, 9328, 11, 10192, 353, 9349, 1254, 9659, 23684, 8, 341, 197, 30295, 4073, 10556, 3782, 445, 1252, 5455, 197, 3518, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConstraintContainsSpan(t *testing.T) { st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) // Each test case has a bunch of spans that are expected to be contained, and // a bunch of spans that are expected not to be contained. testData := []struct { constraint string containedSpans string notContainedSpans string }{ { constraint: "/1: [/1 - /3]", containedSpans: "[/1 - /1] (/1 - /2) (/1 - /3) [/2 - /3] [/1 - /3]", notContainedSpans: "[/0 - /1] (/0 - /1] (/0 - /2] (/0 - /3) [/1 - /4) [/2 - /5]", }, { constraint: "/1/2: [ - /2] [/4 - /4] [/5/3 - /7) [/9 - /9/20]", containedSpans: "[ - /1] [ - /2) [ - /2] [/1 - /2] [/2 - /2] [/4 - /4] " + "[/5/3 - /5/3/1] [/6 - /6] [/5/5 - /7) [/9/10 - /9/15] [/9/19 - /9/20]", notContainedSpans: "[ - /3] [/1 - /3] [/3 - /4] [/3 - /6] [/5/3 - /7] [/6 - /8] " + "[/9/20 - /9/21] [/8 - /9]", }, { constraint: "/1/-2: [/1/5 - /1/2] [/3/5 - /5/2] [/7 - ]", containedSpans: "[/1/5 - /1/2] [/1/4 - /1/3] [/1/4 - /1/2] [/4 - /5) [/4/6 - /5/3] [/7/1 - ]", notContainedSpans: "[/1/5 - /1/1] [/1/3 - /1/1] [/3/6 - /3/5] [/4 - /5] [/4 - /5/1] [/6/10 - ]", }, } for i, tc := range testData { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { c := ParseConstraint(&evalCtx, tc.constraint) spans := parseSpans(&evalCtx, tc.containedSpans) for i := 0; i < spans.Count(); i++ { if sp := spans.Get(i); !c.ContainsSpan(&evalCtx, sp) { t.Errorf("%s should contain span %s", c, sp) } } spans = parseSpans(&evalCtx, tc.notContainedSpans) for i := 0; i < spans.Count(); i++ { if sp := spans.Get(i); c.ContainsSpan(&evalCtx, sp) { t.Errorf("%s should not contain span %s", c, sp) } } }) } }
explode_data.jsonl/59308
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 922 }
[ 2830, 3393, 17890, 23805, 12485, 1155, 353, 8840, 836, 8, 341, 18388, 1669, 10652, 50133, 16451, 28678, 6086, 741, 93413, 23684, 1669, 4916, 50133, 16451, 54469, 1972, 5895, 692, 197, 322, 8886, 1273, 1142, 702, 264, 15493, 315, 44295, 42...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestSummaryDataPoint_Sum(t *testing.T) { ms := NewSummaryDataPoint() ms.InitEmpty() assert.EqualValues(t, float64(0.0), ms.Sum()) testValSum := float64(17.13) ms.SetSum(testValSum) assert.EqualValues(t, testValSum, ms.Sum()) }
explode_data.jsonl/19579
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 96 }
[ 2830, 3393, 19237, 1043, 2609, 1098, 372, 1155, 353, 8840, 836, 8, 341, 47691, 1669, 1532, 19237, 1043, 2609, 741, 47691, 26849, 3522, 741, 6948, 12808, 6227, 1155, 11, 2224, 21, 19, 7, 15, 13, 15, 701, 9829, 41676, 2398, 18185, 2208,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestVarRelocatingNoBundle(t *testing.T) { splitting_suite.expectBundled(t, bundled{ files: relocateFiles, entryPaths: relocateEntries, options: config.Options{ Mode: config.ModeConvertFormat, OutputFormat: config.FormatESModule, AbsOutputDir: "/out", }, }) }
explode_data.jsonl/38606
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 121 }
[ 2830, 3393, 3962, 6740, 509, 1095, 2753, 8409, 1155, 353, 8840, 836, 8, 341, 1903, 2292, 1280, 57239, 25952, 33, 1241, 832, 1155, 11, 51450, 515, 197, 74075, 25, 414, 69572, 10809, 345, 197, 48344, 26901, 25, 69572, 24533, 345, 197, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRefundSale(t *testing.T) { c, _ := NewClient(testClientID, testSecret, APIBaseSandBox) c.GetAccessToken() _, err := c.RefundSale(testSaleID, nil) if err == nil { t.Errorf("404 must be returned for ID=%s", testSaleID) } _, err = c.RefundSale(testSaleID, &Amount{Total: "7.00", Currency: "USD"}) if err == nil { t.Errorf("404 must be returned for ID=%s", testSaleID) } }
explode_data.jsonl/18454
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 3945, 1241, 44800, 1155, 353, 8840, 836, 8, 341, 1444, 11, 716, 1669, 1532, 2959, 8623, 2959, 915, 11, 1273, 19773, 11, 5333, 3978, 47044, 1611, 340, 1444, 2234, 37649, 2822, 197, 6878, 1848, 1669, 272, 18369, 1241, 44800, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestBreakersNoBreakerFor(t *testing.T) { NoBreakFor("any") errDummy := errors.New("any") for i := 0; i < 10000; i++ { assert.Equal(t, errDummy, GetBreaker("any").Do(func() error { return errDummy })) } assert.Equal(t, nil, Do("any", func() error { return nil })) }
explode_data.jsonl/20899
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 122 }
[ 2830, 3393, 22524, 388, 2753, 22524, 261, 2461, 1155, 353, 8840, 836, 8, 341, 197, 2753, 22524, 2461, 445, 3767, 1138, 9859, 43344, 1669, 5975, 7121, 445, 3767, 1138, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 16, 15, 15, 15, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLoadBalancer(t *testing.T) { ipt, fp := buildFakeProxier() svcIP := "10.20.30.41" svcPort := 80 svcNodePort := 3001 svcLBIP := "1.2.3.4" svcPortName := proxy.ServicePortName{ NamespacedName: makeNSN("ns1", "svc1"), Port: "p80", } makeServiceMap(fp, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { svc.Spec.Type = "LoadBalancer" svc.Spec.ClusterIP = svcIP svc.Spec.Ports = []v1.ServicePort{{ Name: svcPortName.Port, Port: int32(svcPort), Protocol: v1.ProtocolTCP, NodePort: int32(svcNodePort), }} svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ IP: svcLBIP, }} }), ) epIP := "10.180.0.1" udpProtocol := v1.ProtocolUDP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIP}, }} eps.Ports = []discovery.EndpointPort{{ Name: utilpointer.StringPtr(svcPortName.Port), Port: utilpointer.Int32(int32(svcPort)), Protocol: &udpProtocol, }} }), ) fp.syncProxyRules() // Expect 2 services and 1 destination epVS := &netlinktest.ExpectedVirtualServer{ VSNum: 2, IP: svcLBIP, Port: uint16(svcNodePort), Protocol: string(v1.ProtocolTCP), RS: []netlinktest.ExpectedRealServer{{ IP: epIP, Port: uint16(svcPort), }}} checkIPVS(t, fp, epVS) // check ipSet rules epIPSet := netlinktest.ExpectedIPSet{ kubeLoadBalancerSet: {{ IP: svcLBIP, Port: svcPort, Protocol: strings.ToLower(string(v1.ProtocolTCP)), SetType: utilipset.HashIPPort, }}, } checkIPSet(t, fp, epIPSet) // Check iptables chain and rules epIpt := netlinktest.ExpectedIptablesChain{ string(kubeServicesChain): {{ JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet, }, { JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet, }, { JumpChain: string(KubeNodePortChain), MatchSet: "", }, { JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet, }, { JumpChain: "ACCEPT", MatchSet: 
kubeLoadBalancerSet, }}, string(kubeLoadBalancerSet): {{ JumpChain: string(KubeMarkMasqChain), MatchSet: "", }}, } checkIptables(t, ipt, epIpt) }
explode_data.jsonl/44357
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1049 }
[ 2830, 3393, 5879, 93825, 1155, 353, 8840, 836, 8, 341, 8230, 417, 11, 12007, 1669, 1936, 52317, 1336, 87, 1268, 741, 1903, 7362, 3298, 1669, 330, 16, 15, 13, 17, 15, 13, 18, 15, 13, 19, 16, 698, 1903, 7362, 7084, 1669, 220, 23, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLowCardinalityOfStr(t *testing.T) { col := (&ColStr{}).LowCardinality() v := []string{"foo", "bar", "foo", "foo", "baz"} col.AppendArr(v) require.NoError(t, col.Prepare()) var buf Buffer col.EncodeColumn(&buf) t.Run("Golden", func(t *testing.T) { gold.Bytes(t, buf.Buf, "col_low_cardinality_of_str") }) t.Run("Ok", func(t *testing.T) { br := bytes.NewReader(buf.Buf) r := NewReader(br) dec := (&ColStr{}).LowCardinality() require.NoError(t, dec.DecodeColumn(r, col.Rows())) require.Equal(t, col.Rows(), dec.Rows()) for i, s := range v { assert.Equal(t, s, col.Row(i)) } assert.Equal(t, ColumnType("LowCardinality(String)"), dec.Type()) }) t.Run("ErrUnexpectedEOF", func(t *testing.T) { r := NewReader(bytes.NewReader(nil)) dec := (&ColStr{}).LowCardinality() require.ErrorIs(t, dec.DecodeColumn(r, col.Rows()), io.ErrUnexpectedEOF) }) t.Run("NoShortRead", func(t *testing.T) { dec := (&ColStr{}).LowCardinality() requireNoShortRead(t, buf.Buf, colAware(dec, col.Rows())) }) }
explode_data.jsonl/64142
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 460 }
[ 2830, 3393, 24187, 5770, 80777, 2124, 2580, 1155, 353, 8840, 836, 8, 341, 46640, 1669, 15899, 6127, 2580, 6257, 568, 24187, 5770, 80777, 741, 5195, 1669, 3056, 917, 4913, 7975, 497, 330, 2257, 497, 330, 7975, 497, 330, 7975, 497, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDatabase_Client_GetStepList(t *testing.T) { // setup types sOne := testStep() sOne.SetID(1) sOne.SetRepoID(1) sOne.SetBuildID(1) sOne.SetNumber(1) sOne.SetName("foo") sOne.SetImage("baz") sTwo := testStep() sTwo.SetID(2) sTwo.SetRepoID(1) sTwo.SetBuildID(1) sTwo.SetNumber(2) sTwo.SetName("bar") sTwo.SetImage("baz") want := []*library.Step{sOne, sTwo} // setup database db, _ := NewTest() defer func() { db.Database.Exec("delete from steps;") db.Database.Close() }() _ = db.CreateStep(sOne) _ = db.CreateStep(sTwo) // run test got, err := db.GetStepList() if err != nil { t.Errorf("GetStepList returned err: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("GetStepList is %v, want %v", got, want) } }
explode_data.jsonl/69133
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 343 }
[ 2830, 3393, 5988, 46102, 13614, 8304, 852, 1155, 353, 8840, 836, 8, 341, 197, 322, 6505, 4494, 198, 1903, 3966, 1669, 1273, 8304, 741, 1903, 3966, 4202, 915, 7, 16, 340, 1903, 3966, 4202, 25243, 915, 7, 16, 340, 1903, 3966, 4202, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNormalBranch(t *testing.T) { g := *mp1.InitializeGame(LER, mp1.GameConfig{MaxTurns: 20}) g.Players[0].CurrentSpace = mp1.NewChainSpace(0, 10) g.NextEvent.Handle(mp1.NewChainSpace(3, 2), &g) g.NextEvent.Handle(1, &g) //Move g.NextEvent.Handle(mp1.NewChainSpace(1, 0), &g) //Branch SpaceIs(mp1.NewChainSpace(1, 0), 0, g, "", t) }
explode_data.jsonl/70291
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 174 }
[ 2830, 3393, 12206, 18197, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 353, 1307, 16, 45829, 4868, 4957, 640, 11, 10490, 16, 20940, 2648, 90, 5974, 19389, 82, 25, 220, 17, 15, 3518, 3174, 98182, 58, 15, 936, 5405, 9914, 284, 10490, 16,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStateChangeDuringTransactionPoolReset(t *testing.T) { t.Parallel() var ( key, _ = crypto.GenerateKey() address = crypto.PubkeyToAddress(key.PublicKey) statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) trigger = false ) // setup pool with 2 transaction in it statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether)) blockchain := &testChain{&testBlockChain{statedb, 1000000000, new(notify.Feed)}, address, &trigger} tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() nonce := pool.Nonce(address) if nonce != 0 { t.Fatalf("Invalid nonce, want 0, got %d", nonce) } pool.AddRemotesSync([]*types.Transaction{tx0, tx1}) nonce = pool.Nonce(address) if nonce != 2 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } // trigger state change in the background trigger = true <-pool.requestReset(nil, nil) _, err := pool.Pending() if err != nil { t.Fatalf("Could not fetch pending transactions: %v", err) } nonce = pool.Nonce(address) if nonce != 2 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } }
explode_data.jsonl/7854
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 461 }
[ 2830, 3393, 1397, 4072, 16014, 8070, 10551, 14828, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 2405, 2399, 197, 23634, 11, 716, 257, 284, 19028, 57582, 1592, 741, 197, 63202, 262, 284, 19028, 1069, 392, 792, 1249, 4286, 4857,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestPanicRecovery(t *testing.T) { dummy := auction{ cfg: nil, syncers: nil, gdprPerms: &auctionMockPermissions{ allowBidderSync: false, allowHostCookies: false, }, metricsEngine: &metricsConf.DummyMetricsEngine{}, } panicker := func(bidder *pbs.PBSBidder, blables metrics.AdapterLabels) { panic("panic!") } recovered := dummy.recoverSafely(panicker) recovered(nil, metrics.AdapterLabels{}) }
explode_data.jsonl/35896
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 176 }
[ 2830, 3393, 47, 31270, 693, 7449, 1155, 353, 8840, 836, 8, 341, 2698, 8574, 1669, 21165, 515, 197, 50286, 25, 257, 2092, 345, 197, 1903, 1721, 388, 25, 2092, 345, 197, 3174, 67, 649, 3889, 1011, 25, 609, 79762, 11571, 23851, 515, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInfo(t *testing.T) { goqueue_tests.Info(t, func(size int) interface { goqueue.Owner goqueue.Enqueuer goqueue.Dequeuer goqueue.Info } { return finite.New(size) }) }
explode_data.jsonl/54514
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 80 }
[ 2830, 3393, 1731, 1155, 353, 8840, 836, 8, 341, 30680, 4584, 32509, 20132, 1155, 11, 2915, 6856, 526, 8, 3749, 341, 197, 30680, 4584, 49920, 198, 197, 30680, 4584, 22834, 591, 8801, 198, 197, 30680, 4584, 8934, 591, 8801, 198, 197, 30...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestTagsAppliedToTelemetry(t *testing.T) { tests := []struct { name string fields map[string]interface{} tags map[string]string metricValueFields []string }{ { "value but no count", map[string]interface{}{"value": 16.5, "alpha": 3.5, "bravo": 17}, map[string]string{"alpha": "a tag is not a field", "charlie": "charlie"}, []string{"value", "alpha", "bravo"}, }, } for _, tt := range tests { tf := func(t *testing.T) { assert := assert.New(t) now := time.Now().UTC() transmitter := new(mocks.Transmitter) transmitter.On("Track", mock.Anything) metricName := "ShouldBeSimpleMetric" m, err := metric.New( metricName, tt.tags, tt.fields, now, ) assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking } err = ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} ai.Write(mSet) transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields)) transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry")) // Will verify that all original tags are present in telemetry.Properies map verifyAdditionalTelemetry(assert, m, transmitter, tt.metricValueFields, metricName) } t.Run(tt.name, tf) } }
explode_data.jsonl/50943
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 650 }
[ 2830, 3393, 15930, 75856, 1249, 6639, 35958, 1155, 353, 8840, 836, 8, 972, 78216, 1669, 3056, 1235, 972, 197, 11609, 1060, 914, 319, 197, 55276, 310, 2415, 14032, 31344, 90, 1771, 197, 3244, 2032, 1060, 2415, 14032, 30953, 319, 197, 210...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDeliverServiceAbruptStop(t *testing.T) { defer ensureNoGoroutineLeak(t)() // Scenario: The deliver service is started and abruptly stopped. // The block provider instance is run in a separate goroutine, and thus // it might be scheduled after the deliver client is stopped. gossipServiceAdapter := &mocks.MockGossipServiceAdapter{GossipBlockDisseminations: make(chan uint64)} service, err := NewDeliverService(&Config{ Endpoints: []string{"a"}, Gossip: gossipServiceAdapter, CryptoSvc: &mockMCS{}, ABCFactory: DefaultABCFactory, ConnFactory: DefaultConnectionFactory, }) assert.NoError(t, err) li := &mocks.MockLedgerInfo{Height: uint64(100)} service.StartDeliverForChannel("mychannel", li, func() {}) service.StopDeliverForChannel("mychannel") }
explode_data.jsonl/10581
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 256 }
[ 2830, 3393, 16532, 1524, 1860, 89428, 7564, 10674, 1155, 353, 8840, 836, 8, 341, 16867, 5978, 2753, 38, 269, 14159, 2304, 585, 1155, 8, 741, 197, 322, 58663, 25, 576, 6359, 2473, 374, 3855, 323, 59745, 10497, 624, 197, 322, 576, 2504,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFprinto(t *testing.T) { t.Run("enabled output", func(t *testing.T) { pterm.Output = true for _, randomString := range internal.RandomStrings { out := captureStdout(func(w io.Writer) { pterm.Fprinto(w, randomString) }) testza.AssertEqual(t, "\r"+randomString, out) } }) t.Run("disabled output", func(t *testing.T) { pterm.Output = false for _, randomString := range internal.RandomStrings { out := captureStdout(func(w io.Writer) { pterm.Fprinto(w, randomString) }) testza.AssertEqual(t, "", out) } pterm.Output = true }) }
explode_data.jsonl/49140
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 247 }
[ 2830, 3393, 37, 1350, 78, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 15868, 2550, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 60796, 4195, 34246, 284, 830, 198, 197, 2023, 8358, 4194, 703, 1669, 2088, 5306, 26709, 20859, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClientCancelRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { <-req.Context().Done() })) defer server.Close() canceled := make(chan struct{}, 1) transport := &cancelRequester{ RoundTripper: http.DefaultTransport, cancelRequest: func(*http.Request) { select { case canceled <- struct{}{}: default: } }, } apmtest.WithTransaction(func(ctx context.Context) { client := &http.Client{ Transport: apmhttp.WrapRoundTripper(transport), Timeout: time.Nanosecond, } _, err := ctxhttp.Get(ctx, client, server.URL) require.Error(t, err) }) select { case <-canceled: case <-time.After(10 * time.Second): t.Fatalf("timed out waiting for CancelRequest to be called") } }
explode_data.jsonl/49620
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 307 }
[ 2830, 3393, 2959, 9269, 1900, 1155, 353, 8840, 836, 8, 341, 41057, 1669, 54320, 70334, 7121, 5475, 19886, 89164, 18552, 3622, 1758, 37508, 11, 4232, 353, 1254, 9659, 8, 341, 197, 197, 45342, 2958, 9328, 1005, 17453, 741, 197, 44194, 168...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBindNilReturn(t *testing.T) { t.Parallel() ctx := NewIsolate().NewContext() xyz := ctx.Bind("xyz", func(CallbackArgs) (*Value, error) { return nil, nil }) res, err := xyz.Call(nil) if err != nil { t.Error(err) } if str := res.String(); str != "undefined" { t.Errorf("Expected undefined, got %q", res) } }
explode_data.jsonl/81569
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 132 }
[ 2830, 3393, 9950, 19064, 5598, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 20985, 1669, 1532, 3872, 33066, 1005, 3564, 1972, 2822, 197, 28854, 1669, 5635, 32451, 445, 28854, 497, 2915, 3025, 3420, 4117, 8, 4609, 1130, 11, 1465...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSuperadminPriviledge(t *testing.T) { r := NewRequest() r.PathParams["event"] = "testevent" r.Env["REMOTE_USER"] = "superadmin" assert.True(t, hasAdminPriviledge(r)) r = NewRequest() r.Env["REMOTE_USER"] = "superadmin" assert.True(t, hasAdminPriviledge(r)) assert.True(t, hasSuperAdminPriviledge(r)) }
explode_data.jsonl/6970
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 19284, 2882, 32124, 2181, 709, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 1900, 741, 7000, 17474, 4870, 1183, 3087, 1341, 284, 330, 1944, 3087, 698, 7000, 81214, 1183, 56020, 9107, 1341, 284, 330, 9522, 2882, 698, 6948,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMembershipAfterExpiration(t *testing.T) { // Scenario: // Start 3 peers (peer0, peer1, peer2). Set peer0 as the anchor peer. // Stop peer0 and peer1 for a while, start them again and test if peer2 still gets full membership config := defaultTestConfig // Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test. config.AliveExpirationTimeout = 2 * config.AliveTimeInterval config.ReconnectInterval = config.AliveExpirationTimeout config.MsgExpirationFactor = 5 peersNum := 3 ports := []int{9120, 9121, 9122} anchorPeer := "localhost:9120" bootPeers := []string{} instances := []*gossipInstance{} var inst *gossipInstance mockTracker := &mockAnchorPeerTracker{[]string{anchorPeer}} l, err := zap.NewDevelopment() assert.NoError(t, err) expired := make(chan struct{}, 1) // use a custom logger to verify messages from expiration callback loggerThatTracksCustomMessage := func() util.Logger { var lock sync.RWMutex expectedMsgs := map[string]struct{}{ "Do not remove bootstrap or anchor peer endpoint localhost:9120 from membership": {}, "Removing member: Endpoint: localhost:9121, InternalEndpoint: localhost:9121, PKIID: 6c6f63616c686f73743a39313231": {}, } return flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error { // do nothing if we already found all the expectedMsgs lock.RLock() expectedMsgSize := len(expectedMsgs) lock.RUnlock() if expectedMsgSize == 0 { select { case expired <- struct{}{}: default: // no room is fine, continue } return nil } lock.Lock() defer lock.Unlock() if _, matched := expectedMsgs[entry.Message]; matched { delete(expectedMsgs, entry.Message) } return nil })) } // Start all peers, connect to the anchor peer and verify full membership for i := 0; i < peersNum; i++ { id := fmt.Sprintf("d%d", i) logger := loggerThatTracksCustomMessage() inst = createDiscoveryInstanceWithAnchorPeerTracker(ports[i], id, bootPeers, true, noopPolicy, func(_ *protoext.SignedGossipMessage) {}, config, mockTracker, 
logger) instances = append(instances, inst) } for i := 1; i < peersNum; i++ { connect(instances[i], anchorPeer) } assertMembership(t, instances, peersNum-1) // Stop peer0 and peer1 so that peer2 would stay alone stopInstances(t, instances[0:peersNum-1]) // waitTime is the same amount of time as it takes to remove a message from the aliveMsgStore (aliveMsgTTL) // Add a second as buffer waitTime := config.AliveExpirationTimeout*time.Duration(config.MsgExpirationFactor) + time.Second select { case <-expired: case <-time.After(waitTime): t.Fatalf("timed out") } // peer2's deadMembership should contain the anchor peer deadMemeberShip := instances[peersNum-1].discoveryImpl().deadMembership assert.Equal(t, 1, deadMemeberShip.Size()) assertMembership(t, instances[peersNum-1:], 0) // Start again peer0 and peer1 and wait for all the peers to get full membership. // Especially, we want to test that peer2 won't be isolated for i := 0; i < peersNum-1; i++ { id := fmt.Sprintf("d%d", i) inst = createDiscoveryInstanceWithAnchorPeerTracker(ports[i], id, bootPeers, true, noopPolicy, func(_ *protoext.SignedGossipMessage) {}, config, mockTracker, nil) instances[i] = inst } connect(instances[1], anchorPeer) assertMembership(t, instances, peersNum-1) }
explode_data.jsonl/62281
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1219 }
[ 2830, 3393, 80904, 6025, 66301, 1155, 353, 8840, 836, 8, 341, 197, 322, 58663, 510, 197, 322, 5145, 220, 18, 25029, 320, 16537, 15, 11, 14397, 16, 11, 14397, 17, 568, 2573, 14397, 15, 438, 279, 17105, 14397, 624, 197, 322, 14215, 14...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestBadgerReceiptsBig(t *testing.T) { t.Skip("ignore") t.SkipNow() // Don't remove the database (it's not temp) manager, _ := GetManager(2, false, "40million", t) PopulateDatabase(manager, 1000000*40) // Create a 40 million element Merkle Tree GenerateReceipts(manager, 1500000, t) }
explode_data.jsonl/26305
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 107 }
[ 2830, 3393, 17082, 1389, 67461, 82, 15636, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 445, 13130, 1138, 3244, 57776, 7039, 741, 197, 322, 4320, 944, 4057, 279, 4625, 320, 275, 594, 537, 2730, 340, 92272, 11, 716, 1669, 2126, 2043, 7, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetDuration(t *testing.T) { assert := assert.New(t) duration := "P1DT23H45M20S" offset := "25877s" expected := "1:16:34:03" actual := getDuration(duration, offset) assert.Equal(expected, actual, "the data have to be equal") duration = "P1DT" offset = "5000s" expected = "22:36:40" actual = getDuration(duration, offset) assert.Equal(expected, actual, "the data have to be equal") duration = "PT1H" offset = "300s" expected = "55:00" actual = getDuration(duration, offset) assert.Equal(expected, actual, "the data have to be equal") duration = "PT1M" offset = "20s" expected = "00:40" actual = getDuration(duration, offset) assert.Equal(expected, actual, "the data have to be equal") duration = "PT4H2S" offset = "260s" expected = "03:55:42" actual = getDuration(duration, offset) assert.Equal(expected, actual, "the data have to be equal") }
explode_data.jsonl/71233
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 322 }
[ 2830, 3393, 1949, 12945, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 89300, 1669, 330, 47, 16, 10599, 17, 18, 39, 19, 20, 44, 17, 15, 50, 698, 40668, 1669, 330, 17, 20, 23, 22, 22, 82, 698, 42400, 1669, 330,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewUserDicRecords02(t *testing.T) { s := ` 日本経済新聞,日本 経済 新聞,ニホン ケイザイ シンブン,カスタム名詞 # 関西国際空港,関西 国際 空港,カンサイ コクサイ クウコウ,カスタム地名 朝青龍,朝青龍,アサショウリュウ,カスタム人名 ` r := strings.NewReader(s) rec, err := NewUserDicRecords(r) if err != nil { t.Fatalf("user dic build error, %v", err) } udic, err := rec.NewUserDic() if err != nil { t.Fatalf("user dic build error, %v", err) } if ids := udic.dic.Index.Search("日本経済新聞"); len(ids) != 1 { t.Errorf("user dic search failed") } else if !reflect.DeepEqual(udic.dic.Contents[ids[0]].Tokens, []string{"日本", "経済", "新聞"}) { t.Errorf("got %+v, expected %+v", udic.dic.Contents[ids[0]].Tokens, []string{"日本", "経済", "新聞"}) } if ids := udic.dic.Index.Search("関西国際空港"); len(ids) != 0 { t.Errorf("user dic build failed") } if ids := udic.dic.Index.Search("朝青龍"); len(ids) == 0 { t.Errorf("user dic search failed") } else if !reflect.DeepEqual(udic.dic.Contents[ids[0]].Tokens, []string{"朝青龍"}) { t.Errorf("got %+v, expected %+v", udic.dic.Contents[ids[0]].Tokens, []string{"朝青龍"}) } }
explode_data.jsonl/44234
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 627 }
[ 2830, 3393, 3564, 1474, 44545, 25876, 15, 17, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 22074, 101059, 57154, 234, 33986, 230, 111716, 11, 101059, 10236, 113, 234, 33986, 230, 78919, 103508, 11, 77583, 124688, 15698, 17587, 109, 24218, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestInterfaceToFloat64(t *testing.T) { var out float64 var ok bool out, ok = InterfaceToFloat64(float64(1.1)) require.True(t, ok) require.Equal(t, float64(1.1), out) out, ok = InterfaceToFloat64(float32(2.2)) require.True(t, ok) require.Equal(t, float64(float32(2.2)), out) out, ok = InterfaceToFloat64(int(3)) require.True(t, ok) require.Equal(t, float64(3), out) _, ok = InterfaceToFloat64("test") require.False(t, ok) }
explode_data.jsonl/52292
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 186 }
[ 2830, 3393, 5051, 1249, 5442, 21, 19, 1155, 353, 8840, 836, 8, 341, 2405, 700, 2224, 21, 19, 198, 2405, 5394, 1807, 271, 13967, 11, 5394, 284, 20019, 1249, 5442, 21, 19, 8268, 21, 19, 7, 16, 13, 16, 1171, 17957, 32443, 1155, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidate(t *testing.T) { cases := map[string]struct { user users.User err error }{ "validate user with valid data": { user: users.User{ Email: email, Password: password, }, err: nil, }, "validate user with empty email": { user: users.User{ Email: "", Password: password, }, err: users.ErrMalformedEntity, }, "validate user with empty password": { user: users.User{ Email: email, Password: "", }, err: users.ErrMalformedEntity, }, "validate user with invalid email": { user: users.User{ Email: "userexample.com", Password: password, }, err: users.ErrMalformedEntity, }, } for desc, tc := range cases { err := tc.user.Validate() assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) } }
explode_data.jsonl/68799
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 376 }
[ 2830, 3393, 17926, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 2415, 14032, 60, 1235, 341, 197, 19060, 3847, 7344, 198, 197, 9859, 220, 1465, 198, 197, 59403, 197, 197, 1, 7067, 1196, 448, 2697, 821, 788, 341, 298, 19060, 25, 38...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestMsgAppFlowControlMoveForward exercises the leader's per-follower
// inflight window: after the window is full, each MsgAppResp from the
// follower frees exactly one slot, so exactly one new proposal can be sent —
// and acking already-acked indexes must NOT free additional slots.
func TestMsgAppFlowControlMoveForward(t *testing.T) {
	r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
	r.becomeCandidate()
	r.becomeLeader()
	pr2 := r.prs.prs[2]
	// force the progress to be in replicate state
	pr2.becomeReplicate()
	// fill in the inflights window
	for i := 0; i < r.prs.maxInflight; i++ {
		r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
		r.readMessages()
	}
	// 1 is noop, 2 is the first proposal we just sent.
	// so we start with 2.
	for tt := 2; tt < r.prs.maxInflight; tt++ {
		// move forward the window: ack index tt, freeing one inflight slot
		r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: uint64(tt)})
		r.readMessages()
		// fill in the inflights window again
		r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
		ms := r.readMessages()
		// ensure the freed slot allowed exactly one MsgApp out
		if len(ms) != 1 {
			t.Fatalf("#%d: len(ms) = %d, want 1", tt, len(ms))
		}
		// ensure 1: the window is full again after the new proposal
		if !pr2.ins.full() {
			t.Fatalf("inflights.full = %t, want %t", pr2.ins.full(), true)
		}
		// ensure 2: re-acking indexes <= tt (already acked) must not free slots
		for i := 0; i < tt; i++ {
			r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: uint64(i)})
			if !pr2.ins.full() {
				t.Fatalf("#%d: inflights.full = %t, want %t", tt, pr2.ins.full(), true)
			}
		}
	}
}
explode_data.jsonl/81199
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 580 }
[ 2830, 3393, 6611, 2164, 18878, 3273, 9860, 25925, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 501, 2271, 55535, 723, 7, 16, 11, 3056, 2496, 21, 19, 90, 16, 11, 220, 17, 2137, 220, 20, 11, 220, 16, 11, 1532, 10642, 5793, 2398, 7000, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestNewLogsExporter_ProcessLogError(t *testing.T) { want := errors.New("my_error") me, err := NewLogsProcessor(testCfg, exportertest.NewNopLogsExporter(), newTestLProcessor(want)) require.NoError(t, err) assert.Equal(t, want, me.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) }
explode_data.jsonl/4020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 112 }
[ 2830, 3393, 3564, 51053, 88025, 70241, 2201, 1454, 1155, 353, 8840, 836, 8, 341, 50780, 1669, 5975, 7121, 445, 2408, 4096, 1138, 49294, 11, 1848, 1669, 1532, 51053, 22946, 8623, 42467, 11, 7485, 83386, 7121, 45, 453, 51053, 88025, 1507, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLexAlphaNumeric(t *testing.T) { l := lex("aaa zzz", nil) if !l.acceptToks(0, isAlphaNumeric, nil) { t.Errorf("unexpected alphanumeric") } token := l.popToken() if token.val != "aaa" { t.Errorf("expected 'aaa' but got '%s'", token.val) } }
explode_data.jsonl/81031
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 111 }
[ 2830, 3393, 47778, 19384, 36296, 1155, 353, 8840, 836, 8, 341, 8810, 1669, 22429, 445, 32646, 1147, 10400, 497, 2092, 340, 743, 753, 75, 28562, 51, 34222, 7, 15, 11, 374, 19384, 36296, 11, 2092, 8, 341, 197, 3244, 13080, 445, 53859, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_read_int64_overflow(t *testing.T) { should := require.New(t) input := "123456789123456789123456789123456789," iter := ParseString(ConfigDefault, input) iter.ReadInt64() should.NotNil(iter.Error) }
explode_data.jsonl/51195
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 6443, 4042, 21, 19, 79073, 1155, 353, 8840, 836, 8, 341, 197, 5445, 1669, 1373, 7121, 1155, 340, 22427, 1669, 330, 16, 17, 18, 19, 20, 21, 22, 23, 24, 16, 17, 18, 19, 20, 21, 22, 23, 24, 16, 17, 18, 19, 20, 21, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHyphenAsOption(t *testing.T) { var args struct { Foo string } err := parse("--foo -", &args) require.NoError(t, err) assert.Equal(t, "-", args.Foo) }
explode_data.jsonl/13067
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 71 }
[ 2830, 3393, 30816, 14769, 2121, 5341, 1155, 353, 8840, 836, 8, 341, 2405, 2827, 2036, 341, 197, 12727, 2624, 914, 198, 197, 532, 9859, 1669, 4715, 21549, 7975, 481, 497, 609, 2116, 340, 17957, 35699, 1155, 11, 1848, 340, 6948, 12808, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestGenerateDeployment checks that generateDeployment maps a Component CR
// to the expected apps/v1 Deployment: first with only required fields (zero
// optionals, 1 replica), then with all optional fields populated (replicas,
// target port, image, env vars, resource limits/requests, probes).
func TestGenerateDeployment(t *testing.T) {
	applicationName := "test-application"
	componentName := "test-component"
	namespace := "test-namespace"
	replicas := int32(1)
	otherReplicas := int32(3)
	// Standard recommended Kubernetes labels expected on the generated Deployment.
	k8slabels := map[string]string{
		"app.kubernetes.io/name":       componentName,
		"app.kubernetes.io/instance":   componentName,
		"app.kubernetes.io/part-of":    applicationName,
		"app.kubernetes.io/managed-by": "kustomize",
		"app.kubernetes.io/created-by": "application-service",
	}
	// Selector labels: instance only.
	matchLabels := map[string]string{
		"app.kubernetes.io/instance": componentName,
	}
	tests := []struct {
		name           string
		component      appstudiov1alpha1.Component
		wantDeployment appsv1.Deployment
	}{
		{
			name: "Simple component, no optional fields set",
			component: appstudiov1alpha1.Component{
				ObjectMeta: v1.ObjectMeta{
					Name:      componentName,
					Namespace: namespace,
				},
				Spec: appstudiov1alpha1.ComponentSpec{
					ComponentName: componentName,
					Application:   applicationName,
				},
			},
			wantDeployment: appsv1.Deployment{
				TypeMeta: v1.TypeMeta{
					Kind:       "Deployment",
					APIVersion: "apps/v1",
				},
				ObjectMeta: v1.ObjectMeta{
					Name:      componentName,
					Namespace: namespace,
					Labels:    k8slabels,
				},
				Spec: appsv1.DeploymentSpec{
					Replicas: &replicas,
					Selector: &v1.LabelSelector{
						MatchLabels: matchLabels,
					},
					Template: corev1.PodTemplateSpec{
						ObjectMeta: v1.ObjectMeta{
							Labels: matchLabels,
						},
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{
								{
									Name:            "container-image",
									ImagePullPolicy: corev1.PullAlways,
								},
							},
						},
					},
				},
			},
		},
		{
			name: "Component, optional fields set",
			component: appstudiov1alpha1.Component{
				ObjectMeta: v1.ObjectMeta{
					Name:      componentName,
					Namespace: namespace,
				},
				Spec: appstudiov1alpha1.ComponentSpec{
					ComponentName: componentName,
					Application:   applicationName,
					Replicas:      3,
					TargetPort:    5000,
					Build: appstudiov1alpha1.Build{
						ContainerImage: "quay.io/test/test-image:latest",
					},
					Env: []corev1.EnvVar{
						{
							Name:  "test",
							Value: "value",
						},
					},
					Resources: corev1.ResourceRequirements{
						Limits: corev1.ResourceList{
							corev1.ResourceCPU:    resource.MustParse("2M"),
							corev1.ResourceMemory: resource.MustParse("1Gi"),
						},
						Requests: corev1.ResourceList{
							corev1.ResourceCPU:    resource.MustParse("1M"),
							corev1.ResourceMemory: resource.MustParse("256Mi"),
						},
					},
				},
			},
			wantDeployment: appsv1.Deployment{
				TypeMeta: v1.TypeMeta{
					Kind:       "Deployment",
					APIVersion: "apps/v1",
				},
				ObjectMeta: v1.ObjectMeta{
					Name:      componentName,
					Namespace: namespace,
					Labels:    k8slabels,
				},
				Spec: appsv1.DeploymentSpec{
					Replicas: &otherReplicas,
					Selector: &v1.LabelSelector{
						MatchLabels: matchLabels,
					},
					Template: corev1.PodTemplateSpec{
						ObjectMeta: v1.ObjectMeta{
							Labels: matchLabels,
						},
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{
								{
									Name:            "container-image",
									Image:           "quay.io/test/test-image:latest",
									ImagePullPolicy: corev1.PullAlways,
									Env: []corev1.EnvVar{
										{
											Name:  "test",
											Value: "value",
										},
									},
									Ports: []corev1.ContainerPort{
										{
											ContainerPort: int32(5000),
										},
									},
									// TargetPort also drives both probes against port 5000.
									ReadinessProbe: &corev1.Probe{
										InitialDelaySeconds: 10,
										PeriodSeconds:       10,
										Handler: corev1.Handler{
											TCPSocket: &corev1.TCPSocketAction{
												Port: intstr.FromInt(5000),
											},
										},
									},
									LivenessProbe: &corev1.Probe{
										InitialDelaySeconds: 10,
										PeriodSeconds:       10,
										Handler: corev1.Handler{
											HTTPGet: &corev1.HTTPGetAction{
												Port: intstr.FromInt(5000),
												Path: "/",
											},
										},
									},
									Resources: corev1.ResourceRequirements{
										Limits: corev1.ResourceList{
											corev1.ResourceCPU:    resource.MustParse("2M"),
											corev1.ResourceMemory: resource.MustParse("1Gi"),
										},
										Requests: corev1.ResourceList{
											corev1.ResourceCPU:    resource.MustParse("1M"),
											corev1.ResourceMemory: resource.MustParse("256Mi"),
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			generatedDeployment := generateDeployment(tt.component)
			// Deep comparison of the full generated object against the fixture.
			if !reflect.DeepEqual(*generatedDeployment, tt.wantDeployment) {
				t.Errorf("TestGenerateDeployment() error: expected %v got %v", tt.wantDeployment, generatedDeployment)
			}
		})
	}
}
explode_data.jsonl/15090
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2561 }
[ 2830, 3393, 31115, 75286, 1155, 353, 8840, 836, 8, 341, 197, 5132, 675, 1669, 330, 1944, 92479, 698, 52228, 675, 1669, 330, 1944, 41387, 698, 56623, 1669, 330, 1944, 12, 2231, 698, 73731, 52210, 1669, 526, 18, 17, 7, 16, 340, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTaskStackConfig_Template(t *testing.T) { testCases := map[string]struct { mockReadParser func(m *mocks.MockReadParser) wantedTemplate string wantedError error }{ "should return error if unable to read": { mockReadParser: func(m *mocks.MockReadParser) { m.EXPECT().Parse(taskTemplatePath, gomock.Any()).Return(nil, errors.New("error reading template")) }, wantedError: errors.New("read template for task stack: error reading template"), }, "should return template body when present": { mockReadParser: func(m *mocks.MockReadParser) { m.EXPECT().Parse(taskTemplatePath, gomock.Any()).Return(&template.Content{ Buffer: bytes.NewBufferString("This is the task template"), }, nil) }, wantedTemplate: "This is the task template", }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockReadParser := mocks.NewMockReadParser(ctrl) if tc.mockReadParser != nil { tc.mockReadParser(mockReadParser) } taskInput := deploy.CreateTaskResourcesInput{} taskStackConfig := &taskStackConfig{ CreateTaskResourcesInput: &taskInput, parser: mockReadParser, } got, err := taskStackConfig.Template() if tc.wantedError != nil { require.EqualError(t, tc.wantedError, err.Error()) } else { require.Equal(t, tc.wantedTemplate, got) } }) } }
explode_data.jsonl/16272
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 548 }
[ 2830, 3393, 6262, 4336, 2648, 57917, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 77333, 4418, 6570, 2915, 1255, 353, 16712, 82, 24664, 4418, 6570, 692, 197, 6692, 7566, 7275, 914, 198, 197, 6692, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCallerSubscriberClient_GetERC20Balance(t *testing.T) { t.Parallel() expectedBig, _ := big.NewInt(0).SetString("100000000000000000000000000000000000000", 10) tests := []struct { name string input string expected *big.Int }{ {"small", "0x0100", big.NewInt(256)}, {"big", "0x4b3b4ca85a86c47a098a224000000000", expectedBig}, } for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { ethClientMock := new(mocks.CallerSubscriber) ethClient := &eth.CallerSubscriberClient{CallerSubscriber: ethClientMock} contractAddress := cltest.NewAddress() userAddress := cltest.NewAddress() functionSelector := eth.HexToFunctionSelector("0x70a08231") // balanceOf(address) data := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(userAddress.Bytes(), utils.EVMWordByteLen)) callArgs := eth.CallArgs{ To: contractAddress, Data: data, } ethClientMock.On("Call", mock.Anything, "eth_call", callArgs, "latest"). Return(nil). Run(func(args mock.Arguments) { res := args.Get(0).(*string) *res = test.input }) result, err := ethClient.GetERC20Balance(userAddress, contractAddress) assert.NoError(t, err) assert.NoError(t, err) assert.Equal(t, test.expected, result) }) } }
explode_data.jsonl/63847
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 519 }
[ 2830, 3393, 58735, 40236, 2959, 13614, 27434, 17, 15, 21190, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 42400, 15636, 11, 716, 1669, 2409, 7121, 1072, 7, 15, 568, 1649, 703, 445, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReflect(t *testing.T) { var str = "hello world" v := reflect.ValueOf(str) // 获取值 t.Log("value:", v) t.Log("value:", v.String()) // 获取类型 t.Log("type:", v.Type()) t.Log("kind:", v.Kind()) // 修改值 // 判断是否可以修改 canSet := v.CanSet() t.Log("can set:", canSet) // 如果想修改其值,必须传递的是指针 v = reflect.ValueOf(&str) v = v.Elem() v.SetString("new world") // 不可以直接修改 t.Log("value:", v) // 通过反射修改结构体 test := TestStruct{A: 23, B: "hello world"} s := reflect.ValueOf(&test).Elem() typeOfT := s.Type() for i := 0; i < s.NumField(); i++ { f := s.Field(i) t.Logf("%s: Type ==>%s Value==> %v \n", typeOfT.Field(i).Name, f.Type(), f.Interface()) } s.Field(0).SetInt(77) s.Field(1).SetString("new world") t.Logf("%+v", test) }
explode_data.jsonl/81131
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 442 }
[ 2830, 3393, 72789, 1155, 353, 8840, 836, 8, 1476, 2405, 607, 284, 330, 14990, 1879, 1837, 5195, 1669, 8708, 6167, 2124, 4199, 340, 197, 322, 40671, 25511, 198, 3244, 5247, 445, 957, 12147, 348, 340, 3244, 5247, 445, 957, 12147, 348, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestYamlConfig(t *testing.T) { // Arrange config := NewConfig() var configYaml = []byte(` # Connection properties connectionString: "connectionStringExample" schema: "public" # Define what tables should be used useAllTables: false selectedTables: - city - customer # Additional flags showAllConstraints: true outputFileName: "my-db.mmd" encloseWithMermaidBackticks: false # These connection strings are available as suggestions in the cli (use tab to access) connectionStringSuggestions: - suggestion1 - suggestion2 `) // Act viper.SetConfigType("yaml") err := viper.ReadConfig(bytes.NewBuffer(configYaml)) // Assert assert.Nil(t, err) assert.Equal(t, "connectionStringExample", config.ConnectionString()) assert.Equal(t, "public", config.Schema()) assert.Equal(t, false, config.UseAllTables()) assert.ElementsMatch(t, []string{"city", "customer"}, config.SelectedTables()) assert.Equal(t, true, config.ShowAllConstraints()) assert.Equal(t, "my-db.mmd", config.OutputFileName()) assert.Equal(t, false, config.EncloseWithMermaidBackticks()) assert.ElementsMatch(t, []string{"suggestion1", "suggestion2"}, config.ConnectionStringSuggestions()) }
explode_data.jsonl/77481
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 390 }
[ 2830, 3393, 56, 9467, 2648, 1155, 353, 8840, 836, 8, 341, 197, 322, 40580, 198, 25873, 1669, 1532, 2648, 741, 2405, 2193, 56, 9467, 284, 3056, 3782, 61528, 2, 11032, 5888, 198, 7742, 703, 25, 330, 7742, 703, 13314, 698, 17349, 25, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFormatterFaint(test *testing.T) { formatted, err := formatter.Format("{faint}text{faint | off}") assert.NoError(test, err) assert.Equal(test, "\x1b[2mtext\x1b[22m", formatted) }
explode_data.jsonl/39770
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 78 }
[ 2830, 3393, 14183, 37, 1641, 8623, 353, 8840, 836, 8, 341, 37410, 12127, 11, 1848, 1669, 24814, 9978, 13976, 69, 1641, 92, 1318, 90, 69, 1641, 760, 1007, 55266, 6948, 35699, 8623, 11, 1848, 340, 6948, 12808, 8623, 11, 2917, 87, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestTrieDB_UnsubscribeAll subscribes three clients (one of them twice),
// removes every subscription of one client with UnsubscribeAll, and then
// checks both internal indexes: the removed client matches no topic filter,
// while the other clients' user index entries and QoS levels are intact.
func TestTrieDB_UnsubscribeAll(t *testing.T) {
	a := assert.New(t)
	db := NewStore()
	tt := []struct {
		clientID string
		topic    packets.Topic
	}{
		{clientID: "id0", topic: packets.Topic{Name: "name0", Qos: packets.QOS_0}},
		{clientID: "id1", topic: packets.Topic{Name: "name1", Qos: packets.QOS_1}},
		{clientID: "id2", topic: packets.Topic{Name: "name2", Qos: packets.QOS_2}},
		{clientID: "id0", topic: packets.Topic{Name: "name3", Qos: packets.QOS_2}},
	}
	for _, v := range tt {
		db.Subscribe(v.clientID, v.topic)
	}
	removedCid := "id0"
	db.UnsubscribeAll(removedCid)
	for _, v := range tt {
		if v.clientID == removedCid {
			// The removed client must no longer match any of its topic filters.
			rs := db.getMatchedTopicFilter(v.topic.Name)
			if _, ok := rs[v.clientID]; ok {
				t.Fatalf("remove error")
			}
			continue
		}
		// Surviving clients: the user index still maps topic name -> node with
		// the original QoS, and filter matching still returns them.
		got := db.userIndex[v.clientID][v.topic.Name]
		a.Equal(got.topicName, v.topic.Name)
		a.Equal(got.clients[v.clientID], v.topic.Qos)
		rs := db.getMatchedTopicFilter(v.topic.Name)
		a.Equal(rs[v.clientID][0].Qos, v.topic.Qos)
	}
}
explode_data.jsonl/80975
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 460 }
[ 2830, 3393, 51, 7231, 3506, 40687, 9384, 2403, 1155, 353, 8840, 836, 8, 341, 11323, 1669, 2060, 7121, 1155, 340, 20939, 1669, 1532, 6093, 741, 3244, 83, 1669, 3056, 1235, 341, 197, 25291, 915, 914, 198, 197, 3244, 24810, 262, 27035, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestEscapeValue(t *testing.T) { type args struct { value string } tests := []struct { name string args args want string }{ { name: "case1", args: args{ value: "jdbc.url=jdbc:mysql", }, want: "jdbc.url=jdbc:mysql", }, { name: "case2", args: args{ value: "jdbc.url = jdbc:mysql", }, want: "jdbc.url\\ =\\ jdbc:mysql", }, { name: "escape multiline value", args: args{ value: "line1\nline2", }, want: "line1\\nline2", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := escapeValue(tt.args.value); got != tt.want { t.Errorf("escape() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/77995
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 359 }
[ 2830, 3393, 48124, 1130, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 16309, 914, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 31215, 2827, 198, 197, 50780, 914, 198, 197, 59403, 197, 197, 515,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestStaircase(t *testing.T) { t.Parallel() // 1,1,1 // 2,1 // 1,2 // 3 n := 3 want := 4 got := algo.Staircase(n) if want != got { t.Fatalf("want: %d, got: %d", want, got) } }
explode_data.jsonl/73129
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 102 }
[ 2830, 3393, 623, 1310, 5638, 1155, 353, 8840, 836, 8, 1476, 3244, 41288, 7957, 2822, 197, 322, 220, 16, 11, 16, 11, 16, 198, 197, 322, 220, 17, 11, 16, 198, 197, 322, 220, 16, 11, 17, 198, 197, 322, 220, 18, 271, 9038, 1669, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_findContainer(t *testing.T) { type args struct { name string containers []corev1.Container } tests := []struct { name string args args wantErr bool }{ { name: "Case 1 - Find the container", args: args{ name: "foo", containers: []corev1.Container{ { Name: "foo", VolumeMounts: []corev1.VolumeMount{ { MountPath: "/tmp", Name: "test-pvc", }, }, }, }, }, wantErr: false, }, { name: "Case 2 - Error if container not found", args: args{ name: "foo2", containers: []corev1.Container{ { Name: "foo", VolumeMounts: []corev1.VolumeMount{ { MountPath: "/tmp", Name: "test-pvc", }, }, }, }, }, wantErr: true, }, { name: "Case 3 - Error when passing in blank container name", args: args{ name: "", containers: []corev1.Container{ { Name: "foo", VolumeMounts: []corev1.VolumeMount{ { MountPath: "/tmp", Name: "test-pvc", }, }, }, }, }, wantErr: true, }, { name: "Case 4 - Check against multiple containers (rather than one)", args: args{ name: "foo", containers: []corev1.Container{ { Name: "bar", VolumeMounts: []corev1.VolumeMount{ { MountPath: "/tmp", Name: "test-pvc", }, }, }, { Name: "foo", VolumeMounts: []corev1.VolumeMount{ { MountPath: "/tmp", Name: "test-pvc", }, }, }, }, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Run function findContainer container, err := findContainer(tt.args.containers, tt.args.name) // Check that the container matches the name if err == nil && container.Name != tt.args.name { t.Errorf("Wrong container returned, wanted container %v, got %v", tt.args.name, container.Name) } if err == nil && tt.wantErr { t.Error("test failed, expected: false, got true") } else if err != nil && !tt.wantErr { t.Errorf("test failed, expected: no error, got error: %s", err.Error()) } }) } }
explode_data.jsonl/65179
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1167 }
[ 2830, 3393, 21814, 4502, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 11609, 981, 914, 198, 197, 197, 39399, 3056, 98645, 16, 33672, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 262, 914, 198, 197, 31215, 262...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func Test_rob(t *testing.T) { type args struct { nums []int } tests := []struct { name string args args want int }{ {"", args{[]int{2, 3, 2}}, 3}, {"", args{[]int{1, 2, 3, 1}}, 4}, {"", args{[]int{0}}, 0}, {"", args{[]int{1}}, 1}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := rob(tt.args.nums); got != tt.want { t.Errorf("rob() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/23579
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 221 }
[ 2830, 3393, 62, 22740, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 22431, 82, 3056, 396, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 31215, 2827, 198, 197, 50780, 526, 198, 197, 59403, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetAllTodosEmpty(t *testing.T) { t.Log(`Should get an empty list of todos`) mockDB, mock, err := sqlmock.New() if err != nil { t.Error(err) t.Fail() } defer mockDB.Close() userID := uint(1) prevID := uint(0) rows := sqlmock.NewRows(todoTableRows) mock.ExpectQuery(`SELECT \* FROM todos WHERE.+`). WithArgs(userID, resultsPerPage, prevID). WillReturnRows(rows) db := DB{mockDB} todos, err := db.GetAllTodos(userID, prevID) if err != nil { t.Errorf("failed to get todo list") t.Fail() } if len(todos) != 0 { t.Errorf("failed to get empty todo list") t.Fail() } if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } }
explode_data.jsonl/44576
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 311 }
[ 2830, 3393, 1949, 2403, 42147, 3522, 1155, 353, 8840, 836, 8, 341, 3244, 5247, 5809, 14996, 633, 458, 4287, 1140, 315, 15519, 24183, 77333, 3506, 11, 7860, 11, 1848, 1669, 5704, 16712, 7121, 741, 743, 1848, 961, 2092, 341, 197, 3244, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestDelayedTask_EmptyStop checks, for both RunWait and RunAsync, that
// calling Stop on the delayer while a task is pending returns promptly —
// the elapsed time of the Stop call itself must stay under shortDuration.
func TestDelayedTask_EmptyStop(t *testing.T) {
	givenDelay := shortDuration
	stopSleep := duration
	tests := []struct {
		name   string
		runner func(task core.DelayedTask, delay time.Duration)
	}{
		{"RunWait", func(task core.DelayedTask, delay time.Duration) {
			_ = task.RunWait(context.Background())
		}},
		{"RunAsync", func(task core.DelayedTask, delay time.Duration) {
			task.RunAsync()
		}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			delayer := newDelayer()
			task := delayer.NewTask(givenDelay, nil)
			// Buffered so the goroutine never blocks on send.
			stopperChan := make(chan time.Time, 2)
			// Background goroutine: wait, then time how long Stop takes by
			// sending timestamps immediately before and after the call.
			go func() {
				time.Sleep(stopSleep)
				stopperChan <- time.Now() // stopperStart
				delayer.Stop(duration, duration)
				stopperChan <- time.Now() // stopperEnd
			}()
			tt.runner(task, givenDelay)
			stopperStart := <-stopperChan
			stopperEnd := <-stopperChan
			stop := stopperEnd.Sub(stopperStart)
			assert.Assert(t, stop < shortDuration, "delayer should stop immediately")
		})
	}
}
explode_data.jsonl/66930
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 377 }
[ 2830, 3393, 57361, 6262, 76060, 1595, 10674, 1155, 353, 8840, 836, 8, 341, 3174, 2071, 20039, 1669, 2805, 12945, 198, 62644, 41745, 1669, 8090, 271, 78216, 1669, 3056, 1235, 341, 197, 11609, 256, 914, 198, 197, 197, 41736, 2915, 17483, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_Encode(t *testing.T) { type TestString struct { ID uint64 `json:"id,string"` Value string `json:"value"` PID uint64 `json:"pid"` PartID uint64 `json:",string"` } tests := []struct { name string input TestString want []byte wantErr bool }{ { name: "none", input: TestString{}, want: []byte(`{"id":"","value":"","pid":0,"PartID":""}`), wantErr: false, }, { name: "num", input: TestString{ ID: 787446465166, }, want: []byte(`{"id":"787446465166","value":"","pid":0,"PartID":""}`), wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(tt.input) if (err != nil) != tt.wantErr { t.Errorf("Marshal() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("Marshal() got = %s, want %s", got, tt.want) } }) } }
explode_data.jsonl/46081
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 468 }
[ 2830, 3393, 93529, 534, 1155, 353, 8840, 836, 8, 341, 13158, 3393, 703, 2036, 341, 197, 29580, 257, 2622, 21, 19, 1565, 2236, 2974, 307, 37058, 8805, 197, 47399, 220, 914, 1565, 2236, 2974, 957, 8805, 197, 10025, 915, 262, 2622, 21, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// Test_AWSRedshiftAtRestEncryption scans four Redshift HCL fixtures and
// checks the aws-redshift-encryption-customer-key rule fires unless the
// cluster is both encrypted and keyed with a customer-managed KMS key.
func Test_AWSRedshiftAtRestEncryption(t *testing.T) {
	expectedCode := "aws-redshift-encryption-customer-key"

	var tests = []struct {
		name                  string
		source                string
		mustIncludeResultCode string
		mustExcludeResultCode string
	}{
		{
			name: "redshift cluster without encryption fails check",
			source: `
resource "aws_redshift_cluster" "bad_example" {
  cluster_identifier = "tf-redshift-cluster"
  database_name = "mydb"
  master_username = "foo"
  master_password = "Mustbe8characters"
  node_type = "dc1.large"
  cluster_type = "single-node"
}
`,
			mustIncludeResultCode: expectedCode,
		},
		{
			name: "redshift cluster with encryption disabled fails check",
			source: `
resource "aws_redshift_cluster" "bad_example" {
  cluster_identifier = "tf-redshift-cluster"
  database_name = "mydb"
  master_username = "foo"
  master_password = "Mustbe8characters"
  node_type = "dc1.large"
  cluster_type = "single-node"
  encrypted = false
}
`,
			mustIncludeResultCode: expectedCode,
		},
		{
			// encrypted = true alone is insufficient: a CMK must be referenced.
			name: "redshift cluster with encryption enabled but no CMK specified fails check",
			source: `
resource "aws_redshift_cluster" "bad_example" {
  cluster_identifier = "tf-redshift-cluster"
  database_name = "mydb"
  master_username = "foo"
  master_password = "Mustbe8characters"
  node_type = "dc1.large"
  cluster_type = "single-node"
  encrypted = true
}
`,
			mustIncludeResultCode: expectedCode,
		},
		{
			name: "redshift cluster with encryption enabled and CMK specified passes check",
			source: `
resource "aws_kms_key" "redshift" {
  enable_key_rotation = true
}

resource "aws_redshift_cluster" "good_example" {
  cluster_identifier = "tf-redshift-cluster"
  database_name = "mydb"
  master_username = "foo"
  master_password = "Mustbe8characters"
  node_type = "dc1.large"
  cluster_type = "single-node"
  encrypted = true
  kms_key_id = aws_kms_key.redshift.key_id
}
`,
			mustExcludeResultCode: expectedCode,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			results := testutil.ScanHCL(test.source, t)
			testutil.AssertCheckCode(t, test.mustIncludeResultCode, test.mustExcludeResultCode, results)
		})
	}
}
explode_data.jsonl/74260
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 960 }
[ 2830, 3393, 1566, 7433, 6033, 13418, 1655, 12416, 79239, 1155, 353, 8840, 836, 8, 341, 42400, 2078, 1669, 330, 8635, 31598, 13418, 12, 79937, 1786, 4394, 16173, 1837, 2405, 7032, 284, 3056, 1235, 341, 197, 11609, 1698, 914, 198, 197, 47...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWatch(t *testing.T) { b, tmpPath := backend.NewDefaultTmpBackend() s := newWatchableStore(b, &lease.FakeLessor{}, nil) defer func() { s.store.Close() os.Remove(tmpPath) }() testKey := []byte("foo") testValue := []byte("bar") s.Put(testKey, testValue, lease.NoLease) w := s.NewWatchStream() w.Watch(testKey, nil, 0) if !s.synced.contains(string(testKey)) { // the key must have had an entry in synced t.Errorf("existence = false, want true") } }
explode_data.jsonl/12639
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 191 }
[ 2830, 3393, 14247, 1155, 353, 8840, 836, 8, 341, 2233, 11, 4174, 1820, 1669, 19163, 7121, 3675, 35986, 29699, 741, 1903, 1669, 501, 14247, 480, 6093, 1883, 11, 609, 1623, 991, 726, 43, 8309, 22655, 2092, 692, 16867, 2915, 368, 341, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFormatStore_GetBulk(t *testing.T) { t.Run("Fail to format key", func(t *testing.T) { provider := formattedstore.NewProvider(mem.NewProvider(), &mockFormatter{errFormat: errors.New("key formatting failure"), useDeterministicKeyFormatting: true}) require.NotNil(t, provider) store, err := provider.OpenStore("StoreName") require.NoError(t, err) require.NotNil(t, store) value, err := store.GetBulk("KeyName") require.EqualError(t, err, `failed to get values stored under deterministically generated keys: `+ `failed to format key "KeyName": key formatting failure`) require.Nil(t, value) }) }
explode_data.jsonl/28242
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 4061, 6093, 13614, 88194, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 19524, 311, 3561, 1376, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 197, 19979, 1669, 23126, 4314, 7121, 5179, 39908, 7121, 5179, 3148, 298, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSortHeaders checks that header values are normalized into a sorted,
// comma-separated form regardless of input ordering or spacing.
func TestSortHeaders(t *testing.T) {
	// NOTE(review): the first two calls appear identical in this view; the
	// second presumably differed only in whitespace inside the header value
	// (exercising space normalization) — confirm against the original file.
	testSortHeaders(t, "br, gzip, deflate", "br,deflate,gzip")
	testSortHeaders(t, "br, gzip, deflate", "br,deflate,gzip")
	testSortHeaders(t, "gzip,br,deflate", "br,deflate,gzip")
	testSortHeaders(t, "gzip", "gzip")
	testSortHeaders(t, "deflate, gzip, br", "br,deflate,gzip")
}
explode_data.jsonl/31697
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 10231, 10574, 1155, 353, 8840, 836, 8, 341, 18185, 10231, 10574, 1155, 11, 330, 1323, 11, 57795, 11, 92689, 497, 330, 1323, 11, 750, 5075, 21644, 9964, 1138, 18185, 10231, 10574, 1155, 11, 330, 1323, 11, 257, 57795, 11, 92...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGenesis(t *testing.T) { genesisState := types.GenesisState{ // this line is used by starport scaffolding # genesis/test/state } k, ctx := keepertest.RvoteKeeper(t) rvote.InitGenesis(ctx, *k, genesisState) got := rvote.ExportGenesis(ctx, *k) require.NotNil(t, got) // this line is used by starport scaffolding # genesis/test/assert }
explode_data.jsonl/10873
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 128 }
[ 2830, 3393, 84652, 1155, 353, 8840, 836, 8, 341, 82281, 13774, 1397, 1669, 4494, 65384, 13774, 1397, 515, 197, 197, 322, 419, 1555, 374, 1483, 553, 6774, 403, 56150, 14995, 671, 59366, 12697, 63796, 198, 197, 630, 16463, 11, 5635, 1669,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMatchGTPsToRollout table-tests GlobalTrafficPolicy selection for a
// rollout. Based on the expectations below: a GTP matches a rollout only when
// their env labels agree (or both lack an env label), and among multiple
// matches the GTP with the earliest CreationTimestamp wins.
func TestMatchGTPsToRollout(t *testing.T) {
	// Rollout whose pod template carries env "qal".
	rollout := argo.Rollout{}
	rollout.Namespace = "namespace"
	rollout.Name = "fake-app-rollout-qal"
	rollout.CreationTimestamp = v1.Now()
	rollout.Spec = argo.RolloutSpec{
		Template: corev1.PodTemplateSpec{
			ObjectMeta: v1.ObjectMeta{
				Labels: map[string]string{"identity": "app1", "env": "qal"},
			},
		},
	}
	rollout.Labels = map[string]string{"identity": "app1"}

	// Rollout whose env ("random") matches none of the GTP fixtures.
	otherEnvRollout := argo.Rollout{}
	otherEnvRollout.Namespace = "namespace"
	otherEnvRollout.Name = "fake-app-rollout-qal"
	otherEnvRollout.CreationTimestamp = v1.Now()
	otherEnvRollout.Spec = argo.RolloutSpec{
		Template: corev1.PodTemplateSpec{
			ObjectMeta: v1.ObjectMeta{
				Labels: map[string]string{"identity": "app1", "env": "random"},
			},
		},
	}
	otherEnvRollout.Labels = map[string]string{"identity": "app1"}

	// Rollout with no env label at all.
	noEnvRollout := argo.Rollout{}
	noEnvRollout.Namespace = "namespace"
	noEnvRollout.Name = "fake-app-rollout-qal"
	noEnvRollout.CreationTimestamp = v1.Now()
	noEnvRollout.Spec = argo.RolloutSpec{
		Template: corev1.PodTemplateSpec{
			ObjectMeta: v1.ObjectMeta{
				Labels: map[string]string{"identity": "app1"},
			},
		},
	}
	noEnvRollout.Labels = map[string]string{"identity": "app1"}

	// GTP fixtures per env. The *Old variants carry an earlier
	// CreationTimestamp so age-based precedence can be asserted.
	e2eGtp := v12.GlobalTrafficPolicy{}
	e2eGtp.CreationTimestamp = v1.Now()
	e2eGtp.Labels = map[string]string{"identity": "app1", "env": "e2e"}
	e2eGtp.Namespace = "namespace"
	e2eGtp.Name = "myGTP-e2e"

	prfGtp := v12.GlobalTrafficPolicy{}
	prfGtp.CreationTimestamp = v1.Now()
	prfGtp.Labels = map[string]string{"identity": "app1", "env": "prf"}
	prfGtp.Namespace = "namespace"
	prfGtp.Name = "myGTP-prf"

	qalGtp := v12.GlobalTrafficPolicy{}
	qalGtp.CreationTimestamp = v1.Now()
	qalGtp.Labels = map[string]string{"identity": "app1", "env": "qal"}
	qalGtp.Namespace = "namespace"
	qalGtp.Name = "myGTP"

	qalGtpOld := v12.GlobalTrafficPolicy{}
	qalGtpOld.CreationTimestamp = v1.Date(2020, 1, 1, 1, 1, 1, 1, time.UTC)
	qalGtpOld.Labels = map[string]string{"identity": "app1", "env": "qal"}
	qalGtpOld.Namespace = "namespace"
	qalGtpOld.Name = "myGTP"

	noEnvGTP := v12.GlobalTrafficPolicy{}
	noEnvGTP.CreationTimestamp = v1.Now()
	noEnvGTP.Labels = map[string]string{"identity": "app1"}
	noEnvGTP.Namespace = "namespace"
	noEnvGTP.Name = "myGTP"

	noEnvGTPOld := v12.GlobalTrafficPolicy{}
	noEnvGTPOld.CreationTimestamp = v1.Date(2020, 1, 1, 1, 1, 1, 1, time.UTC)
	noEnvGTPOld.Labels = map[string]string{"identity": "app1"}
	noEnvGTPOld.Namespace = "namespace"
	noEnvGTPOld.Name = "myGTP"

	testCases := []struct {
		name        string
		gtp         *[]v12.GlobalTrafficPolicy // candidate policies
		rollout     *argo.Rollout              // rollout to match against
		expectedGTP *v12.GlobalTrafficPolicy   // nil means no match expected
	}{
		{
			name:        "Should return no rollout when none have a matching env",
			gtp:         &[]v12.GlobalTrafficPolicy{e2eGtp, prfGtp, qalGtp, qalGtpOld},
			rollout:     &otherEnvRollout,
			expectedGTP: nil,
		},
		{
			name:        "Should return no rollout when the GTP doesn't have an environment",
			gtp:         &[]v12.GlobalTrafficPolicy{noEnvGTP, noEnvGTPOld},
			rollout:     &otherEnvRollout,
			expectedGTP: nil,
		},
		{
			name:        "Should return no rollout when no rollouts have an environment",
			gtp:         &[]v12.GlobalTrafficPolicy{e2eGtp, prfGtp},
			rollout:     &noEnvRollout,
			expectedGTP: nil,
		},
		{
			// Both env-less: the older env-less GTP is selected.
			name:        "Should match a GTP and rollout when both have no env label",
			gtp:         &[]v12.GlobalTrafficPolicy{e2eGtp, prfGtp, qalGtp, qalGtpOld, noEnvGTP, noEnvGTPOld},
			rollout:     &noEnvRollout,
			expectedGTP: &noEnvGTPOld,
		},
		{
			name:        "Should return Match when there's one match",
			gtp:         &[]v12.GlobalTrafficPolicy{qalGtp},
			rollout:     &rollout,
			expectedGTP: &qalGtp,
		},
		{
			name:        "Should return Match when there's one match from a bigger list",
			gtp:         &[]v12.GlobalTrafficPolicy{e2eGtp, prfGtp, qalGtp},
			rollout:     &rollout,
			expectedGTP: &qalGtp,
		},
		{
			// Two "qal" GTPs: the one with the older CreationTimestamp wins.
			name:        "Should handle multiple matches properly",
			gtp:         &[]v12.GlobalTrafficPolicy{e2eGtp, prfGtp, qalGtp, qalGtpOld},
			rollout:     &rollout,
			expectedGTP: &qalGtpOld,
		},
		{
			name:        "Should return nil when there's no match",
			gtp:         &[]v12.GlobalTrafficPolicy{},
			rollout:     &rollout,
			expectedGTP: nil,
		},
		{
			name:        "Should return nil the rollout is invalid",
			gtp:         &[]v12.GlobalTrafficPolicy{},
			rollout:     &argo.Rollout{},
			expectedGTP: nil,
		},
	}

	for _, c := range testCases {
		t.Run(c.name, func(t *testing.T) {
			returned := MatchGTPsToRollout(*c.gtp, c.rollout)
			// ignoreUnexported avoids false diffs on unexported k8s fields.
			if !cmp.Equal(returned, c.expectedGTP, ignoreUnexported) {
				t.Fatalf("Rollout mismatch. Diff: %v", cmp.Diff(returned, c.expectedGTP, ignoreUnexported))
			}
		})
	}
}
explode_data.jsonl/73043
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2236 }
[ 2830, 3393, 8331, 38, 4239, 82, 1249, 32355, 411, 1155, 353, 8840, 836, 8, 341, 197, 1100, 411, 1669, 1392, 78, 71212, 411, 16094, 197, 1100, 411, 46011, 284, 330, 2231, 698, 197, 1100, 411, 2967, 284, 330, 30570, 20023, 86760, 411, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// Test_controller_UpdateIntegration ensures that re-applying a config to a
// running controller performs a dynamic ApplyConfig reload (applies == 1)
// rather than tearing down and restarting the integration (starts stays 1).
func Test_controller_UpdateIntegration(t *testing.T) {
	var (
		integrationStartWg sync.WaitGroup
		applies, starts    atomic.Uint64
	)

	mockIntegration := mockUpdateIntegration{
		// The run function blocks until its context is cancelled; each start
		// is counted and signalled via the wait group.
		Integration: FuncIntegration(func(ctx context.Context) error {
			starts.Inc()
			integrationStartWg.Done()
			<-ctx.Done()
			return nil
		}),
		// Counts dynamic config reloads.
		ApplyConfigFunc: func(Config, Globals) error {
			applies.Inc()
			return nil
		},
	}

	cfg := controllerConfig{
		mockConfig{
			NameFunc: func() string { return mockIntegrationName },
			// Reporting "not equal" forces the controller to attempt an update
			// on every apply.
			ConfigEqualsFunc:  func(Config) bool { return false },
			ApplyDefaultsFunc: func(g Globals) error { return nil },
			IdentifierFunc: func(Globals) (string, error) {
				return mockIntegrationName, nil
			},
			NewIntegrationFunc: func(log.Logger, Globals) (Integration, error) {
				integrationStartWg.Add(1)
				return mockIntegration, nil
			},
		},
	}

	ctrl, err := newController(util.TestLogger(t), cfg, Globals{})
	require.NoError(t, err, "failed to create controller")
	sc := newSyncController(t, ctrl)

	// Wait for our integration to start.
	integrationStartWg.Wait()

	// Try to apply again.
	require.NoError(t, sc.UpdateController(cfg, ctrl.globals), "failed to re-apply config")
	integrationStartWg.Wait()

	sc.Stop()

	require.Equal(t, uint64(1), applies.Load(), "dynamic reload should have occurred")
	require.Equal(t, uint64(1), starts.Load(), "restart should not have occurred")
}
explode_data.jsonl/52847
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 531 }
[ 2830, 3393, 21600, 47393, 52464, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 2084, 17376, 3479, 54, 70, 12811, 28384, 2808, 198, 197, 69898, 7202, 11, 8471, 262, 24510, 71869, 21, 19, 198, 197, 692, 77333, 52464, 1669, 7860, 4289, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestCLI is an end-to-end integration test: for each resource fixture it
// renders templates into a temp dir, runs terraform, then exercises both the
// convert and validate commands in offline and online mode.
func TestCLI(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode.")
		return
	}

	// Define the reusable constraints to be use for the test cases.
	type constraint struct {
		name            string
		wantViolation   bool
		wantOutputRegex string
	}
	// Currently, we only test one rule. Moving forward, resource specific rules
	// should be added to increase the coverage.
	alwaysViolate := constraint{name: "always_violate", wantViolation: true, wantOutputRegex: "Constraint GCPAlwaysViolatesConstraintV1.always_violates_all on resource"}

	// Test cases for each type of resource is defined here.
	cases := []struct {
		name        string
		constraints []constraint
	}{
		{name: "bucket"},
		{name: "bucket_iam"},
		{name: "disk"},
		{name: "firewall"},
		{name: "instance"},
		{name: "sql"},
		{name: "example_bigquery_dataset"},
		{name: "example_compute_disk"},
		{name: "example_compute_firewall"},
		{name: "example_compute_instance"},
		{name: "example_container_cluster"},
		{name: "example_organization_iam_binding"},
		{name: "example_organization_iam_member"},
		{name: "example_organization_iam_policy"},
		{name: "example_project"},
		{name: "example_project_in_org"},
		{name: "example_project_in_folder"},
		{name: "example_project_iam"},
		{name: "example_project_iam_binding"},
		{name: "example_project_iam_member"},
		{name: "example_project_iam_policy"},
		{name: "example_sql_database_instance"},
		{name: "example_storage_bucket"},
		{name: "full_compute_firewall"},
		{name: "full_compute_instance"},
		{name: "full_container_cluster"},
		{name: "full_container_node_pool"},
		{name: "full_sql_database_instance"},
		{name: "full_storage_bucket"},
	}
	for i := range cases {
		// Allocate a variable to make sure test can run in parallel.
		c := cases[i]

		// Add default constraints if not set.
		if len(c.constraints) == 0 {
			c.constraints = []constraint{alwaysViolate}
		}

		// Test both offline and online mode.
		for _, offline := range []bool{true, false} {
			t.Run(fmt.Sprintf("v=%s/tf=%s/offline=%t", version.LeastSupportedVersion(), c.name, offline), func(t *testing.T) {
				t.Parallel()
				// Create a temporary directory for running terraform.
				dir, err := ioutil.TempDir(tmpDir, "terraform")
				if err != nil {
					log.Fatal(err)
				}
				defer os.RemoveAll(dir)

				// Generate the <name>.tf and <name>_assets.json files into the temporary directory.
				generateTestFiles(t, "../testdata/templates", dir, c.name+".tf")
				generateTestFiles(t, "../testdata/templates", dir, c.name+".json")
				terraform(t, dir, c.name)

				t.Run("cmd=convert", func(t *testing.T) {
					testConvertCommand(t, dir, c.name, offline)
				})

				for _, ct := range c.constraints {
					t.Run(fmt.Sprintf("cmd=validate/constraint=%s", ct.name), func(t *testing.T) {
						testValidateCommand(t, ct.wantViolation, ct.wantOutputRegex, dir, c.name, offline, ct.name)
					})
				}
			})
		}
	}
}
explode_data.jsonl/55802
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1160 }
[ 2830, 3393, 63959, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 4886, 5654, 17590, 1273, 304, 2805, 3856, 13053, 197, 853, 198, 197, 532, 197, 322, 18614, 279, 61571, 16982, 311, 387, 990, 369, 279,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_daemon(t *testing.T) { rootDir := "testdata/snapshot" err := os.MkdirAll(rootDir, 0755) require.Nil(t, err) defer func() { _ = os.RemoveAll(rootDir) }() db, err := NewDatabase(rootDir) require.Nil(t, err) ctx := context.TODO() // Add daemons d1 := daemon.Daemon{ID: "d1"} d2 := daemon.Daemon{ID: "d2"} d3 := daemon.Daemon{ID: "d3"} err = db.SaveDaemon(ctx, &d1) require.Nil(t, err) err = db.SaveDaemon(ctx, &d2) require.Nil(t, err) db.SaveDaemon(ctx, &d3) require.Nil(t, err) // duplicate daemon id should fail err = db.SaveDaemon(ctx, &d1) require.Error(t, err) // Delete one daemon err = db.DeleteDaemon(ctx, "d2") require.Nil(t, err) // Check records ids := make(map[string]string) err = db.WalkDaemons(ctx, func(info *daemon.Daemon) error { ids[info.ID] = "" return nil }) _, ok := ids["d1"] require.Equal(t, ok, true) _, ok = ids["d2"] require.Equal(t, ok, false) _, ok = ids["d3"] require.Equal(t, ok, true) // Cleanup records err = db.Cleanup(ctx) require.Nil(t, err) ids2 := make([]string, 0) err = db.WalkDaemons(ctx, func(info *daemon.Daemon) error { ids2 = append(ids2, info.ID) return nil }) require.Nil(t, err) require.Equal(t, len(ids2), 0) }
explode_data.jsonl/79283
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 564 }
[ 2830, 3393, 47070, 7291, 1155, 353, 8840, 836, 8, 341, 33698, 6184, 1669, 330, 92425, 2687, 9601, 698, 9859, 1669, 2643, 1321, 12438, 2403, 9206, 6184, 11, 220, 15, 22, 20, 20, 340, 17957, 59678, 1155, 11, 1848, 340, 16867, 2915, 368,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestLoadConfig loads testdata/config.yaml through the exporter factory and
// verifies that the "datadog/api" exporter sanitizes into the expected full
// Config, while the "datadog/invalid" exporter fails sanitization.
func TestLoadConfig(t *testing.T) {
	factories, err := componenttest.ExampleComponents()
	assert.NoError(t, err)

	factory := NewFactory()
	factories.Exporters[typeStr] = factory
	cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories)

	require.NoError(t, err)
	require.NotNil(t, cfg)

	apiConfig := cfg.Exporters["datadog/api"].(*Config)
	// Sanitize fills in derived fields (e.g. the metrics endpoint from the site).
	err = apiConfig.Sanitize()

	require.NoError(t, err)
	assert.Equal(t, &Config{
		ExporterSettings: configmodels.ExporterSettings{
			NameVal: "datadog/api",
			TypeVal: typeStr,
		},
		TagsConfig: TagsConfig{
			Hostname: "customhostname",
			Env:      "prod",
			Service:  "myservice",
			Version:  "myversion",
			Tags:     []string{"example:tag"},
		},
		API: APIConfig{
			Key:  "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
			Site: "datadoghq.eu",
		},
		Metrics: MetricsConfig{
			Namespace: "opentelemetry.",
			TCPAddr: confignet.TCPAddr{
				Endpoint: "https://api.datadoghq.eu",
			},
		},
	}, apiConfig)

	// The invalid exporter entry must be rejected during sanitization.
	invalidConfig2 := cfg.Exporters["datadog/invalid"].(*Config)
	err = invalidConfig2.Sanitize()
	require.Error(t, err)
}
explode_data.jsonl/32910
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 465 }
[ 2830, 3393, 5879, 2648, 1155, 353, 8840, 836, 8, 341, 1166, 52893, 11, 1848, 1669, 3692, 1944, 5121, 1516, 10443, 741, 6948, 35699, 1155, 11, 1848, 692, 1166, 2919, 1669, 1532, 4153, 741, 1166, 52893, 81077, 388, 21557, 2580, 60, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestFunction parses a PHP snippet containing a function declaration and a
// call whose result is assigned to a variable, then compares the two produced
// AST nodes against hand-built expectations.
// NOTE(review): the PHP source string was reconstructed from a
// whitespace-collapsed view; confirm exact newlines/indent against the original.
func TestFunction(t *testing.T) {
	testStr := `<?php
    function TestFn($arg) {
      echo $arg;
    }
    $var = TestFn("world", 0);`
	p := NewParser()
	p.disableScoping = true
	a, _ := p.Parse("test.php", testStr)

	// Expected AST: the function declaration, then the assignment statement.
	tree := []ast.Node{
		&ast.FunctionStmt{
			FunctionDefinition: &ast.FunctionDefinition{
				Name: "TestFn",
				Arguments: []*ast.FunctionArgument{
					{
						Variable: ast.NewVariable("arg"),
					},
				},
			},
			Body: &ast.Block{
				Statements: []ast.Statement{ast.Echo(ast.NewVariable("arg"))},
			},
		},
		ast.ExprStmt{
			Expr: ast.AssignmentExpr{
				Assignee: ast.NewVariable("var"),
				Value: &ast.FunctionCallExpr{
					FunctionName: &ast.Identifier{Value: "TestFn"},
					Arguments: []ast.Expr{
						&ast.Literal{Type: ast.String, Value: `"world"`},
						// NOTE(review): the numeric literal 0 is expected to
						// parse as ast.Float here — presumably this parser
						// folds all numbers into Float; confirm.
						&ast.Literal{Type: ast.Float, Value: "0"},
					},
				},
				Operator: "=",
			},
		},
	}
	if len(a.Nodes) != 2 {
		t.Fatalf("Function did not correctly parse")
	}
	if !assertEquals(a.Nodes[0], tree[0]) {
		t.Fatalf("Function did not correctly parse")
	}
	if !assertEquals(a.Nodes[1], tree[1]) {
		t.Fatalf("Function assignment did not correctly parse")
	}
}
explode_data.jsonl/28435
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 531 }
[ 2830, 3393, 5152, 1155, 353, 8840, 836, 8, 341, 18185, 2580, 1669, 1565, 1316, 1208, 198, 262, 729, 3393, 24911, 699, 858, 8, 341, 414, 1687, 400, 858, 280, 262, 456, 262, 400, 947, 284, 3393, 24911, 445, 14615, 497, 220, 15, 1215, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRelationshipToString(t *testing.T) { rel := neo4j.Relationship{ StartId: 10, EndId: 11, Id: 2, Type: `Something`, Props: map[string]interface{}{ "Prop1": 2, "Prop2": time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC), }, } actual := input.ToString(rel) expected := `[:Something {"Prop1":2,"Prop2":"2020-01-02T03:04:05.000000006Z"}]` if actual != expected { t.Fatalf(`expected '%v' but got '%v'`, expected, actual) } }
explode_data.jsonl/7076
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 209 }
[ 2830, 3393, 50822, 5870, 1155, 353, 8840, 836, 8, 341, 197, 3748, 1669, 35082, 19, 73, 38939, 15471, 515, 197, 65999, 764, 25, 220, 16, 15, 345, 197, 38407, 764, 25, 256, 220, 16, 16, 345, 197, 67211, 25, 414, 220, 17, 345, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestInsufficientCapacityNode verifies DaemonSet behavior on nodes lacking
// capacity: with one under-resourced node the single pod stays unschedulable;
// after adding a node with enough allocatable memory, one of the two pods is
// scheduled and DaemonSet status reflects exactly one ready pod.
func TestInsufficientCapacityNode(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "insufficient-capacity", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)
		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		podInformer := informers.Core().V1().Pods().Informer()
		nodeClient := clientset.CoreV1().Nodes()
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		// Start Scheduler
		setupScheduler(ctx, t, clientset, informers)

		// DaemonSet requesting 120M memory / 75m CPU per pod.
		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m")
		ds.Spec.UpdateStrategy = *strategy
		ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		// Node with only 100M allocatable — insufficient for the pod request.
		node := newNode("node-with-limited-memory", nil)
		node.Status.Allocatable = allocatableResources("100M", "200m")
		_, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		if err := waitForPodsCreated(podInformer, 1); err != nil {
			t.Errorf("Failed to wait for pods created: %v", err)
		}

		// Every pod created so far must remain unschedulable.
		objects := podInformer.GetIndexer().List()
		for _, object := range objects {
			pod := object.(*v1.Pod)
			if err := waitForPodUnschedulable(clientset, pod); err != nil {
				t.Errorf("Failed to wait for unschedulable status of pod %+v", pod)
			}
		}

		// Node with 200M allocatable — enough for one DaemonSet pod.
		node1 := newNode("node-with-enough-memory", nil)
		node1.Status.Allocatable = allocatableResources("200M", "2000m")
		_, err = nodeClient.Create(context.TODO(), node1, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// 2 pods are created. But only one of two Pods is scheduled by default scheduler.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
	})
}
explode_data.jsonl/70043
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 837 }
[ 2830, 3393, 15474, 26683, 29392, 1955, 1155, 353, 8840, 836, 8, 341, 2023, 4854, 19816, 1155, 11, 2915, 1155, 353, 8840, 836, 11, 8282, 353, 27635, 909, 64, 7291, 1649, 4289, 19816, 8, 341, 197, 27873, 24911, 11, 19402, 11, 6051, 388,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
// TestReadIndexFormatV1 opens a checked-in TSDB block written with index
// format v1 and verifies both an equality matcher and a negated-regexp
// matcher return the expected series/samples within the [0, 1000) window.
func TestReadIndexFormatV1(t *testing.T) {
	/* The block here was produced at commit
	07ef80820ef1250db82f9544f3fcf7f0f63ccee0 with:

	   db, _ := Open("v1db", nil, nil, nil)
	   app := db.Appender()
	   app.Add(labels.FromStrings("foo", "bar"), 1, 2)
	   app.Add(labels.FromStrings("foo", "baz"), 3, 4)
	   app.Add(labels.FromStrings("foo", "meh"), 1000*3600*4, 4) // Not in the block.
	   app.Commit()
	   db.compact()
	   db.Close()
	*/
	blockDir := filepath.Join("testdata", "index_format_v1")
	block, err := OpenBlock(nil, blockDir, nil)
	testutil.Ok(t, err)

	// Equality matcher: only {foo="bar"} should be returned.
	q, err := NewBlockQuerier(block, 0, 1000)
	testutil.Ok(t, err)
	testutil.Equals(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
		map[string][]tsdbutil.Sample{`{foo="bar"}`: []tsdbutil.Sample{sample{t: 1, v: 2}}})

	// Negated regexp "^.$" matches both series (values longer than one char).
	// The "meh" series falls outside the queried time range.
	q, err = NewBlockQuerier(block, 0, 1000)
	testutil.Ok(t, err)
	testutil.Equals(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.$")),
		map[string][]tsdbutil.Sample{
			`{foo="bar"}`: []tsdbutil.Sample{sample{t: 1, v: 2}},
			`{foo="baz"}`: []tsdbutil.Sample{sample{t: 3, v: 4}},
		})
}
explode_data.jsonl/9637
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 494 }
[ 2830, 3393, 4418, 1552, 4061, 53, 16, 1155, 353, 8840, 836, 8, 341, 197, 1057, 576, 2504, 1588, 572, 8947, 518, 5266, 198, 8689, 220, 15, 22, 823, 23, 15, 23, 17, 15, 823, 16, 17, 20, 15, 1999, 23, 17, 69, 24, 20, 19, 19, 69...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRemoveDirectory(t *testing.T) { beforeTest(t) conn := _getConnection(t) defer conn.Close() client := agaveproto.NewSftpRelayClient(conn) // create a random directory name in our test dir tmpTestDirPath, err := _createTempDirectory("") if err != nil { assert.FailNowf(t, err.Error(), "Unable to create temp test file: %s", err.Error()) } err = _updateLocalSharedTestDirOwnership() if err != nil { assert.FailNowf(t, err.Error(), "Unable to change permission on temp test dir: %s", err.Error()) } remoteTestDirPath := _resolveTestPath(tmpTestDirPath, SFTP_SHARED_TEST_DIR) req := &agaveproto.SrvRemoveRequest{ SystemConfig: _createRemoteSystemConfig(), RemotePath: remoteTestDirPath, } grpcResponse, err := client.Remove(context.Background(), req) if err != nil { assert.Nilf(t, err, "Error while invoking remote service: %v", err) } else { // get the test directory stat in the local shared directory absoluteTmpTestDirPath := _resolveTestPath(tmpTestDirPath, LocalSharedTestDir) _, err := os.Stat(absoluteTmpTestDirPath) assert.True(t, os.IsNotExist(err), "Directory " + absoluteTmpTestDirPath + " should not be present after calling Remove") assert.Equal(t, "", grpcResponse.Error, "Error message in response should be empty after successfully request") } afterTest(t) }
explode_data.jsonl/32551
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 449 }
[ 2830, 3393, 13021, 9310, 1155, 353, 8840, 836, 8, 341, 63234, 2271, 1155, 692, 32917, 1669, 716, 52414, 1155, 340, 16867, 4534, 10421, 2822, 25291, 1669, 933, 523, 15110, 7121, 50, 25068, 6740, 352, 2959, 20571, 692, 197, 322, 1855, 264...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestS3BucketMetricSupplier_Resources table-tests enumeration of S3 bucket
// metric configurations: the happy path over multiple buckets/regions, and
// the two AWS error paths (listing buckets, listing metrics) which must map
// to the corresponding resource-enumeration errors.
func TestS3BucketMetricSupplier_Resources(t *testing.T) {
	tests := []struct {
		test    string
		dirName string // golden-file directory for this case
		mocks   func(repository *repository.MockS3Repository)
		wantErr error
	}{
		{
			test:    "multiple bucket with multiple metrics",
			dirName: "s3_bucket_metrics_multiple",
			mocks: func(repository *repository.MockS3Repository) {
				repository.On(
					"ListAllBuckets",
				).Return([]*s3.Bucket{
					{Name: awssdk.String("bucket-martin-test-drift")},
					{Name: awssdk.String("bucket-martin-test-drift2")},
					{Name: awssdk.String("bucket-martin-test-drift3")},
				}, nil)

				// Only drift2 lives in the provider's region (eu-west-3); the
				// others are expected to be filtered out.
				repository.On(
					"GetBucketLocation",
					&s3.Bucket{Name: awssdk.String("bucket-martin-test-drift")},
				).Return(
					"eu-west-1",
					nil,
				)
				repository.On(
					"GetBucketLocation",
					&s3.Bucket{Name: awssdk.String("bucket-martin-test-drift2")},
				).Return(
					"eu-west-3",
					nil,
				)
				repository.On(
					"GetBucketLocation",
					&s3.Bucket{Name: awssdk.String("bucket-martin-test-drift3")},
				).Return(
					"ap-northeast-1",
					nil,
				)

				repository.On(
					"ListBucketMetricsConfigurations",
					&s3.Bucket{Name: awssdk.String("bucket-martin-test-drift2")},
					"eu-west-3",
				).Return(
					[]*s3.MetricsConfiguration{
						{Id: awssdk.String("Metrics_Bucket2")},
						{Id: awssdk.String("Metrics2_Bucket2")},
					},
					nil,
				)
			},
		},
		{
			test:    "cannot list bucket",
			dirName: "s3_bucket_metrics_list_bucket",
			mocks: func(repository *repository.MockS3Repository) {
				repository.On("ListAllBuckets").Return(nil, awserr.NewRequestFailure(nil, 403, ""))
			},
			// Listing buckets failed, so the error carries both the metric
			// resource type and the bucket (listed) resource type.
			wantErr: remoteerror.NewResourceEnumerationErrorWithType(awserr.NewRequestFailure(nil, 403, ""), resourceaws.AwsS3BucketMetricResourceType, resourceaws.AwsS3BucketResourceType),
		},
		{
			test:    "cannot list metrics",
			dirName: "s3_bucket_metrics_list_metrics",
			mocks: func(repository *repository.MockS3Repository) {
				repository.On("ListAllBuckets").Return(
					[]*s3.Bucket{
						{Name: awssdk.String("bucket-martin-test-drift")},
					},
					nil,
				)
				repository.On(
					"GetBucketLocation",
					&s3.Bucket{Name: awssdk.String("bucket-martin-test-drift")},
				).Return(
					"eu-west-3",
					nil,
				)
				repository.On(
					"ListBucketMetricsConfigurations",
					&s3.Bucket{Name: awssdk.String("bucket-martin-test-drift")},
					"eu-west-3",
				).Return(
					nil,
					awserr.NewRequestFailure(nil, 403, ""),
				)
			},
			wantErr: remoteerror.NewResourceEnumerationError(awserr.NewRequestFailure(nil, 403, ""), resourceaws.AwsS3BucketMetricResourceType),
		},
	}
	for _, tt := range tests {
		// When the golden files for this case are being (re)generated, run
		// against a real provider instead of the mocked one.
		shouldUpdate := tt.dirName == *goldenfile.Update

		providerLibrary := terraform.NewProviderLibrary()
		supplierLibrary := resource.NewSupplierLibrary()

		repo := testresource.InitFakeSchemaRepository("aws", "3.19.0")
		resourceaws.InitResourcesMetadata(repo)
		factory := terraform.NewTerraformResourceFactory(repo)
		deserializer := resource.NewDeserializer(factory)
		if shouldUpdate {
			provider, err := InitTestAwsProvider(providerLibrary)
			if err != nil {
				t.Fatal(err)
			}
			repository := repository.NewS3Repository(client.NewAWSClientFactory(provider.session))
			supplierLibrary.AddSupplier(NewS3BucketMetricSupplier(provider, repository, deserializer))
		}

		t.Run(tt.test, func(t *testing.T) {
			mock := repository.MockS3Repository{}
			tt.mocks(&mock)
			provider := mocks.NewMockedGoldenTFProvider(tt.dirName, providerLibrary.Provider(terraform.AWS), shouldUpdate)
			s := &S3BucketMetricSupplier{
				provider,
				deserializer,
				&mock,
				terraform.NewParallelResourceReader(parallel.NewParallelRunner(context.TODO(), 10)),
				tf.TerraformProviderConfig{
					Name:         "test",
					DefaultAlias: "eu-west-3",
				},
			}
			got, err := s.Resources()
			assert.Equal(t, err, tt.wantErr)
			// Compare enumerated resources against the golden files.
			test.CtyTestDiff(got, tt.dirName, provider, deserializer, shouldUpdate, t)
		})
	}
}
explode_data.jsonl/44842
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1821 }
[ 2830, 3393, 50, 18, 36018, 54310, 46167, 62, 11277, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 3056, 1235, 341, 197, 18185, 262, 914, 198, 197, 48532, 675, 914, 198, 197, 2109, 25183, 256, 2915, 50305, 353, 23319, 24664, 50, 18, 4624...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNodes_ToByteArrays(t *testing.T) { nodes := Nodes{ &Node{val: []byte("a")}, &Node{val: []byte("b")}, } for i, val := range nodes.ToByteArrays() { exp := string(nodes[i].val) act := string(val) if exp != act { t.Errorf("expected val at index %d to be %s, got %s", i, exp, act) } } }
explode_data.jsonl/57992
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 142 }
[ 2830, 3393, 12288, 38346, 18394, 82, 1155, 353, 8840, 836, 8, 341, 79756, 1669, 52501, 515, 197, 197, 5, 1955, 90, 831, 25, 3056, 3782, 445, 64, 79583, 197, 197, 5, 1955, 90, 831, 25, 3056, 3782, 445, 65, 79583, 197, 532, 2023, 60...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMultipleLayers(t *testing.T) { p, err := LoadFromString(testMultipleLayers) if err != nil { t.Error(err) t.Fail() return } assert.NotNil(t, p) assert.Equal(t, 2, p.ModelVersion) assert.NotNil(t, p.Content) assert.Equal(t, "Layers", p.Content.Name) assert.Equal(t, 12, p.Content.Fps) assert.NotNil(t, p.Content.Layers) assert.Equal(t, 2, len(p.Content.Layers)) assert.Equal(t, 1, len(p.Content.Layers[0].Chunks)) assert.Equal(t, 1, len(p.Content.Layers[1].Chunks)) }
explode_data.jsonl/30014
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 226 }
[ 2830, 3393, 32089, 40235, 1155, 353, 8840, 836, 8, 341, 3223, 11, 1848, 1669, 8893, 44491, 8623, 32089, 40235, 340, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, 197, 3244, 57243, 741, 197, 853, 198, 197, 532, 6948, 93882, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_TeamLogout(t *testing.T) { w := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/api/logout", nil) req.Header.Set("Authorization", team[0].Token) router.ServeHTTP(w, req) assert.Equal(t, 200, w.Code) //login again w = httptest.NewRecorder() jsonData, _ := json.Marshal(map[string]interface{}{ "Name": team[0].Name, "Password": team[0].Password, }) req, _ = http.NewRequest("POST", "/api/login", bytes.NewBuffer(jsonData)) req.Header.Set("Authorization", managerToken) router.ServeHTTP(w, req) assert.Equal(t, 200, w.Code) var backJSON = struct { Error int `json:"error"` Msg string `json:"msg"` Data string `json:"data"` }{} err := json.Unmarshal(w.Body.Bytes(), &backJSON) assert.Equal(t, nil, err) team[0].Token = backJSON.Data }
explode_data.jsonl/77164
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 1139, 14580, 27958, 1155, 353, 8840, 836, 8, 341, 6692, 1669, 54320, 70334, 7121, 47023, 741, 24395, 11, 716, 1669, 1758, 75274, 445, 3806, 497, 3521, 2068, 56097, 497, 2092, 340, 24395, 15753, 4202, 445, 18124, 497, 2083, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConcurrentQueue(t *testing.T) { t.Parallel() testQueueAddDrain(t, 100, 1, 1, 1000, 1000) }
explode_data.jsonl/77516
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 42 }
[ 2830, 3393, 1109, 3231, 7554, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 18185, 7554, 2212, 8847, 466, 1155, 11, 220, 16, 15, 15, 11, 220, 16, 11, 220, 16, 11, 220, 16, 15, 15, 15, 11, 220, 16, 15, 15, 15, 340, 92 ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestRejectBadExplainer(t *testing.T) { g := gomega.NewGomegaWithT(t) kfsvc := makeTestKFService() kfsvc.Spec.Default.Explainer = &ExplainerSpec{} g.Expect(kfsvc.ValidateCreate()).Should(gomega.MatchError(ExactlyOneExplainerViolatedError)) }
explode_data.jsonl/7106
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 99 }
[ 2830, 3393, 78413, 17082, 43953, 1743, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 32696, 7121, 38, 32696, 2354, 51, 1155, 340, 16463, 69, 58094, 1669, 1281, 2271, 65008, 1860, 741, 16463, 69, 58094, 36473, 13275, 5121, 500, 1743, 28...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRemoveDuplicateAccessModes(t *testing.T) { modes := []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadOnlyMany, v1.ReadOnlyMany, } modes = removeDuplicateAccessModes(modes) if len(modes) != 2 { t.Errorf("Expected 2 distinct modes in set but found %v", len(modes)) } }
explode_data.jsonl/25710
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 123 }
[ 2830, 3393, 13021, 53979, 6054, 70035, 1155, 353, 8840, 836, 8, 341, 2109, 2539, 1669, 3056, 85, 16, 61655, 18902, 6054, 3636, 515, 197, 5195, 16, 6503, 7985, 12522, 11, 348, 16, 26460, 8441, 11, 348, 16, 26460, 8441, 11, 348, 16, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCT_PTabConstructor(t *testing.T) { v := wml.NewCT_PTab() if v == nil { t.Errorf("wml.NewCT_PTab must return a non-nil value") } if err := v.Validate(); err != nil { t.Errorf("newly constructed wml.CT_PTab should validate: %s", err) } }
explode_data.jsonl/75484
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 108 }
[ 2830, 3393, 1162, 1088, 8582, 13288, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 289, 1014, 7121, 1162, 1088, 8582, 741, 743, 348, 621, 2092, 341, 197, 3244, 13080, 445, 86, 1014, 7121, 1162, 1088, 8582, 1969, 470, 264, 2477, 83248, 897...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetOpenEBSNamespace(t *testing.T) { testCases := map[string]struct { value string expectValue string }{ "Missing env variable": { value: "", expectValue: "", }, "Present env variable with value": { value: "value1", expectValue: "value1", }, "Present env variable with whitespaces": { value: " ", expectValue: "", }, } for k, v := range testCases { v := v t.Run(k, func(t *testing.T) { if len(v.value) != 0 { os.Setenv(string(menv.OpenEBSNamespace), v.value) } actualValue := getOpenEBSNamespace() if !reflect.DeepEqual(actualValue, v.expectValue) { t.Errorf("expected %s got %s", v.expectValue, actualValue) } os.Unsetenv(string(menv.OpenEBSNamespace)) }) } }
explode_data.jsonl/30410
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 349 }
[ 2830, 3393, 1949, 5002, 36, 7347, 22699, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 16309, 981, 914, 198, 197, 24952, 1130, 914, 198, 197, 59403, 197, 197, 1, 25080, 6105, 3890, 788, 341, 298, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCode(t *testing.T) { for _, v := range codeTests { c := ComputeCode(v.secret, v.value) if c != v.code { t.Errorf("computeCode(%s, %d): got %d expected %d\n", v.secret, v.value, c, v.code) } } }
explode_data.jsonl/75087
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 101 }
[ 2830, 3393, 2078, 1155, 353, 8840, 836, 8, 1476, 2023, 8358, 348, 1669, 2088, 2038, 18200, 341, 197, 1444, 1669, 22603, 2078, 3747, 53940, 11, 348, 2824, 692, 197, 743, 272, 961, 348, 10210, 341, 298, 3244, 13080, 445, 27706, 2078, 15...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUpdateOfValidControlPlane(t *testing.T) { oldControlPlane := newControlPlaneWithVersion("my-smcp", "istio-system", "v2.0") validator, _, _ := createControlPlaneValidatorTestFixture(oldControlPlane) controlPlane := newControlPlaneWithVersion("my-smcp", "istio-system", "v2.1") response := validator.Handle(ctx, createUpdateRequest(oldControlPlane, controlPlane)) assert.True(response.Allowed, "Expected validator to accept update of valid ServiceMeshControlPlane", t) }
explode_data.jsonl/10237
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 156 }
[ 2830, 3393, 4289, 2124, 4088, 3273, 34570, 1155, 353, 8840, 836, 8, 341, 61828, 3273, 34570, 1669, 501, 3273, 34570, 2354, 5637, 445, 2408, 4668, 4672, 497, 330, 380, 815, 36648, 497, 330, 85, 17, 13, 15, 1138, 197, 16112, 11, 8358, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSetProcTitleFast(t *testing.T) { if HaveSetProcTitleFast == HaveNone { t.SkipNow() } title := randomMD5() SetProcTitleFast(title) out, err := exec.Command("/bin/ps", "ax").Output() if err != nil { // No ps available on this platform. t.SkipNow() } else if !strings.Contains(string(out), title) { t.FailNow() } }
explode_data.jsonl/48491
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 134 }
[ 2830, 3393, 1649, 24508, 3851, 32174, 1155, 353, 8840, 836, 8, 341, 743, 12243, 1649, 24508, 3851, 32174, 621, 12243, 4064, 341, 197, 3244, 57776, 7039, 741, 197, 630, 24751, 1669, 4194, 6076, 20, 2822, 22212, 24508, 3851, 32174, 12504, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestAgent_ServiceMaintenance_ACLDeny(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), TestACLConfig()) defer a.Shutdown() // Register the service. service := &structs.NodeService{ ID: "test", Service: "test", } if err := a.AddService(service, nil, false, ""); err != nil { t.Fatalf("err: %v", err) } t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil) if _, err := a.srv.AgentServiceMaintenance(nil, req); !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) } }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=root", nil) if _, err := a.srv.AgentServiceMaintenance(nil, req); err != nil { t.Fatalf("err: %v", err) } }) }
explode_data.jsonl/33641
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 358 }
[ 2830, 3393, 16810, 52548, 92735, 97627, 23619, 88, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 11323, 1669, 1532, 2271, 16810, 1155, 2967, 1507, 3393, 55393, 2648, 2398, 16867, 264, 10849, 18452, 2822, 197, 322, 8451, 279, 2473,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDomainPrefix(t *testing.T) { //Test cases cases := map[string]string{ "http://google.com": "google", "http://google.com/ding?true": "google", "google.com/?ding=false": "google", "google.com?ding=false": "google", "google.com": "google", "google.co.uk": "google", "gama.google.com": "google", "gama.google.co.uk": "google", "beta.gama.google.co.uk": "google", } for url, expectedPrefix := range cases { domainPrefix := DomainPrefix(url) if domainPrefix != expectedPrefix { t.Errorf("Url (%q) returned %q for DomainPrefix(), but %q was expected", url, domainPrefix, expectedPrefix) } } }
explode_data.jsonl/30831
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 13636, 14335, 1155, 353, 8840, 836, 8, 341, 197, 322, 2271, 5048, 198, 1444, 2264, 1669, 2415, 14032, 30953, 515, 197, 197, 76932, 1110, 17485, 905, 788, 1843, 330, 17485, 756, 197, 197, 76932, 1110, 17485, 905, 3446, 287, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFindDuplicates(t *testing.T) { testCases := []struct { got []int want []int }{ { got: []int{4, 3, 2, 7, 8, 2, 3, 1}, want: []int{2, 3}, }, { got: []int{1, 1, 2}, want: []int{1}, }, { got: []int{1}, want: []int{}, }, } for _, testCase := range testCases { actual := findDuplicates(testCase.got) assert.Check(t, is.DeepEqual(actual, testCase.want)) } }
explode_data.jsonl/74173
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 209 }
[ 2830, 3393, 9885, 76851, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 3174, 354, 220, 3056, 396, 198, 197, 50780, 3056, 396, 198, 197, 59403, 197, 197, 515, 298, 3174, 354, 25, 220, 3056, 396, 90, 19, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSubjectPostalCodeLengthOK(t *testing.T) { inputPath := "subjectPostalCode.pem" expected := lint.Pass out := test.TestLint("e_subject_postal_code_max_length", inputPath) if out.Status != expected { t.Errorf("%s: expected %s, got %s", inputPath, expected, out.Status) } }
explode_data.jsonl/23015
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 13019, 43800, 2078, 4373, 3925, 1155, 353, 8840, 836, 8, 341, 22427, 1820, 1669, 330, 11501, 43800, 2078, 49373, 698, 42400, 1669, 57920, 87768, 198, 13967, 1669, 1273, 8787, 47556, 445, 68, 28834, 6333, 278, 4136, 6345, 5118,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUnmarshalLocalDateTime(t *testing.T) { examples := []struct { name string in string out LocalDateTime }{ { name: "normal", in: "1979-05-27T07:32:00", out: LocalDateTime{ Date: LocalDate{ Year: 1979, Month: 5, Day: 27, }, Time: LocalTime{ Hour: 7, Minute: 32, Second: 0, Nanosecond: 0, }, }}, { name: "with nanoseconds", in: "1979-05-27T00:32:00.999999", out: LocalDateTime{ Date: LocalDate{ Year: 1979, Month: 5, Day: 27, }, Time: LocalTime{ Hour: 0, Minute: 32, Second: 0, Nanosecond: 999999000, }, }, }, } for i, example := range examples { toml := fmt.Sprintf(`date = %s`, example.in) t.Run(fmt.Sprintf("ToLocalDateTime_%d_%s", i, example.name), func(t *testing.T) { type dateStruct struct { Date LocalDateTime } var obj dateStruct err := Unmarshal([]byte(toml), &obj) if err != nil { t.Fatal(err) } if obj.Date != example.out { t.Errorf("expected '%s', got '%s'", example.out, obj.Date) } }) t.Run(fmt.Sprintf("ToTime_%d_%s", i, example.name), func(t *testing.T) { type dateStruct struct { Date time.Time } var obj dateStruct err := Unmarshal([]byte(toml), &obj) if err != nil { t.Fatal(err) } if obj.Date.Year() != example.out.Date.Year { t.Errorf("expected year %d, got %d", example.out.Date.Year, obj.Date.Year()) } if obj.Date.Month() != example.out.Date.Month { t.Errorf("expected month %d, got %d", example.out.Date.Month, obj.Date.Month()) } if obj.Date.Day() != example.out.Date.Day { t.Errorf("expected day %d, got %d", example.out.Date.Day, obj.Date.Day()) } if obj.Date.Hour() != example.out.Time.Hour { t.Errorf("expected hour %d, got %d", example.out.Time.Hour, obj.Date.Hour()) } if obj.Date.Minute() != example.out.Time.Minute { t.Errorf("expected minute %d, got %d", example.out.Time.Minute, obj.Date.Minute()) } if obj.Date.Second() != example.out.Time.Second { t.Errorf("expected second %d, got %d", example.out.Time.Second, obj.Date.Second()) } if obj.Date.Nanosecond() != example.out.Time.Nanosecond { t.Errorf("expected nanoseconds %d, got %d", 
example.out.Time.Nanosecond, obj.Date.Nanosecond()) } }) } }
explode_data.jsonl/46361
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1155 }
[ 2830, 3393, 1806, 27121, 7319, 7689, 1155, 353, 8840, 836, 8, 341, 8122, 4023, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 17430, 256, 914, 198, 197, 13967, 220, 47465, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, 330, 8252, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetRequest(t *testing.T) { setupServer() defer teardownServer() setupDefaultMux(`"body"`) req := URL("http://example.com/url") if err := req.Get(); err != nil { t.Error(err) } assertTextualBody(t, `"body"`, req.Response.Body) assertMethod(t, "GET", req.Request.Method) }
explode_data.jsonl/24745
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 116 }
[ 2830, 3393, 1949, 1900, 1155, 353, 8840, 836, 8, 341, 84571, 5475, 741, 16867, 49304, 5475, 741, 84571, 3675, 44, 2200, 5809, 1, 2599, 39917, 692, 24395, 1669, 5548, 445, 1254, 1110, 8687, 905, 57254, 5130, 743, 1848, 1669, 4232, 2234, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRoundtrip(t *testing.T) { var buf bytes.Buffer r1 := report.MakeReport() r1.WriteBinary(&buf, gzip.BestCompression) r2, err := report.MakeFromBinary(&buf) if err != nil { t.Error(err) } if !reflect.DeepEqual(r1, *r2) { t.Errorf("%v != %v", r1, *r2) } }
explode_data.jsonl/17308
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 27497, 32981, 1155, 353, 8840, 836, 8, 341, 2405, 6607, 5820, 22622, 198, 7000, 16, 1669, 1895, 50133, 10361, 741, 7000, 16, 4073, 21338, 2099, 5909, 11, 57795, 1785, 477, 81411, 340, 7000, 17, 11, 1848, 1669, 1895, 50133, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestWelcomeConfig(t *testing.T) { var ( orgMessage = "defined message for an org" repoMessage = "defined message for a repo" ) config := &plugins.Configuration{ Welcome: []plugins.Welcome{ { Repos: []string{"kubernetes/test-infra"}, MessageTemplate: repoMessage, }, { Repos: []string{"kubernetes"}, MessageTemplate: orgMessage, }, { Repos: []string{"kubernetes/repo-infra"}, MessageTemplate: repoMessage, }, }, } testCases := []struct { name string repo string org string expectedMessage string }{ { name: "default message", org: "kubernetes-sigs", repo: "kind", expectedMessage: defaultWelcomeMessage, }, { name: "org defined message", org: "kubernetes", repo: "community", expectedMessage: orgMessage, }, { name: "repo defined message, before an org", org: "kubernetes", repo: "test-infra", expectedMessage: repoMessage, }, { name: "repo defined message, after an org", org: "kubernetes", repo: "repo-infra", expectedMessage: repoMessage, }, } for _, tc := range testCases { receivedMessage := welcomeMessageForRepo(optionsForRepo(config, tc.org, tc.repo)) if receivedMessage != tc.expectedMessage { t.Fatalf("%s: expected to get '%s' and received '%s'", tc.name, tc.expectedMessage, receivedMessage) } } }
explode_data.jsonl/7531
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 755 }
[ 2830, 3393, 13936, 2648, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 87625, 2052, 220, 284, 330, 9711, 1943, 369, 458, 1240, 698, 197, 17200, 5368, 2052, 284, 330, 9711, 1943, 369, 264, 15867, 698, 197, 692, 25873, 1669, 609, 18716...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestReconcileOnCancelledPipelineRun(t *testing.T) { prs := []*v1alpha1.PipelineRun{tb.PipelineRun("test-pipeline-run-cancelled", "foo", tb.PipelineRunSpec("test-pipeline", tb.PipelineRunServiceAccount("test-sa"), tb.PipelineRunCancelled, ), )} ps := []*v1alpha1.Pipeline{tb.Pipeline("test-pipeline", "foo", tb.PipelineSpec( tb.PipelineTask("hello-world-1", "hello-world"), ))} ts := []*v1alpha1.Task{tb.Task("hello-world", "foo")} trs := []*v1alpha1.TaskRun{ tb.TaskRun("test-pipeline-run-cancelled-hello-world", "foo", tb.TaskRunOwnerReference("kind", "name"), tb.TaskRunLabel(pipeline.GroupName+pipeline.PipelineLabelKey, "test-pipeline-run-cancelled"), tb.TaskRunLabel(pipeline.GroupName+pipeline.PipelineRunLabelKey, "test-pipeline"), tb.TaskRunSpec(tb.TaskRunTaskRef("hello-world"), tb.TaskRunServiceAccount("test-sa"), ), ), } d := test.Data{ PipelineRuns: prs, Pipelines: ps, Tasks: ts, TaskRuns: trs, } testAssets, cancel := getPipelineRunController(t, d) defer cancel() c := testAssets.Controller clients := testAssets.Clients err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-cancelled") if err != nil { t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err) } // Check that the PipelineRun was reconciled correctly reconciledRun, err := clients.Pipeline.Tekton().PipelineRuns("foo").Get("test-pipeline-run-cancelled", metav1.GetOptions{}) if err != nil { t.Fatalf("Somehow had error getting completed reconciled run out of fake client: %s", err) } if reconciledRun.Status.CompletionTime == nil { t.Errorf("Expected a CompletionTime on invalid PipelineRun but was nil") } // This PipelineRun should still be complete and false, and the status should reflect that if !reconciledRun.Status.GetCondition(apis.ConditionSucceeded).IsFalse() { t.Errorf("Expected PipelineRun status to be complete and false, but was %v", reconciledRun.Status.GetCondition(apis.ConditionSucceeded)) } }
explode_data.jsonl/81290
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 810 }
[ 2830, 3393, 693, 40446, 457, 1925, 39473, 34656, 6727, 1155, 353, 8840, 836, 8, 341, 25653, 82, 1669, 29838, 85, 16, 7141, 16, 1069, 8790, 6727, 90, 18387, 1069, 8790, 6727, 445, 1944, 2268, 8790, 22973, 85003, 832, 497, 330, 7975, 75...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func Test_Prism_Format(t *testing.T) { test.RunHttpTest(t, func(t *testing.T, ts *httptest.Server, app *goapp.App) { manager := app.Get("gonode.manager").(*base.PgNodeManager) home := app.Get("gonode.handler_collection").(base.HandlerCollection).NewNode("core.root") home.Name = "Homepage" home.Access = []string{"node:prism:render", "IS_AUTHENTICATED_ANONYMOUSLY"} manager.Save(home, false) raw := app.Get("gonode.handler_collection").(base.HandlerCollection).NewNode("core.raw") raw.Name = "Humans.txt" raw.Slug = "humans.txt" raw.Access = []string{"node:prism:render", "IS_AUTHENTICATED_ANONYMOUSLY"} manager.Save(raw, false) manager.Move(raw.Uuid, home.Uuid) raw2 := app.Get("gonode.handler_collection").(base.HandlerCollection).NewNode("core.raw") raw2.Name = "Humans" raw2.Slug = "humans" raw2.Access = []string{"node:prism:render", "IS_AUTHENTICATED_ANONYMOUSLY"} manager.Save(raw2, false) manager.Move(raw2.Uuid, home.Uuid) res, _ := test.RunRequest("GET", fmt.Sprintf("%s/humans", ts.URL)) assert.Equal(t, http.StatusOK, res.StatusCode, "Cannot find /humans") res, _ = test.RunRequest("GET", fmt.Sprintf("%s/humans.txt", ts.URL)) assert.Equal(t, http.StatusOK, res.StatusCode, "Cannot find /humans.txt") }) }
explode_data.jsonl/72816
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 507 }
[ 2830, 3393, 32716, 2142, 72999, 1155, 353, 8840, 836, 8, 341, 18185, 16708, 2905, 2271, 1155, 11, 2915, 1155, 353, 8840, 836, 11, 10591, 353, 96336, 70334, 22997, 11, 906, 353, 3346, 676, 5105, 8, 341, 197, 92272, 1669, 906, 2234, 445...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestApplyFailsWithVersionMismatch(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() _, client, closeFn := setup(t) defer closeFn() obj := []byte(`{ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "name": "deployment", "labels": {"app": "nginx"} }, "spec": { "replicas": 3, "selector": { "matchLabels": { "app": "nginx" } }, "template": { "metadata": { "labels": { "app": "nginx" } }, "spec": { "containers": [{ "name": "nginx", "image": "nginx:latest" }] } } } }`) _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). AbsPath("/apis/apps/v1"). Namespace("default"). Resource("deployments"). Name("deployment"). Param("fieldManager", "apply_test"). Body(obj).Do(context.TODO()).Get() if err != nil { t.Fatalf("Failed to create object using Apply patch: %v", err) } obj = []byte(`{ "apiVersion": "extensions/v1beta", "kind": "Deployment", "metadata": { "name": "deployment", "labels": {"app": "nginx"} }, "spec": { "replicas": 100, "selector": { "matchLabels": { "app": "nginx" } }, "template": { "metadata": { "labels": { "app": "nginx" } }, "spec": { "containers": [{ "name": "nginx", "image": "nginx:latest" }] } } } }`) _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). AbsPath("/apis/apps/v1"). Namespace("default"). Resource("deployments"). Name("deployment"). Param("fieldManager", "apply_test"). Body([]byte(obj)).Do(context.TODO()).Get() if err == nil { t.Fatalf("Expecting to get version mismatch when applying object") } status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get version mismatch as API error") } if status.Status().Code != http.StatusBadRequest { t.Fatalf("expected status code to be %d but was %d", http.StatusBadRequest, status.Status().Code) } }
explode_data.jsonl/53474
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 958 }
[ 2830, 3393, 28497, 37, 6209, 2354, 5637, 82572, 1155, 353, 8840, 836, 8, 341, 16867, 4565, 70, 266, 57824, 287, 4202, 13859, 42318, 16014, 2271, 1155, 11, 4094, 12753, 13275, 13859, 42318, 11, 13954, 20304, 22997, 16384, 28497, 11, 830, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestLogging(t *testing.T) { logger, _ := log.NewLogger(log.WithHandler(log.NoopHandler)) messages := []log.Entry{} // collect is a middleware that collects the messages collect := log.MiddlewareFunc(func(next log.Handler) log.Handler { return log.HandlerFunc(func(entry log.Entry) error { messages = append(messages, entry) return next.HandleLog(entry) }) }) logger.Use(collect) a := assertions.New(t) e := echo.New() // Test Logging middleware { handler := Log(logger)(handler) { req := httptest.NewRequest("GET", "/", nil) rec := httptest.NewRecorder() c := e.NewContext(req, rec) err := handler(c) a.So(err, should.BeNil) } fields := messages[0].Fields().Fields() a.So(len(messages), should.Equal, 1) a.So(messages[0].Message(), should.Equal, "Request handled") a.So(messages[0].Level(), should.Equal, log.InfoLevel) a.So(fields["method"], should.Equal, "GET") a.So(fields["url"], should.Equal, "/") a.So(fields["response_size"], should.Equal, 3) a.So(fields["status"], should.Equal, 200) a.So(fields, should.ContainKey, "duration") a.So(fields, should.ContainKey, "remote_addr") a.So(fields, should.ContainKey, "request_id") a.So(fields, should.ContainKey, "response_size") a.So(fields, should.NotContainKey, "redirect") } // Reset messages messages = nil // Test Logging middleware on error { handler := Log(logger)(errorHandler) req := httptest.NewRequest("GET", "/", nil) rec := httptest.NewRecorder() c := e.NewContext(req, rec) err := handler(c) a.So(err, should.BeNil) fields := messages[0].Fields().Fields() a.So(len(messages), should.Equal, 1) a.So(messages[0].Message(), should.Equal, "Request error") a.So(messages[0].Level(), should.Equal, log.ErrorLevel) a.So(fields["status"], should.Equal, 500) } // Reset messages messages = nil // Test Logging middleware on redirect { handler := Log(logger)(redirectHandler) req := httptest.NewRequest("GET", "/", nil) rec := httptest.NewRecorder() c := e.NewContext(req, rec) err := handler(c) a.So(err, should.BeNil) fields := 
messages[0].Fields().Fields() a.So(len(messages), should.Equal, 1) a.So(messages[0].Message(), should.Equal, "Request handled") a.So(messages[0].Level(), should.Equal, log.InfoLevel) a.So(fields, should.ContainKey, "location") } // Reset messages messages = nil // Test Logging middleware on forward { handler := forwardMiddleware(Log(logger)(noopHandler)) req := httptest.NewRequest("GET", "/", nil) rec := httptest.NewRecorder() c := e.NewContext(req, rec) err := handler(c) a.So(err, should.BeNil) fields := messages[0].Fields().Fields() a.So(len(messages), should.Equal, 1) a.So(fields, should.ContainKey, "forwarded_for") } // Reset messages messages = nil // Test Logging middleware with invalid handler { handler := Log(logger)(invalidHandler) req := httptest.NewRequest("GET", "/", nil) rec := httptest.NewRecorder() c := e.NewContext(req, rec) err := handler(c) a.So(err, should.NotBeNil) fields := messages[0].Fields().Fields() a.So(len(messages), should.Equal, 1) a.So(fields, should.ContainKey, "error") } }
explode_data.jsonl/14446
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1274 }
[ 2830, 3393, 34575, 1155, 353, 8840, 836, 8, 1476, 17060, 11, 716, 1669, 1487, 7121, 7395, 12531, 26124, 3050, 12531, 16766, 453, 3050, 4390, 2109, 3737, 1669, 3056, 839, 22330, 31483, 197, 322, 6530, 374, 264, 29679, 429, 51207, 279, 66...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConfig_Get(t *testing.T) { expectedDialect := TestDefaultManagerDialect() actualDialect, err := configObj.Get("state.manager.dialect") if err != true { fmt.Printf("Error retrieving structure from config object.") t.Fail() } assert.Equal(t, expectedDialect, actualDialect, "Bom Engine read from config object meets expectations.") }
explode_data.jsonl/50527
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 109 }
[ 2830, 3393, 2648, 13614, 1155, 353, 8840, 836, 8, 341, 42400, 35, 55056, 1669, 3393, 3675, 2043, 35, 55056, 2822, 88814, 35, 55056, 11, 1848, 1669, 2193, 5261, 2234, 445, 2454, 32815, 950, 55056, 1138, 743, 1848, 961, 830, 341, 197, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestTardigrade:", NilObject: (*tardigrade.Object)(nil), }) }
explode_data.jsonl/34491
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 66 }
[ 2830, 3393, 52464, 1155, 353, 8840, 836, 8, 341, 1166, 267, 17966, 16708, 1155, 11, 609, 49494, 17966, 8382, 417, 515, 197, 197, 24703, 675, 25, 330, 2271, 51, 567, 5233, 1021, 55120, 197, 18317, 321, 1190, 25, 220, 4609, 83, 567, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1