text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestInstanceIdentity(t *testing.T) { imports := make(testImporter) conf := Config{Importer: imports} makePkg := func(src string) { f, err := parseSrc("", src) if err != nil { t.Fatal(err) } name := f.PkgName.Value pkg, err := conf.Check(name, []*syntax.File{f}, nil) if err != nil { t.Fatal(err) } imports[name] = pkg } makePkg(`package lib; type T[P any] struct{}`) makePkg(`package a; import "lib"; var A lib.T[int]`) makePkg(`package b; import "lib"; var B lib.T[int]`) a := imports["a"].Scope().Lookup("A") b := imports["b"].Scope().Lookup("B") if !Identical(a.Type(), b.Type()) { t.Errorf("mismatching types: a.A: %s, b.B: %s", a.Type(), b.Type()) } }
explode_data.jsonl/29402
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 314 }
[ 2830, 3393, 2523, 18558, 1155, 353, 8840, 836, 8, 341, 21918, 82, 1669, 1281, 8623, 77289, 340, 67850, 1669, 5532, 90, 77289, 25, 15202, 532, 77438, 47, 7351, 1669, 2915, 14705, 914, 8, 341, 197, 1166, 11, 1848, 1669, 4715, 20360, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSeriesRename(t *testing.T) { // Create new series init := []Series{ NewSeriesFloat64("test", &SeriesInit{1, 0}), NewSeriesInt64("test", &SeriesInit{1, 0}), NewSeriesString("test", &SeriesInit{1, 0}), NewSeriesTime("test", &SeriesInit{1, 0}), NewSeriesMixed("test", &SeriesInit{1, 0}), NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}), } for i := range init { s := init[i] // Rename series s.Rename("test2") if s.Name() != "test2" { t.Errorf("wrong name") } } }
explode_data.jsonl/9997
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 215 }
[ 2830, 3393, 25544, 88757, 1155, 353, 8840, 836, 8, 1476, 197, 322, 4230, 501, 4013, 198, 28248, 1669, 3056, 25544, 4257, 197, 197, 3564, 25544, 5442, 21, 19, 445, 1944, 497, 609, 25544, 3803, 90, 16, 11, 220, 15, 30793, 197, 197, 35...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestApp01maConnect(t *testing.T) { var err error t.Logf("TestConnect()...\n") // DockerRun(t) ioApp01ma = NewIoApp01ma() ioApp01ma.DefaultParms() err = ioApp01ma.Connect("") if err == nil { err = ioApp01ma.Disconnect() if err != nil { t.Fatalf("Error: %s\n\n", err) } ioApp01ma = nil } else { t.Fatalf("Error: %s\n\n", err) } t.Logf("TestConnect() - End of Test\n\n\n") }
explode_data.jsonl/16188
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 240 }
[ 2830, 3393, 2164, 15, 16, 1728, 14611, 1155, 353, 8840, 836, 8, 341, 262, 762, 1848, 260, 1465, 271, 3244, 98954, 445, 2271, 14611, 368, 30801, 77, 1138, 322, 40549, 6727, 1155, 692, 53112, 2164, 15, 16, 1728, 284, 1532, 42799, 2164, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_getPasswordEnv(t *testing.T) { oldArgs := os.Args defer func() { os.Args = oldArgs }() _ = os.Setenv("SITU_VAULT_PASSWORD_VAR", "test-pw") pwc := PasswordConfig{ Env: "SITU_VAULT_PASSWORD_VAR", } password := getPassword(pwc) assert.Equal(t, "test-pw", password) }
explode_data.jsonl/70521
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 130 }
[ 2830, 3393, 3062, 4876, 14359, 1155, 353, 8840, 836, 8, 341, 61828, 4117, 1669, 2643, 51015, 198, 16867, 2915, 368, 314, 2643, 51015, 284, 2310, 4117, 50746, 197, 62, 284, 2643, 4202, 3160, 445, 50, 48299, 2334, 32, 3532, 23059, 25750, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClientOptions_chainAll(t *testing.T) { t.Parallel() readPrefMode, err := readpref.ModeFromString("secondary") require.NoError(t, err) rp, err := readpref.New( readPrefMode, readpref.WithTagSets(tag.NewTagSetsFromMaps([]map[string]string{{"nyc": "1"}})...), readpref.WithMaxStaleness(2*time.Second), ) rc := readconcern.New(readconcern.Level("majority")) wc := writeconcern.New( writeconcern.J(true), writeconcern.WTagSet("majority"), writeconcern.W(3), writeconcern.WTimeout(2*time.Second), ) require.NoError(t, err) opts := clientopt.BundleClient(). AppName("foo"). Auth(clientopt.Credential{ AuthMechanism: "MONGODB-X509", AuthMechanismProperties: map[string]string{"foo": "bar"}, AuthSource: "$external", Password: "supersecurepassword", Username: "admin", }). ConnectTimeout(500 * time.Millisecond). HeartbeatInterval(15 * time.Second). Hosts([]string{ "mongodb://localhost:27018", "mongodb://localhost:27019"}). LocalThreshold(time.Second). MaxConnIdleTime(30 * time.Second). MaxConnsPerHost(150). MaxIdleConnsPerHost(20). ReadConcern(rc). ReadPreference(rp). ReplicaSet("foo"). RetryWrites(true). ServerSelectionTimeout(time.Second). Single(false). SocketTimeout(2 * time.Second). SSL(&clientopt.SSLOpt{ Enabled: true, ClientCertificateKeyFile: "client.pem", ClientCertificateKeyPassword: nil, Insecure: false, CaFile: "ca.pem", }). 
WriteConcern(wc) expectedClient := &clientopt.Client{ TopologyOptions: nil, ConnString: connstring.ConnString{ AppName: "foo", AuthMechanism: "MONGODB-X509", AuthMechanismProperties: map[string]string{"foo": "bar"}, AuthSource: "$external", Username: "admin", Password: "supersecurepassword", ConnectTimeout: 500 * time.Millisecond, ConnectTimeoutSet: true, HeartbeatInterval: 15 * time.Second, HeartbeatIntervalSet: true, Hosts: []string{ "mongodb://localhost:27018", "mongodb://localhost:27019", }, LocalThresholdSet: true, LocalThreshold: time.Second, MaxConnIdleTime: 30 * time.Second, MaxConnIdleTimeSet: true, MaxConnsPerHost: 150, MaxConnsPerHostSet: true, MaxIdleConnsPerHost: 20, MaxIdleConnsPerHostSet: true, ReplicaSet: "foo", ServerSelectionTimeoutSet: true, ServerSelectionTimeout: time.Second, Connect: connstring.AutoConnect, ConnectSet: true, SocketTimeout: 2 * time.Second, SocketTimeoutSet: true, SSL: true, SSLSet: true, SSLClientCertificateKeyFile: "client.pem", SSLClientCertificateKeyFileSet: true, SSLClientCertificateKeyPassword: nil, SSLClientCertificateKeyPasswordSet: true, SSLInsecure: false, SSLInsecureSet: true, SSLCaFile: "ca.pem", SSLCaFileSet: true, }, ReadConcern: rc, ReadPreference: rp, WriteConcern: wc, RetryWrites: true, RetryWritesSet: true, } client, err := opts.Unbundle(connstring.ConnString{}) require.NoError(t, err) require.NotNil(t, client) require.Equal(t, expectedClient, client) }
explode_data.jsonl/15
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1888 }
[ 2830, 3393, 2959, 3798, 30583, 2403, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 37043, 29978, 3636, 11, 1848, 1669, 1349, 30552, 42852, 44491, 445, 18699, 1138, 17957, 35699, 1155, 11, 1848, 340, 7000, 79, 11, 1848, 1669, 134...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetBranches(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "Basic Q2xpZW50SWQ6Q2xpZW50K1NlY3JldA==", r.Header.Get("Authorization")) w.Header().Set("Content-Type", "application/json") fmt.Fprintln(w, "{\"access_token\" : \"tolen\"}") })) defer ts.Close() branchesServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "{\"values\" : [{\"name\": \"foo\"}]}") })) defer branchesServer.Close() branches, err := new(BranchLoader).LoadBranches(Bitbucket{ ClientID: "ClientId", ClientSecret: "Client Secret", Username: "Username", RepositoryName: "repo", TokenUrl: ts.URL, ApiUrl: branchesServer.URL, }) assert.NoError(t, err) assert.Equal(t, []string{"foo"}, branches) }
explode_data.jsonl/50989
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 365 }
[ 2830, 3393, 1949, 18197, 288, 1155, 353, 8840, 836, 8, 1476, 57441, 1669, 54320, 70334, 7121, 5475, 19886, 89164, 18552, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, 197, 6948, 12808, 1155, 11, 330, 15944, 1207, 17, 35725, 81756...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClientUnsubscribesTwice(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) s.Start() defer s.Stop() ctx := context.Background() _, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) assert.Equal(t, pubsub.ErrSubscriptionNotFound, err) err = s.UnsubscribeAll(ctx, clientID) assert.Equal(t, pubsub.ErrSubscriptionNotFound, err) }
explode_data.jsonl/22060
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 238 }
[ 2830, 3393, 2959, 1806, 1966, 11592, 9433, 22816, 558, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 6675, 1966, 7121, 5475, 741, 1903, 4202, 7395, 12531, 8787, 287, 7395, 2398, 1903, 12101, 741, 16867, 274, 30213, 2822, 20985, 1669, 2266, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGraphDot_basic(t *testing.T) { var g Graph g.Add(1) g.Add(2) g.Add(3) g.Connect(BasicEdge(1, 3)) actual := strings.TrimSpace(string(g.Dot(nil))) expected := strings.TrimSpace(testGraphDotBasicStr) if actual != expected { t.Fatalf("bad: %s", actual) } }
explode_data.jsonl/70152
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 11212, 34207, 34729, 1155, 353, 8840, 836, 8, 341, 2405, 342, 12165, 198, 3174, 1904, 7, 16, 340, 3174, 1904, 7, 17, 340, 3174, 1904, 7, 18, 340, 3174, 43851, 5349, 5971, 11656, 7, 16, 11, 220, 18, 4390, 88814, 1669, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRetryOnDiffHost(t *testing.T) { cancel, controller := newController() defer cancel() wf := unmarshalWF(helloWorldWf) woc := newWorkflowOperationCtx(wf, controller) // Verify that there are no nodes in the wf status. assert.Empty(t, woc.wf.Status.Nodes) // Add the parent node for retries. nodeName := "test-node" nodeID := woc.wf.NodeID(nodeName) node := woc.initializeNode(nodeName, wfv1.NodeTypeRetry, "", &wfv1.WorkflowStep{}, "", wfv1.NodeRunning) hostSelector := "kubernetes.io/hostname" retries := wfv1.RetryStrategy{} retries.Affinity = &wfv1.RetryAffinity{ NodeAntiAffinity: &wfv1.RetryNodeAntiAffinity{}, } woc.wf.Status.Nodes[nodeID] = *node assert.Equal(t, node.Phase, wfv1.NodeRunning) // Ensure there are no child nodes yet. lastChild := getChildNodeIndex(node, woc.wf.Status.Nodes, -1) assert.Nil(t, lastChild) // Add child node. childNode := fmt.Sprintf("child-node-%d", 0) woc.initializeNode(childNode, wfv1.NodeTypePod, "", &wfv1.WorkflowStep{}, "", wfv1.NodeRunning) woc.addChildNode(nodeName, childNode) n := woc.wf.GetNodeByName(nodeName) lastChild = getChildNodeIndex(n, woc.wf.Status.Nodes, -1) assert.NotNil(t, lastChild) woc.markNodePhase(lastChild.Name, wfv1.NodeFailed) _, _, err := woc.processNodeRetries(n, retries, &executeTemplateOpts{}) assert.NoError(t, err) n = woc.wf.GetNodeByName(nodeName) assert.Equal(t, n.Phase, wfv1.NodeRunning) // Ensure related fields are not set assert.Equal(t, lastChild.HostNodeName, "") // Set host name n = woc.wf.GetNodeByName(nodeName) lastChild = getChildNodeIndex(n, woc.wf.Status.Nodes, -1) lastChild.HostNodeName = "test-fail-hostname" woc.wf.Status.Nodes[lastChild.ID] = *lastChild tmpl := &wfv1.Template{} tmpl.RetryStrategy = &retries RetryOnDifferentHost(nodeID)(*woc.retryStrategy(tmpl), woc.wf.Status.Nodes, tmpl) assert.NotNil(t, tmpl.Affinity) // Verify if template's Affinity has the right value targetNodeSelectorRequirement := 
tmpl.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0] sourceNodeSelectorRequirement := apiv1.NodeSelectorRequirement{ Key: hostSelector, Operator: apiv1.NodeSelectorOpNotIn, Values: []string{lastChild.HostNodeName}, } assert.Equal(t, sourceNodeSelectorRequirement, targetNodeSelectorRequirement) }
explode_data.jsonl/71036
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 944 }
[ 2830, 3393, 51560, 1925, 21751, 9296, 1155, 353, 8840, 836, 8, 341, 84441, 11, 6461, 1669, 501, 2051, 741, 16867, 9121, 741, 6692, 69, 1669, 650, 27121, 32131, 3203, 4791, 10134, 54, 69, 340, 6692, 509, 1669, 501, 62768, 8432, 23684, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFormatFMDataWithoutNMSServer(t *testing.T) { fmConfig := config.FMConfig{ DestinationDir: "./tmp", Source: "source", Service: "NDAC", NodeID: "123", Host: "host", } fileName := "./fm_data.json" err := createTestData(fileName, testFMData) if err != nil { t.Error(err) } defer os.Remove(fileName) err = os.MkdirAll(fmConfig.DestinationDir, os.ModePerm) if err != nil { t.Error(err) } FormatFMData(fileName, fmConfig, "localhost:8888") defer os.RemoveAll(fmConfig.DestinationDir) if len(failedFmFiles) == 0 || failedFmFiles[0] != "./tmp/fm_data" { t.Fail() } }
explode_data.jsonl/46224
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 286 }
[ 2830, 3393, 4061, 25609, 1043, 26040, 37325, 1220, 2836, 1155, 353, 8840, 836, 8, 341, 1166, 76, 2648, 1669, 2193, 991, 44, 2648, 515, 197, 10957, 20646, 6184, 25, 5924, 5173, 756, 197, 197, 3608, 25, 260, 330, 2427, 756, 197, 91619, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestRepository_GetCommit(t *testing.T) { ctx := actor.WithActor(context.Background(), &actor.Actor{ UID: 1, }) db := database.NewMockDB() gitCommands := []string{ "GIT_COMMITTER_NAME=a GIT_COMMITTER_EMAIL=a@a.com GIT_COMMITTER_DATE=2006-01-02T15:04:05Z git commit --allow-empty -m foo --author='a <a@a.com>' --date 2006-01-02T15:04:05Z", "GIT_COMMITTER_NAME=c GIT_COMMITTER_EMAIL=c@c.com GIT_COMMITTER_DATE=2006-01-02T15:04:07Z git commit --allow-empty -m bar --author='a <a@a.com>' --date 2006-01-02T15:04:06Z", } gitCommandsWithFiles := getGitCommandsWithFiles(fileWithAccess, fileWithoutAccess) oldRunCommitLog := runCommitLog type testCase struct { repo api.RepoName id api.CommitID wantCommit *gitdomain.Commit noEnsureRevision bool revisionNotFoundError bool } runGetCommitTests := func(checker authz.SubRepoPermissionChecker, tests map[string]testCase) { for label, test := range tests { t.Run(label, func(t *testing.T) { var noEnsureRevision bool t.Cleanup(func() { runCommitLog = oldRunCommitLog }) runCommitLog = func(ctx context.Context, cmd *gitserver.Cmd, opt CommitsOptions) ([]*wrappedCommit, error) { // Track the value of NoEnsureRevision we pass to gitserver noEnsureRevision = opt.NoEnsureRevision return oldRunCommitLog(ctx, cmd, opt) } resolveRevisionOptions := ResolveRevisionOptions{ NoEnsureRevision: test.noEnsureRevision, } commit, err := GetCommit(ctx, db, test.repo, test.id, resolveRevisionOptions, checker) if err != nil { if test.revisionNotFoundError { if !errors.HasType(err, &gitdomain.RevisionNotFoundError{}) { t.Errorf("%s: GetCommit: expected a RevisionNotFoundError, got %s", label, err) } return } t.Errorf("%s: GetCommit: %s", label, err) } if !CommitsEqual(commit, test.wantCommit) { t.Errorf("%s: got commit == %+v, want %+v", label, commit, test.wantCommit) return } // Test that trying to get a nonexistent commit returns RevisionNotFoundError. 
if _, err := GetCommit(ctx, db, test.repo, NonExistentCommitID, resolveRevisionOptions, checker); !errors.HasType(err, &gitdomain.RevisionNotFoundError{}) { t.Errorf("%s: for nonexistent commit: got err %v, want RevisionNotFoundError", label, err) } if noEnsureRevision != test.noEnsureRevision { t.Fatalf("Expected %t, got %t", test.noEnsureRevision, noEnsureRevision) } }) } } wantGitCommit := &gitdomain.Commit{ ID: "b266c7e3ca00b1a17ad0b1449825d0854225c007", Author: gitdomain.Signature{Name: "a", Email: "a@a.com", Date: MustParseTime(time.RFC3339, "2006-01-02T15:04:06Z")}, Committer: &gitdomain.Signature{Name: "c", Email: "c@c.com", Date: MustParseTime(time.RFC3339, "2006-01-02T15:04:07Z")}, Message: "bar", Parents: []api.CommitID{"ea167fe3d76b1e5fd3ed8ca44cbd2fe3897684f8"}, } tests := map[string]testCase{ "git cmd with NoEnsureRevision false": { repo: MakeGitRepository(t, gitCommands...), id: "b266c7e3ca00b1a17ad0b1449825d0854225c007", wantCommit: wantGitCommit, noEnsureRevision: false, }, "git cmd with NoEnsureRevision true": { repo: MakeGitRepository(t, gitCommands...), id: "b266c7e3ca00b1a17ad0b1449825d0854225c007", wantCommit: wantGitCommit, noEnsureRevision: true, }, } // Run basic tests w/o sub-repo permissions checker runGetCommitTests(nil, tests) checker := getTestSubRepoPermsChecker(fileWithoutAccess) // Add test cases with file names for sub-repo permissions testing tests["with sub-repo permissions and access to file"] = testCase{ repo: MakeGitRepository(t, gitCommandsWithFiles...), id: "da50eed82c8ff3c17bb642000d8aad9d434283c1", wantCommit: &gitdomain.Commit{ ID: "da50eed82c8ff3c17bb642000d8aad9d434283c1", Author: gitdomain.Signature{Name: "a", Email: "a@a.com", Date: MustParseTime(time.RFC3339, "2006-01-02T15:04:05Z")}, Committer: &gitdomain.Signature{Name: "a", Email: "a@a.com", Date: MustParseTime(time.RFC3339, "2006-01-02T15:04:05Z")}, Message: "commit1", }, noEnsureRevision: true, } tests["with sub-repo permissions and NO access to file"] = 
testCase{ repo: MakeGitRepository(t, gitCommandsWithFiles...), id: "ee7773505e98390e809cbf518b2a92e4748b0187", wantCommit: &gitdomain.Commit{}, noEnsureRevision: true, revisionNotFoundError: true, } // Run test w/ sub-repo permissions filtering runGetCommitTests(checker, tests) }
explode_data.jsonl/8512
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2097 }
[ 2830, 3393, 4624, 13614, 33441, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 12089, 26124, 18870, 5378, 19047, 1507, 609, 5621, 76094, 515, 197, 197, 6463, 25, 220, 16, 345, 197, 3518, 20939, 1669, 4625, 7121, 11571, 3506, 741, 90731, 304...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTimebounds(t *testing.T) { kp0 := newKeypair0() tb := NewTimeout(300) tx, err := NewTransaction( TransactionParams{ SourceAccount: &SimpleAccount{AccountID: kp0.Address(), Sequence: 1}, Operations: []Operation{&BumpSequence{BumpTo: 0}}, BaseFee: MinBaseFee, Timebounds: tb, }, ) assert.NoError(t, err) assert.Equal(t, tb, tx.timebounds) assert.Equal(t, xdr.TimePoint(tb.MinTime), tx.envelope.V1.Tx.TimeBounds.MinTime) assert.Equal(t, xdr.TimePoint(tb.MaxTime), tx.envelope.V1.Tx.TimeBounds.MaxTime) }
explode_data.jsonl/20650
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 249 }
[ 2830, 3393, 1462, 34019, 1155, 353, 8840, 836, 8, 341, 16463, 79, 15, 1669, 501, 6608, 1082, 1310, 15, 2822, 62842, 1669, 1532, 7636, 7, 18, 15, 15, 340, 46237, 11, 1848, 1669, 1532, 8070, 1006, 197, 197, 8070, 4870, 515, 298, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetInformation(t *testing.T) { log.Println("Test GetInformation") res, err := testDevice.GetInformation() if err != nil { t.Error(err) } js := prettyJSON(&res) fmt.Println(js) }
explode_data.jsonl/60353
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 75 }
[ 2830, 3393, 1949, 14873, 1155, 353, 8840, 836, 8, 341, 6725, 12419, 445, 2271, 2126, 14873, 5130, 10202, 11, 1848, 1669, 1273, 6985, 2234, 14873, 741, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, 197, 630, 95636, 1669, 5020, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestTagsForBranchMain(t *testing.T) { testRepo := newTestRepo(t) defer testRepo.cleanup(t) result, err := testRepo.sut.TagsForBranch(git.DefaultBranch) require.Nil(t, err) require.Equal(t, []string{testRepo.firstTagName}, result) }
explode_data.jsonl/14010
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 100 }
[ 2830, 3393, 15930, 2461, 18197, 6202, 1155, 353, 8840, 836, 8, 341, 18185, 25243, 1669, 501, 2271, 25243, 1155, 340, 16867, 1273, 25243, 87689, 1155, 692, 9559, 11, 1848, 1669, 1273, 25243, 514, 332, 73522, 2461, 18197, 3268, 275, 13275, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestStartupMonitorPodConditionController(t *testing.T) { scenarios := []struct { name string initialObjects []runtime.Object initialNodeStatus []operatorv1.NodeStatus previousConditions []operatorv1.OperatorCondition expectedConditions []operatorv1.OperatorCondition }{ { name: "scenario 1: happy path", initialObjects: []runtime.Object{newPod(corev1.PodRunning, "kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal")}, expectedConditions: []operatorv1.OperatorCondition{ {Type: "StartupMonitorPodDegraded", Status: operatorv1.ConditionFalse}, {Type: "StartupMonitorPodContainerExcessiveRestartsDegraded", Status: operatorv1.ConditionFalse}, }, initialNodeStatus: []operatorv1.NodeStatus{{NodeName: "ip-10-0-129-56.ec2.internal", TargetRevision: 2}}, }, { name: "scenario 2: degraded in pending phase", initialObjects: []runtime.Object{ func() *corev1.Pod { p := newPod(corev1.PodPending, "kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal") startTime := metav1.NewTime(time.Now().Add(-3 * time.Minute)) p.Status.StartTime = &startTime p.Status.Reason = "PendingReason" p.Status.Message = "PendingMessage" return p }(), }, initialNodeStatus: []operatorv1.NodeStatus{{NodeName: "ip-10-0-129-56.ec2.internal", TargetRevision: 2}}, expectedConditions: []operatorv1.OperatorCondition{ {Type: "StartupMonitorPodDegraded", Status: operatorv1.ConditionTrue, Reason: "PendingReason", Message: "the pod kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal has been in Pending phase for more than max tolerated time \\(2m0s\\) due to PendingMessage"}, {Type: "StartupMonitorPodContainerExcessiveRestartsDegraded", Status: operatorv1.ConditionFalse}, }, }, { name: "scenario 3: degraded container in pending phase", initialObjects: []runtime.Object{ func() *corev1.Pod { p := newPod(corev1.PodPending, "kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal") startTime := metav1.NewTime(time.Now().Add(-3 * time.Minute)) p.Status.StartTime = &startTime p.Status.Reason = 
"PendingReason" p.Status.Message = "PendingMessage" p.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "WaitingContainerName", State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerWaitingReason", Message: "ContainerWaitingMessage"}}, }, } return p }(), }, initialNodeStatus: []operatorv1.NodeStatus{{NodeName: "ip-10-0-129-56.ec2.internal", TargetRevision: 2}}, expectedConditions: []operatorv1.OperatorCondition{ {Type: "StartupMonitorPodDegraded", Status: operatorv1.ConditionTrue, Reason: "ContainerWaitingReason", Message: "the pod kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal has been in Pending phase for more than max tolerated time \\(2m0s\\) due to PendingMessage\nat least one container WaitingContainerName is waiting since .* due to ContainerWaitingMessage"}, {Type: "StartupMonitorPodContainerExcessiveRestartsDegraded", Status: operatorv1.ConditionFalse}, }, }, { name: "scenario 4: degraded failed container", initialObjects: []runtime.Object{ func() *corev1.Pod { p := newPod(corev1.PodFailed, "kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal") p.Status.Reason = "FailedReason" p.Status.Message = "FailedMessage" p.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "TerminatedContainerName", State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "TerminatedContainerReason", Message: "TerminatedContainerMessage", ExitCode: 255}}, }, } return p }(), }, initialNodeStatus: []operatorv1.NodeStatus{{NodeName: "ip-10-0-129-56.ec2.internal", TargetRevision: 2}}, expectedConditions: []operatorv1.OperatorCondition{ {Type: "StartupMonitorPodDegraded", Status: operatorv1.ConditionTrue, Reason: "TerminatedContainerReason", Message: "at least one container TerminatedContainerName in kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal pod exited with 255 \\(expected nonzero exit code\\), due to TerminatedContainerMessage"}, {Type: 
"StartupMonitorPodContainerExcessiveRestartsDegraded", Status: operatorv1.ConditionFalse}, }, }, { name: "scenario 5: degraded excessive restarts", initialObjects: []runtime.Object{ func() *corev1.Pod { p := newPod(corev1.PodRunning, "kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal") p.Status.Reason = "FailedReason" p.Status.Message = "FailedMessage" p.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "RestartingContainerName", RestartCount: 4, }, } return p }(), }, expectedConditions: []operatorv1.OperatorCondition{ {Type: "StartupMonitorPodDegraded", Status: operatorv1.ConditionFalse}, {Type: "StartupMonitorPodContainerExcessiveRestartsDegraded", Status: operatorv1.ConditionTrue, Reason: "ExcessiveRestarts", Message: "at least one container RestartingContainerName in kube-apiserver-startup-monitor-ip-10-0-129-56.ec2.internal pod has restarted 4 times, max allowed is 2"}, }, initialNodeStatus: []operatorv1.NodeStatus{{NodeName: "ip-10-0-129-56.ec2.internal", TargetRevision: 2}}, }, } for _, scenario := range scenarios { t.Run(scenario.name, func(t *testing.T) { // test data fakeOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( &operatorv1.StaticPodOperatorSpec{}, &operatorv1.StaticPodOperatorStatus{ NodeStatuses: scenario.initialNodeStatus, OperatorStatus: operatorv1.OperatorStatus{ Conditions: scenario.previousConditions, }}, nil, nil, ) indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) for _, obj := range scenario.initialObjects { if err := indexer.Add(obj); err != nil { t.Error(err) } } // act target := &startupMonitorPodConditionController{ podLister: corev1listers.NewPodLister(indexer).Pods("openshift-kube-apiserver"), operatorClient: fakeOperatorClient, targetName: "kube-apiserver", startupMonitorEnabledFn: func() (bool, error) { return true, nil }, } // validate err := target.sync(nil, nil) if err != nil { t.Error(err) } _, actualOperatorStatus, _, err := fakeOperatorClient.GetOperatorState() if err != 
nil { t.Fatal(err) } if err := areCondidtionsEqual(scenario.expectedConditions, actualOperatorStatus.Conditions); err != nil { t.Error(err) } }) } }
explode_data.jsonl/23280
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2629 }
[ 2830, 3393, 39076, 30098, 23527, 10547, 2051, 1155, 353, 8840, 836, 8, 341, 29928, 60494, 1669, 3056, 1235, 341, 197, 11609, 2290, 914, 198, 197, 85270, 11543, 257, 3056, 22255, 8348, 198, 197, 85270, 1955, 2522, 220, 3056, 7884, 85, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTransportReadHeadResponse(t *testing.T) { ct := newClientTester(t) clientDone := make(chan struct{}) ct.client = func() error { defer close(clientDone) req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) res, err := ct.tr.RoundTrip(req) if err != nil { return err } if res.ContentLength != 123 { return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) } slurp, err := ioutil.ReadAll(res.Body) if err != nil { return fmt.Errorf("ReadAll: %v", err) } if len(slurp) > 0 { return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) } return nil } ct.server = func() error { ct.greet() for { f, err := ct.fr.ReadFrame() if err != nil { t.Logf("ReadFrame: %v", err) return nil } hf, ok := f.(*HeadersFrame) if !ok { continue } var buf bytes.Buffer enc := hpack.NewEncoder(&buf) enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) ct.fr.WriteHeaders(HeadersFrameParam{ StreamID: hf.StreamID, EndHeaders: true, EndStream: false, // as the GFE does BlockFragment: buf.Bytes(), }) ct.fr.WriteData(hf.StreamID, true, nil) <-clientDone return nil } } ct.run() }
explode_data.jsonl/16120
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 589 }
[ 2830, 3393, 27560, 4418, 12346, 2582, 1155, 353, 8840, 836, 8, 341, 89216, 1669, 501, 2959, 58699, 1155, 340, 25291, 17453, 1669, 1281, 35190, 2036, 37790, 89216, 6581, 284, 2915, 368, 1465, 341, 197, 16867, 3265, 12805, 17453, 340, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestMultipleDeferredIndexes_BuildTogether(t *testing.T) { log.Printf("In TestMultipleDeferredIndexes_BuildTogether()") var bucketName = "default" var index1 = "id_company" var index2 = "id_age" var index3 = "id_gender" var index4 = "id_isActive" e := secondaryindex.DropAllSecondaryIndexes(indexManagementAddress) FailTestIfError(e, "Error in DropAllSecondaryIndexes", t) docsToCreate := generateDocs(10000, "users.prod") UpdateKVDocs(docsToCreate, docs) log.Printf("Setting JSON docs in KV") kvutility.SetKeyValues(docsToCreate, "default", "", clusterconfig.KVAddress) err := secondaryindex.CreateSecondaryIndex(index1, bucketName, indexManagementAddress, "", []string{"company"}, false, nil, true, defaultIndexActiveTimeout, nil) FailTestIfError(err, "Error in creating the index", t) time.Sleep(1 * time.Second) err = secondaryindex.CreateSecondaryIndexAsync(index2, bucketName, indexManagementAddress, "", []string{"age"}, false, []byte("{\"defer_build\": true}"), true, nil) FailTestIfError(err, "Error in creating the index", t) err = secondaryindex.CreateSecondaryIndexAsync(index3, bucketName, indexManagementAddress, "", []string{"gender"}, false, []byte("{\"defer_build\": true}"), true, nil) FailTestIfError(err, "Error in creating the index", t) err = secondaryindex.CreateSecondaryIndexAsync(index4, bucketName, indexManagementAddress, "", []string{"isActive"}, false, []byte("{\"defer_build\": true}"), true, nil) FailTestIfError(err, "Error in creating the index", t) err = secondaryindex.BuildIndexes([]string{index2, index3, index4}, bucketName, indexManagementAddress, defaultIndexActiveTimeout) FailTestIfError(err, "Error in deferred index build", t) docScanResults := datautility.ExpectedScanResponse_int64(docs, "age", 30, 50, 1) scanResults, err := secondaryindex.Range(index2, bucketName, indexScanAddress, []interface{}{30}, []interface{}{50}, 1, false, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan", t) err = tv.Validate(docScanResults, 
scanResults) FailTestIfError(err, "Error in scan result validation", t) docsToCreate = generateDocs(10000, "users.prod") UpdateKVDocs(docsToCreate, docs) log.Printf("Setting JSON docs in KV") kvutility.SetKeyValues(docsToCreate, "default", "", clusterconfig.KVAddress) docScanResults = datautility.ExpectedScanResponse_string(docs, "gender", "female", "female", 3) scanResults, err = secondaryindex.Range(index3, bucketName, indexScanAddress, []interface{}{"female"}, []interface{}{"female"}, 3, false, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan", t) err = tv.Validate(docScanResults, scanResults) FailTestIfError(err, "Error in scan result validation", t) docScanResults = datautility.ExpectedScanResponse_bool(docs, "isActive", true, 3) scanResults, err = secondaryindex.Range(index4, bucketName, indexScanAddress, []interface{}{true}, []interface{}{true}, 3, false, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan", t) err = tv.Validate(docScanResults, scanResults) FailTestIfError(err, "Error in scan result validation", t) }
explode_data.jsonl/59134
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 993 }
[ 2830, 3393, 32089, 88417, 62229, 96686, 80987, 1155, 353, 8840, 836, 8, 341, 6725, 19367, 445, 641, 3393, 32089, 88417, 62229, 96686, 80987, 368, 5130, 2405, 15621, 675, 284, 330, 2258, 698, 2405, 1922, 16, 284, 330, 307, 33403, 698, 24...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestExecutePassOnDifferentApis(t *testing.T) { environment := environment.NewEnvironment("dev", "Dev", "", "https://url/to/dev/environment", "DEV") apis := testGetExecuteApis() path := util.ReplacePathSeparators("../../cmd/monaco/test-resources/duplicate-name-test") projects, err := project.LoadProjectsToDeploy("project2", apis, path, util.NewFileReader()) assert.NilError(t, err) errors := execute(environment, projects, true, "", false) for _, err := range errors { assert.NilError(t, err) } }
explode_data.jsonl/80403
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 175 }
[ 2830, 3393, 17174, 12187, 1925, 69123, 91121, 1155, 353, 8840, 836, 8, 341, 197, 23294, 1669, 4573, 7121, 12723, 445, 3583, 497, 330, 14592, 497, 7342, 330, 2428, 1110, 1085, 32429, 35061, 54482, 497, 330, 31819, 5130, 197, 13725, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNameMatcher(t *testing.T) { for i, currCase := range []struct { matcherArgs []string path string want bool }{ // name match includes subdirectories {[]string{"foo"}, "foo/bar/regular", true}, // name match matches on subdirectories {[]string{"foo"}, "bar/foo/inner", true}, // matches if any matcher matches {[]string{"bar", "foo"}, "foo/bar", true}, // full match required {[]string{"foo"}, "fooLongerName/inner", false}, // regexps work {[]string{"foo.*"}, "fooLongerName/inner", true}, // matches occur on name parts only (does not match across directory boundaries). "foo/bar" is checked // against "bar" and "foo", and therefore does not match. {[]string{"foo/bar"}, "foo/bar", false}, } { m := matcher.Name(currCase.matcherArgs...) got := m.Match(currCase.path) assert.Equal(t, currCase.want, got, "Case %d", i) } }
explode_data.jsonl/81366
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 328 }
[ 2830, 3393, 675, 37554, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 9804, 4207, 1669, 2088, 3056, 1235, 341, 197, 2109, 28058, 4117, 3056, 917, 198, 197, 26781, 286, 914, 198, 197, 50780, 286, 1807, 198, 197, 59403, 197, 197, 322, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestOrZeroReturnsNoneOnNil(t *testing.T) { if v := credentials.OrZero(nil); v != credentials.None { t.Errorf("credentials.OrZero(nil) should've returned credentials.None, not %v!", v) } }
explode_data.jsonl/48263
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 2195, 17999, 16446, 4064, 1925, 19064, 1155, 353, 8840, 836, 8, 341, 743, 348, 1669, 16387, 90449, 17999, 27907, 1215, 348, 961, 16387, 18475, 341, 197, 3244, 13080, 445, 32353, 90449, 17999, 27907, 8, 1265, 3003, 5927, 16387,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestHandshakeClientHelloRetryRequest(t *testing.T) { config := testConfig.Clone() config.CurvePreferences = []CurveID{X25519, CurveP256} test := &clientTest{ name: "HelloRetryRequest", args: []string{"-cipher", "ECDHE-RSA-AES128-GCM-SHA256", "-curves", "P-256"}, config: config, } runClientTestTLS13(t, test) }
explode_data.jsonl/27702
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 135 }
[ 2830, 3393, 2314, 29661, 2959, 9707, 51560, 1900, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 1273, 2648, 64463, 741, 25873, 727, 73047, 14306, 284, 3056, 31325, 915, 90, 55, 17, 20, 20, 16, 24, 11, 53677, 47, 17, 20, 21, 630, 18185,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSign(t *testing.T) { s, err := NewSignerFromFile("testdata/ca.pem", "testdata/ca_key.pem", nil) if err != nil { t.Fatal("Failed to produce signer") } // test the empty request _, err = s.Sign(signer.SignRequest{}) if err == nil { t.Fatalf("Empty request failed to produce an error") } // not a csr certPem, err := ioutil.ReadFile("../../helpers/testdata/cert.pem") if err != nil { t.Fatal(err) } // csr with ip as hostname pem, err := ioutil.ReadFile("testdata/ip.csr") if err != nil { t.Fatal(err) } // improper request validReq := signer.SignRequest{Hosts: signer.SplitHosts(testHostName), Request: string(certPem)} _, err = s.Sign(validReq) if err == nil { t.Fatal("A bad case failed to raise an error") } validReq = signer.SignRequest{Hosts: signer.SplitHosts("128.84.126.213"), Request: string(pem)} _, err = s.Sign(validReq) if err != nil { t.Fatal("A bad case failed to raise an error") } pem, err = ioutil.ReadFile("testdata/ex.csr") validReq = signer.SignRequest{ Request: string(pem), Hosts: []string{"example.com"}, } s.Sign(validReq) if err != nil { t.Fatal("Failed to sign") } }
explode_data.jsonl/71275
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 481 }
[ 2830, 3393, 7264, 1155, 353, 8840, 836, 8, 341, 1903, 11, 1848, 1669, 1532, 7264, 261, 43633, 445, 92425, 80591, 49373, 497, 330, 92425, 80591, 3097, 49373, 497, 2092, 340, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 445, 9408, 311, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestMultiStatement(t *testing.T) { dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) { ip, port, err := c.FirstPort() if err != nil { t.Fatal(err) } addr := msConnectionString(ip, port) ms := &SQLServer{} d, err := ms.Open(addr) if err != nil { t.Fatal(err) } defer func() { if err := d.Close(); err != nil { t.Error(err) } }() if err := d.Run(strings.NewReader("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);")); err != nil { t.Fatalf("expected err to be nil, got %v", err) } // make sure second table exists var exists int if err := d.(*SQLServer).conn.QueryRowContext(context.Background(), "SELECT COUNT(1) FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT schema_name()) AND table_catalog = (SELECT db_name())").Scan(&exists); err != nil { t.Fatal(err) } if exists != 1 { t.Fatalf("expected table bar to exist") } }) }
explode_data.jsonl/49644
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 389 }
[ 2830, 3393, 20358, 8636, 1155, 353, 8840, 836, 8, 341, 2698, 74, 8840, 41288, 7957, 2271, 1155, 11, 32247, 11, 2915, 1155, 353, 8840, 836, 11, 272, 40204, 1944, 33672, 1731, 8, 341, 197, 46531, 11, 2635, 11, 1848, 1669, 272, 15926, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestValidators(t *testing.T) { cleanup, _, _, port := InitializeTestLCD(t, 1, []sdk.AccAddress{}, true) defer cleanup() resultVals := getValidatorSets(t, port, -1, false) require.Contains(t, resultVals.Validators[0].Address.String(), "cosmosvalcons") require.Contains(t, resultVals.Validators[0].PubKey, "cosmosvalconspub") getValidatorSets(t, port, 2, false) getValidatorSets(t, port, 10000000, true) }
explode_data.jsonl/25400
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 31748, 1155, 353, 8840, 836, 8, 341, 1444, 60639, 11, 8358, 8358, 2635, 1669, 9008, 2271, 64003, 1155, 11, 220, 16, 11, 3056, 51295, 77538, 4286, 22655, 830, 340, 16867, 21290, 741, 9559, 52452, 1669, 633, 14256, 30175, 1155...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLongestPeriod(t *testing.T) { // Given rates := NewRateSet() require.NoError(t, rates.Add(1*time.Second, 10, 20)) require.NoError(t, rates.Add(7*time.Second, 10, 20)) require.NoError(t, rates.Add(5*time.Second, 11, 21)) clock := testutils.GetClock() // When tbs := NewTokenBucketSet(rates, clock) // Then assert.Equal(t, 7*time.Second, tbs.maxPeriod) }
explode_data.jsonl/25388
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 152 }
[ 2830, 3393, 6583, 477, 23750, 1155, 353, 8840, 836, 8, 341, 197, 322, 16246, 198, 7000, 973, 1669, 1532, 11564, 1649, 741, 17957, 35699, 1155, 11, 7813, 1904, 7, 16, 77053, 32435, 11, 220, 16, 15, 11, 220, 17, 15, 1171, 17957, 35699...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMergeProwConfigEnvironment(t *testing.T) { t.Parallel() o := TestOptions{} o.Setup() o.Kind = prowconfig.Environment o.EnvironmentNamespace = "jx-staging" prowConfig := &config.Config{} prowConfig.LogLevel = "debug" c, err := yaml.Marshal(prowConfig) assert.NoError(t, err) data := make(map[string]string) data[prow.ProwConfigFilename] = string(c) cm := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: prow.ProwConfigMapName, }, Data: data, } _, err = o.KubeClient.CoreV1().ConfigMaps(o.NS).Create(cm) assert.NoError(t, err) err = o.AddProwConfig() assert.NoError(t, err) cm, err = o.KubeClient.CoreV1().ConfigMaps(o.NS).Get(prow.ProwConfigMapName, metav1.GetOptions{}) assert.NoError(t, err) assert.NoError(t, yaml.Unmarshal([]byte(cm.Data[prow.ProwConfigFilename]), &prowConfig)) assert.Equal(t, "debug", prowConfig.LogLevel) assert.NotEmpty(t, prowConfig.Presubmits["test/repo"]) }
explode_data.jsonl/218
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 387 }
[ 2830, 3393, 52096, 47, 651, 2648, 12723, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 22229, 1669, 3393, 3798, 16094, 22229, 39820, 741, 22229, 54199, 284, 47558, 1676, 45651, 198, 22229, 45651, 22699, 284, 330, 73, 87, 5477, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestJsonKeyedObject(t *testing.T) { // Fragile test - relies on output order of json.Marshal tests := map[string]Kvlist{ `{"key1":"val1","key2":"val2"}`: { {Key: []byte(`key1`), Value: []byte(`val1`)}, {Key: []byte(`key2`), Value: []byte(`val2`)}, }, `{"key1":"val1","key2":"val2","oink":"foo","sausage":"bar"}`: { {Key: []byte(`key1`), Value: []byte(`val1`)}, {Key: []byte(`key2`), Value: []byte(`val2`)}, {Key: []byte(`oink`), Value: []byte(`foo`)}, {Key: []byte(`sausage`), Value: []byte(`bar`)}, }, } modifier := jsonKeyedObjectModifier{} for expect, input := range tests { res, err := modifier.modifyKvlist(input) if err != nil { t.Errorf("%v !-> %v, got an error %s", input, expect, err) } if !bytes.Equal(res, []byte(expect)) { t.Errorf("%v !-> %v, got %s", input, expect, res) } } }
explode_data.jsonl/20482
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 394 }
[ 2830, 3393, 5014, 1592, 291, 1190, 1155, 353, 8840, 836, 8, 341, 197, 322, 58089, 457, 1273, 481, 33644, 389, 2550, 1973, 315, 2951, 37271, 198, 78216, 1669, 2415, 14032, 60, 42, 85, 1607, 515, 197, 197, 63, 4913, 792, 16, 3252, 831...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestCountIncrementBy(t *testing.T) { count := getNoiselessCount() count.IncrementBy(4) got := count.Result() const want = 4 if got != want { t.Errorf("IncrementBy: after adding %d got %d, want %d", want, got, want) } }
explode_data.jsonl/57748
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 90 }
[ 2830, 3393, 2507, 38311, 1359, 1155, 353, 8840, 836, 8, 341, 18032, 1669, 633, 61819, 1717, 2507, 741, 18032, 5337, 13477, 1359, 7, 19, 340, 3174, 354, 1669, 1760, 18456, 741, 4777, 1366, 284, 220, 19, 198, 743, 2684, 961, 1366, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRouterDependencies(t *testing.T) { r := NewTestRouter(t) type DB struct { Get func() string } // Datastore is a global dependency, set by value. db := &DB{ Get: func() string { return "Hello, " }, } type Logger struct { Log func(msg string) } // Logger is a contextual instance from the gin request context. captured := "" log := Dependency(GinContextDependency(), func(c *gin.Context) (*Logger, error) { return &Logger{ Log: func(msg string) { captured = fmt.Sprintf("%s [uri:%s]", msg, c.FullPath()) }, }, nil }) r.Resource("/hello", GinContextDependency(), SimpleDependency(db), log, QueryParam("name", "Your name", ""), ).Get("Basic hello world", func(c *gin.Context, db *DB, l *Logger, name string) string { if name == "" { name = c.Request.RemoteAddr } l.Log("Hello logger!") return db.Get() + name }) w := httptest.NewRecorder() req, _ := http.NewRequest(http.MethodGet, "/hello?name=foo", nil) r.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "Hello logger! [uri:/hello]", captured) }
explode_data.jsonl/66541
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 438 }
[ 2830, 3393, 9523, 48303, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 2271, 9523, 1155, 692, 13158, 5952, 2036, 341, 197, 37654, 2915, 368, 914, 198, 197, 630, 197, 322, 2885, 4314, 374, 264, 3644, 24036, 11, 738, 553, 897, 624, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCacheControl(t *testing.T) { rr := httptest.NewRecorder() req := httptest.NewRequest("GET", "/", nil) r := CacheControl(10) h := r(http.HandlerFunc(fakeHandler)) h.ServeHTTP(rr, req) if rr.Header().Get("Cache-Control") != "max-age=10" { t.Fatalf("TestCahceControl: returned cache header doesn't equal expected header. Got %v, expected %v", rr.Header().Get("Cache-Control"), "max-age=10") } }
explode_data.jsonl/44434
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 154 }
[ 2830, 3393, 8233, 3273, 1155, 353, 8840, 836, 8, 341, 197, 634, 1669, 54320, 70334, 7121, 47023, 741, 24395, 1669, 54320, 70334, 75274, 445, 3806, 497, 64657, 2092, 692, 7000, 1669, 19479, 3273, 7, 16, 15, 340, 9598, 1669, 435, 19886, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestService_CreateGetTask(t *testing.T) { ctx := context.Background() s := newService(t) queue := &Queue{ ProjectID: "sinmetal-ci", Region: "asia-northeast1", Name: "gcpboxtest", } taskName, err := s.CreateGetTask(ctx, queue, &GetTask{ Routing: &Routing{ Service: "gcpbox", }, RelativeUri: "/cloudtasks/appengine/get-task", }) if err != nil { t.Fatal(err) } if len(taskName) < 1 { t.Error("task name is empty") } }
explode_data.jsonl/69452
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 204 }
[ 2830, 3393, 1860, 34325, 1949, 6262, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 2822, 1903, 1669, 501, 1860, 1155, 692, 46993, 1669, 609, 7554, 515, 197, 197, 7849, 915, 25, 330, 15940, 54008, 62384, 756, 197, 197, 14091, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_sortColors(t *testing.T) { type args struct { nums []int } tests := []struct { name string args args }{ {"1", args{[]int{1,2,0}}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sortColors(tt.args.nums) }) } }
explode_data.jsonl/28410
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 18435, 13108, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 22431, 82, 3056, 396, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 31215, 2827, 198, 197, 59403, 197, 197, 4913, 16, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIsSpot(t *testing.T) { tests := []struct { name string lifeCycle *string expected bool }{ {name: "LifeCycle is nil", lifeCycle: nil, expected: false, }, {name: "LifeCycle is not nil but not spot", lifeCycle: aws.String("something"), expected: false, }, {name: "LifeCycle is not nil and is spot", lifeCycle: aws.String("spot"), expected: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { i := &instance{Instance: &ec2.Instance{}} i.InstanceLifecycle = tt.lifeCycle retValue := i.isSpot() if retValue != tt.expected { if tt.lifeCycle != nil { t.Errorf("Value received for '%v': %t expected %t", *tt.lifeCycle, retValue, tt.expected) } else { t.Errorf("Value received for '%v': %t expected %t", tt.lifeCycle, retValue, tt.expected) } } }) } }
explode_data.jsonl/55194
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 387 }
[ 2830, 3393, 3872, 47049, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 3056, 1235, 341, 197, 11609, 414, 914, 198, 197, 8810, 1612, 44820, 353, 917, 198, 197, 42400, 220, 1807, 198, 197, 59403, 197, 197, 47006, 25, 330, 25749, 44820, 37...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSlashWithUnbondingDelegation(t *testing.T) { ctx, keeper, params := setupHelper(t, 10) consAddr := sdk.ConsAddress(PKs[0].Address()) fraction := sdk.NewDecWithPrec(5, 1) // set an unbonding delegation ubd := types.UnbondingDelegation{ DelegatorAddr: addrDels[0], ValidatorAddr: addrVals[0], CreationHeight: 11, // expiration timestamp (beyond which the unbonding delegation shouldn't be slashed) MinTime: time.Unix(0, 0), InitialBalance: sdk.NewInt64Coin(params.BondDenom, 4), Balance: sdk.NewInt64Coin(params.BondDenom, 4), } keeper.SetUnbondingDelegation(ctx, ubd) // slash validator for the first time ctx = ctx.WithBlockHeight(12) oldPool := keeper.GetPool(ctx) validator, found := keeper.GetValidatorByConsAddr(ctx, consAddr) require.True(t, found) keeper.Slash(ctx, consAddr, 10, 10, fraction) // end block updates := keeper.ApplyAndReturnValidatorSetUpdates(ctx) require.Equal(t, 1, len(updates)) // read updating unbonding delegation ubd, found = keeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0]) require.True(t, found) // balance decreased require.Equal(t, sdk.NewInt(2), ubd.Balance.Amount) // read updated pool newPool := keeper.GetPool(ctx) // bonded tokens burned require.Equal(t, int64(3), oldPool.BondedTokens.Sub(newPool.BondedTokens).RoundInt64()) // read updated validator validator, found = keeper.GetValidatorByConsAddr(ctx, consAddr) require.True(t, found) // power decreased by 3 - 6 stake originally bonded at the time of infraction // was still bonded at the time of discovery and was slashed by half, 4 stake // bonded at the time of discovery hadn't been bonded at the time of infraction // and wasn't slashed require.Equal(t, sdk.NewDec(7), validator.GetPower()) // slash validator again ctx = ctx.WithBlockHeight(13) keeper.Slash(ctx, consAddr, 9, 10, fraction) ubd, found = keeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0]) require.True(t, found) // balance decreased again require.Equal(t, sdk.NewInt(0), ubd.Balance.Amount) // read 
updated pool newPool = keeper.GetPool(ctx) // bonded tokens burned again require.Equal(t, int64(6), oldPool.BondedTokens.Sub(newPool.BondedTokens).RoundInt64()) // read updated validator validator, found = keeper.GetValidatorByConsAddr(ctx, consAddr) require.True(t, found) // power decreased by 3 again require.Equal(t, sdk.NewDec(4), validator.GetPower()) // slash validator again // all originally bonded stake has been slashed, so this will have no effect // on the unbonding delegation, but it will slash stake bonded since the infraction // this may not be the desirable behaviour, ref https://github.com/cosmos/cosmos-sdk/issues/1440 ctx = ctx.WithBlockHeight(13) keeper.Slash(ctx, consAddr, 9, 10, fraction) ubd, found = keeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0]) require.True(t, found) // balance unchanged require.Equal(t, sdk.NewInt(0), ubd.Balance.Amount) // read updated pool newPool = keeper.GetPool(ctx) // bonded tokens burned again require.Equal(t, int64(9), oldPool.BondedTokens.Sub(newPool.BondedTokens).RoundInt64()) // read updated validator validator, found = keeper.GetValidatorByConsAddr(ctx, consAddr) require.True(t, found) // power decreased by 3 again require.Equal(t, sdk.NewDec(1), validator.GetPower()) // slash validator again // all originally bonded stake has been slashed, so this will have no effect // on the unbonding delegation, but it will slash stake bonded since the infraction // this may not be the desirable behaviour, ref https://github.com/cosmos/cosmos-sdk/issues/1440 ctx = ctx.WithBlockHeight(13) keeper.Slash(ctx, consAddr, 9, 10, fraction) ubd, found = keeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0]) require.True(t, found) // balance unchanged require.Equal(t, sdk.NewInt(0), ubd.Balance.Amount) // read updated pool newPool = keeper.GetPool(ctx) // just 1 bonded token burned again since that's all the validator now has require.Equal(t, int64(10), oldPool.BondedTokens.Sub(newPool.BondedTokens).RoundInt64()) // 
apply TM updates keeper.ApplyAndReturnValidatorSetUpdates(ctx) // read updated validator // power decreased by 1 again, validator is out of stake // ergo validator should have been removed from the store _, found = keeper.GetValidatorByConsAddr(ctx, consAddr) require.False(t, found) }
explode_data.jsonl/49547
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1524 }
[ 2830, 3393, 88004, 2354, 1806, 64239, 287, 1912, 87566, 1155, 353, 8840, 836, 8, 341, 20985, 11, 53416, 11, 3628, 1669, 6505, 5511, 1155, 11, 220, 16, 15, 340, 197, 6254, 13986, 1669, 45402, 94594, 4286, 5304, 42, 82, 58, 15, 936, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWatchFromZeroIndex(t *testing.T) { codec := latest.Codec pod := &api.Pod{JSONBase: api.JSONBase{ID: "foo"}} testCases := map[string]struct { Response EtcdResponseWithError ExpectedVersion uint64 ExpectedType watch.EventType }{ "get value created": { EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(codec, pod), CreatedIndex: 1, ModifiedIndex: 1, }, Action: "get", EtcdIndex: 2, }, }, 1, watch.Added, }, "get value modified": { EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(codec, pod), CreatedIndex: 1, ModifiedIndex: 2, }, Action: "get", EtcdIndex: 3, }, }, 2, watch.Modified, }, } for k, testCase := range testCases { fakeClient := NewFakeEtcdClient(t) fakeClient.Data["/some/key"] = testCase.Response h := EtcdHelper{fakeClient, codec, versioner} watching := h.Watch("/some/key", 0) fakeClient.WaitForWatchCompletion() if e, a := testCase.Response.R.EtcdIndex+1, fakeClient.WatchIndex; e != a { t.Errorf("%s: expected watch index to be %d, got %d", k, e, a) } // the existing node is detected and the index set event := <-watching.ResultChan() if e, a := testCase.ExpectedType, event.Type; e != a { t.Errorf("%s: expected %v, got %v", k, e, a) } actualPod, ok := event.Object.(*api.Pod) if !ok { t.Fatalf("%s: expected a pod, got %#v", k, event.Object) } if actualPod.ResourceVersion != testCase.ExpectedVersion { t.Errorf("%s: expected pod with resource version %d, Got %#v", k, testCase.ExpectedVersion, actualPod) } pod.ResourceVersion = testCase.ExpectedVersion if e, a := pod, event.Object; !reflect.DeepEqual(e, a) { t.Errorf("%s: expected %v, got %v", k, e, a) } watching.Stop() } }
explode_data.jsonl/40980
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 861 }
[ 2830, 3393, 14247, 3830, 17999, 1552, 1155, 353, 8840, 836, 8, 341, 43343, 66, 1669, 5535, 20274, 66, 198, 3223, 347, 1669, 609, 2068, 88823, 90, 5370, 3978, 25, 6330, 18009, 3978, 90, 915, 25, 330, 7975, 9207, 630, 18185, 37302, 1669...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestTemp8(t *testing.T) { n := 1000 m := 100000 edges := make([][]int, m) for i := range edges { s := (i+1)/n + 1 t := i % n + 1 c := i edges[i] = []int{s, t, c} } //fmt.Println(edges) temp(n, edges) }
explode_data.jsonl/52286
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 116 }
[ 2830, 3393, 12151, 23, 1155, 353, 8840, 836, 8, 341, 9038, 1669, 220, 16, 15, 15, 15, 198, 2109, 1669, 220, 16, 15, 15, 15, 15, 15, 198, 197, 16900, 1669, 1281, 10556, 1294, 396, 11, 296, 340, 2023, 600, 1669, 2088, 12822, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEndToEnd(t *testing.T) { // 1. setup input data targets := []*testData{ { name: "target1", pages: []mockPrometheusResponse{ {code: 200, data: target1Page1}, {code: 500, data: ""}, {code: 200, data: target1Page2}, }, validateFunc: verifyTarget1, }, { name: "target2", pages: []mockPrometheusResponse{ {code: 200, data: target2Page1}, {code: 200, data: target2Page2}, {code: 500, data: ""}, {code: 200, data: target2Page3}, {code: 200, data: target2Page4}, {code: 500, data: ""}, {code: 200, data: target2Page5}, }, validateFunc: verifyTarget2, }, { name: "target3", pages: []mockPrometheusResponse{ {code: 200, data: target3Page1}, {code: 200, data: target3Page2}, }, validateFunc: verifyTarget3, }, } testEndToEnd(t, targets, false) }
explode_data.jsonl/32319
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 392 }
[ 2830, 3393, 3727, 66573, 1155, 353, 8840, 836, 8, 341, 197, 322, 220, 16, 13, 6505, 1946, 821, 198, 28861, 82, 1669, 29838, 1944, 1043, 515, 197, 197, 515, 298, 11609, 25, 330, 5657, 16, 756, 298, 3223, 1134, 25, 3056, 16712, 35186,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestRecursiveRun verifies that a JS call re-entering the Go-side Run*()
// (via the registered "f" callback) preserves both interpreter state areas:
// the stash (lexical environments) and the value stack.
func TestRecursiveRun(t *testing.T) {
	// Make sure that a recursive call to Run*() correctly sets the environment and no stash or stack
	// corruptions occur.
	vm := New()
	// "f" recursively runs a script on the same vm; the inner block declares
	// several lets to perturb the stash if state were shared incorrectly.
	vm.Set("f", func() (Value, error) {
		return vm.RunString("let x = 1; { let z = 100, z1 = 200, z2 = 300, z3 = 400; } x;")
	})
	res, err := vm.RunString(` function f1() { let x = 2; eval(''); { let y = 3; let res = f(); if (x !== 2) { // check for stash corruption
 throw new Error("x="+x); } if (y !== 3) { // check for stack corruption
 throw new Error("y="+y); } return res; } }; f1(); `)
	if err != nil {
		t.Fatal(err)
	}
	// The recursive script evaluates to 1.
	if !res.SameAs(valueInt(1)) {
		t.Fatal(res)
	}
}
explode_data.jsonl/10460
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 307 }
[ 2830, 3393, 78542, 6727, 1155, 353, 8840, 836, 8, 341, 197, 322, 7405, 2704, 429, 264, 30819, 1618, 311, 6452, 9, 368, 12440, 7289, 279, 4573, 323, 902, 64037, 476, 5611, 198, 197, 322, 1829, 54681, 12170, 624, 54879, 1669, 1532, 741,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClusterDefaultTopologyVersion(t *testing.T) { // NOTE: ClusterTopology feature flag is disabled by default, thus preventing to set Cluster.Topologies. // Enabling the feature flag temporarily for this test. defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() g := NewWithT(t) c := builder.Cluster("fooboo", "cluster1"). WithTopology(builder.ClusterTopology(). WithClass("foo"). WithVersion("1.19.1"). Build()). Build() // Sets up the fakeClient for the test case. This is required because the test uses a Managed Topology. fakeClient := fake.NewClientBuilder(). WithObjects(builder.ClusterClass("fooboo", "foo").Build()). WithScheme(fakeScheme). Build() // Create the webhook and add the fakeClient as its client. webhook := &Cluster{Client: fakeClient} t.Run("for Cluster", customDefaultValidateTest(ctx, c, webhook)) g.Expect(webhook.Default(ctx, c)).To(Succeed()) g.Expect(c.Spec.Topology.Version).To(HavePrefix("v")) }
explode_data.jsonl/82290
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 335 }
[ 2830, 3393, 28678, 3675, 60954, 5637, 1155, 353, 8840, 836, 8, 341, 197, 322, 16743, 25, 35380, 60954, 4565, 5181, 374, 8386, 553, 1638, 11, 8450, 26160, 311, 738, 35380, 17557, 9090, 624, 197, 322, 2925, 18768, 279, 4565, 5181, 27092, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewInvalidShortCode(t *testing.T) { _, err := valueobject.NewShortCode("") assert.NotNil(t, err) _, err2 := valueobject.NewShortCode(" ") assert.NotNil(t, err2) _, err3 := valueobject.NewShortCode("test") assert.NotNil(t, err3) }
explode_data.jsonl/28953
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 100 }
[ 2830, 3393, 3564, 7928, 12472, 2078, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1848, 1669, 897, 1700, 7121, 12472, 2078, 31764, 6948, 93882, 1155, 11, 1848, 692, 197, 6878, 1848, 17, 1669, 897, 1700, 7121, 12472, 2078, 445, 14167, 6948, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTool_UploadBlob(t *testing.T) { e, cleanup := fakes.NewTestEnv(t) defer cleanup() cas := e.Server.CAS tmpFile := path.Join(t.TempDir(), "blob") if err := os.WriteFile(tmpFile, []byte("Hello, World!"), 0777); err != nil { t.Fatalf("Could not create temp blob: %v", err) } dg, err := digest.NewFromFile(tmpFile) if err != nil { t.Fatalf("digest.NewFromFile('%v') failed: %v", tmpFile, err) } toolClient := &Client{GrpcClient: e.Client.GrpcClient} if err := toolClient.UploadBlob(context.Background(), tmpFile); err != nil { t.Fatalf("UploadBlob('%v', '%v') failed: %v", dg.String(), tmpFile, err) } // First request should upload the blob. if cas.BlobWrites(dg) != 1 { t.Fatalf("Expected 1 write for blob '%v', got %v", dg.String(), cas.BlobWrites(dg)) } // Retries should check whether the blob already exists and skip uploading if it does. if err := toolClient.UploadBlob(context.Background(), tmpFile); err != nil { t.Fatalf("UploadBlob('%v', '%v') failed: %v", dg.String(), tmpFile, err) } if cas.BlobWrites(dg) != 1 { t.Fatalf("Expected 1 write for blob '%v', got %v", dg.String(), cas.BlobWrites(dg)) } }
explode_data.jsonl/8282
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 452 }
[ 2830, 3393, 7740, 62, 13844, 37985, 1155, 353, 8840, 836, 8, 341, 7727, 11, 21290, 1669, 282, 2050, 7121, 2271, 14359, 1155, 340, 16867, 21290, 741, 1444, 300, 1669, 384, 22997, 727, 1911, 271, 20082, 1703, 1669, 1815, 22363, 1155, 6500...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestUPCEReader(t *testing.T) { // testdata from zxing core/src/test/resources/blackbox/upce-1/ reader := NewUPCEReader() format := gozxing.BarcodeFormat_UPC_E tests := []struct { file string wants string }{ {"testdata/upce/1.png", "01234565"}, {"testdata/upce/2.png", "00123457"}, {"testdata/upce/4.png", "01234531"}, } for _, test := range tests { testutil.TestFile(t, reader, test.file, test.wants, format, nil, nil) } }
explode_data.jsonl/70697
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 195 }
[ 2830, 3393, 3124, 34, 640, 68, 998, 1155, 353, 8840, 836, 8, 341, 197, 322, 1273, 691, 504, 71322, 287, 6200, 13437, 12697, 38900, 98545, 2011, 71843, 346, 12, 16, 5894, 61477, 1669, 1532, 3124, 34, 640, 68, 998, 741, 59416, 1669, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestConfigurationStatusPropagation(t *testing.T) { svc := &Service{} svc.Status.PropagateConfigurationStatus(ConfigurationStatus{ LatestReadyRevisionName: "foo", LatestCreatedRevisionName: "bar", }) want := ServiceStatus{ LatestReadyRevisionName: "foo", LatestCreatedRevisionName: "bar", } if diff := cmp.Diff(want, svc.Status); diff != "" { t.Errorf("unexpected ServiceStatus (-want +got): %s", diff) } }
explode_data.jsonl/17370
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 152 }
[ 2830, 3393, 7688, 2522, 35172, 1155, 353, 8840, 836, 8, 341, 1903, 7362, 1669, 609, 1860, 16094, 1903, 7362, 10538, 42483, 46836, 7688, 2522, 45443, 2522, 515, 197, 15070, 13893, 19202, 33602, 675, 25, 256, 330, 7975, 756, 197, 15070, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestIssuanceDisclosureEmptyAttributes(t *testing.T) { client, _ := parseStorage(t) defer test.ClearTestStorage(t) req := getNameIssuanceRequest() sessionHelper(t, req, "issue", client) // Test disclosing our null attribute req2 := getDisclosureRequest(irma.NewAttributeTypeIdentifier("irma-demo.MijnOverheid.fullName.prefix")) res := requestorSessionHelper(t, req2, client) require.Nil(t, res.Err) require.Nil(t, res.Disclosed[0][0].RawValue) }
explode_data.jsonl/69993
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 28216, 84, 681, 91065, 3522, 10516, 1155, 353, 8840, 836, 8, 341, 25291, 11, 716, 1669, 4715, 5793, 1155, 340, 16867, 1273, 13524, 2271, 5793, 1155, 692, 24395, 1669, 20299, 28216, 84, 681, 1900, 741, 25054, 5511, 1155, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCRDDeletionCascading(t *testing.T) { ctx := setup(t, 5) defer ctx.tearDown() clientSet, apiExtensionClient, dynamicClient := ctx.clientSet, ctx.apiExtensionClient, ctx.dynamicClient ns := createNamespaceOrDie("crd-mixed", clientSet, t) t.Logf("First pass CRD cascading deletion") definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name) testCRDDeletion(t, ctx, ns, definition, resourceClient) t.Logf("Second pass CRD cascading deletion") accessor := meta.NewAccessor() accessor.SetResourceVersion(definition, "") _, err := apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, dynamicClient) if err != nil { t.Fatalf("failed to create CustomResourceDefinition: %v", err) } testCRDDeletion(t, ctx, ns, definition, resourceClient) }
explode_data.jsonl/18185
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 278 }
[ 2830, 3393, 8973, 35, 1912, 52625, 34, 5061, 2228, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 6505, 1155, 11, 220, 20, 340, 16867, 5635, 31853, 59342, 2822, 25291, 1649, 11, 6330, 12049, 2959, 11, 8741, 2959, 1669, 5635, 6581, 1649, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestNewConfigWrapper checks how the registries.conf path is resolved from
// a *types.SystemContext: the built-in default, an explicit path override,
// root-prefix rewriting, the per-user config file, and their combinations.
func TestNewConfigWrapper(t *testing.T) {
	const nondefaultPath = "/this/is/not/the/default/registries.conf"
	const variableReference = "$HOME"
	const rootPrefix = "/root/prefix"
	// Fake home directory so the per-user registries file can be created and
	// removed freely per case.
	tempHome, err := ioutil.TempDir("", "tempHome")
	require.NoError(t, err)
	defer os.RemoveAll(tempHome)
	var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
	userRegistriesFilePath := filepath.Join(tempHome, userRegistriesFile)

	for _, c := range []struct {
		sys             *types.SystemContext // context under test (nil allowed)
		userfilePresent bool                 // whether the per-user file exists for this case
		expected        string               // path the wrapper is expected to choose
	}{
		// The common case
		{nil, false, systemRegistriesConfPath},
		// There is a context, but it does not override the path.
		{&types.SystemContext{}, false, systemRegistriesConfPath},
		// Path overridden
		{&types.SystemContext{SystemRegistriesConfPath: nondefaultPath}, false, nondefaultPath},
		// Root overridden
		{
			&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
			false,
			filepath.Join(rootPrefix, systemRegistriesConfPath),
		},
		// Root and path overrides present simultaneously,
		{
			&types.SystemContext{
				RootForImplicitAbsolutePaths: rootPrefix,
				SystemRegistriesConfPath:     nondefaultPath,
			},
			false,
			nondefaultPath,
		},
		// User registries file overridden
		{&types.SystemContext{}, true, userRegistriesFilePath},
		// Context and user User registries file preset simultaneously
		{&types.SystemContext{SystemRegistriesConfPath: nondefaultPath}, true, nondefaultPath},
		// Root and user registries file overrides present simultaneously,
		{
			&types.SystemContext{
				RootForImplicitAbsolutePaths: rootPrefix,
				SystemRegistriesConfPath:     nondefaultPath,
			},
			true,
			nondefaultPath,
		},
		// No environment expansion happens in the overridden paths
		{&types.SystemContext{SystemRegistriesConfPath: variableReference}, false, variableReference},
	} {
		// Create or remove the per-user file to match the case.
		if c.userfilePresent {
			err := os.MkdirAll(filepath.Dir(userRegistriesFilePath), os.ModePerm)
			require.NoError(t, err)
			f, err := os.Create(userRegistriesFilePath)
			require.NoError(t, err)
			f.Close()
		} else {
			os.Remove(userRegistriesFilePath)
		}
		path := newConfigWrapperWithHomeDir(c.sys, tempHome).configPath
		assert.Equal(t, c.expected, path)
	}
}
explode_data.jsonl/62227
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 808 }
[ 2830, 3393, 3564, 2648, 11542, 1155, 353, 8840, 836, 8, 341, 4777, 2477, 2258, 1820, 284, 3521, 574, 46427, 62441, 51257, 28989, 14, 53287, 4019, 13937, 698, 4777, 3890, 8856, 284, 5201, 27546, 698, 4777, 3704, 14335, 284, 3521, 2888, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMatchRatingComparison(t *testing.T) { for _, row := range mrcmp_testdata { res := MatchRatingComparison(row[0], row[1]) expected := (row[2] == "True") if res != expected { t.Errorf("MatchRatingCodex(%q, %q) => %t, expected %t", row[0], row[1], res, expected) } } }
explode_data.jsonl/58062
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 118 }
[ 2830, 3393, 8331, 22152, 33487, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 2802, 1669, 2088, 296, 1287, 1307, 4452, 691, 341, 197, 10202, 1669, 14152, 22152, 33487, 7835, 58, 15, 1125, 2802, 58, 16, 2546, 197, 42400, 1669, 320, 651, 58...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestIndexes verifies that Result.Indexes records, for each element of a
// multi-result query, the byte offset of that element within the original
// JSON document.
func TestIndexes(t *testing.T) {
	// Deliberately non-strict JSON (unquoted key, trailing comma): the
	// parser is expected to tolerate it.
	var exampleJSON = `{
		"vals": [
			[1,66,{test: 3}],
			[4,5,[6]]
		],
		"objectArray":[
			{"first": "Dale", "age": 44},
			{"first": "Roger", "age": 68},
		]
	}`

	testCases := []struct {
		path     string
		expected []string // first byte of the raw value at each recorded index
	}{
		{
			`vals.#.1`,
			[]string{`6`, "5"},
		},
		{
			`vals.#.2`,
			[]string{"{", "["},
		},
		{
			`objectArray.#(age>43)#.first`,
			[]string{`"`, `"`},
		},
		{
			// A query through the @reverse modifier yields no indexes.
			`objectArray.@reverse.#.first`,
			nil,
		},
	}
	for _, tc := range testCases {
		r := Get(exampleJSON, tc.path)
		// One recorded index per expected element.
		assert(t, len(r.Indexes) == len(tc.expected))
		for i, a := range r.Indexes {
			// Each index must point at the first byte of its raw element.
			assert(t, string(exampleJSON[a]) == tc.expected[i])
		}
	}
}
explode_data.jsonl/43487
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 352 }
[ 2830, 3393, 62229, 1155, 353, 8840, 836, 8, 341, 2405, 3110, 5370, 284, 1565, 515, 197, 197, 1, 25596, 788, 2278, 298, 197, 58, 16, 11, 21, 21, 27837, 1944, 25, 220, 18, 64054, 298, 197, 58, 19, 11, 20, 17259, 21, 14288, 197, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestRSSyncExpectations verifies that a ReplicaSet sync trusts its
// expectations: when a pod becomes visible only after expectations were
// checked, the controller must not create a replacement replica in the same
// sync (zero creates, zero deletes).
func TestRSSyncExpectations(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod")
	// Only the first of the two pods is visible to the controller up front.
	manager.podStore.Indexer.Add(&pods.Items[0])
	postExpectationsPod := pods.Items[1]

	// FakeRSExpectations reports SatisfiedExpectations and, as a side
	// effect, inserts the second pod into the store — simulating a pod that
	// appears between the expectations check and the active-pods listing.
	manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
		controller.NewControllerExpectations(), true, func() {
			// If we check active pods before checking expectations, the
			// ReplicaSet will create a new replica because it doesn't see
			// this pod, but has fulfilled its expectations.
			manager.podStore.Indexer.Add(&postExpectationsPod)
		},
	})
	manager.syncReplicaSet(getKey(rsSpec, t))
	// Expect no pod creations and no deletions from this sync.
	validateSyncReplicaSet(t, &fakePodControl, 0, 0)
}
explode_data.jsonl/10053
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 383 }
[ 2830, 3393, 68980, 1721, 17536, 804, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 2943, 746, 7121, 2461, 2648, 2195, 18175, 2099, 3927, 2972, 10753, 90, 9296, 25, 7342, 8883, 2648, 25, 2732, 2972, 12614, 2648, 90, 2808, 5637, 25, 1273, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAzureFindValidAccessTokenForTenant_NoTokens(t *testing.T) { tokens := make([]cli.Token, 0) token, err := findValidAccessTokenForTenant(tokens, "abc123") if err == nil { t.Fatalf("Expected an error but didn't get one") } if token != nil { t.Fatalf("Expected a null token to be returned but got: %+v", token) } }
explode_data.jsonl/60957
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 124 }
[ 2830, 3393, 78107, 9885, 4088, 37649, 2461, 71252, 36989, 29300, 1155, 353, 8840, 836, 8, 341, 3244, 9713, 1669, 1281, 10556, 19521, 32277, 11, 220, 15, 340, 43947, 11, 1848, 1669, 1477, 4088, 37649, 2461, 71252, 34052, 11, 330, 13683, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestConfigUtil_Values decodes JSON documents into a struct of the
// Bool/Duration/String/Uint value-wrapper types via mapstructure with
// ConfigDecodeHook, checking the rendered values on success and the error
// text on failure.
func TestConfigUtil_Values(t *testing.T) {
	t.Parallel()

	type config struct {
		B BoolValue     `mapstructure:"bool"`
		D DurationValue `mapstructure:"duration"`
		S StringValue   `mapstructure:"string"`
		U UintValue     `mapstructure:"uint"`
	}

	cases := []struct {
		in      string // JSON input document
		success string // expected quoted rendering of all four fields ("" when a failure is expected)
		failure string // substring expected in the decode error ("" when success is expected)
	}{
		// Empty document: every field keeps its zero rendering.
		{
			`{ }`,
			`"false" "0s" "" "0"`,
			"",
		},
		// All four fields set with valid values.
		{
			`{ "bool": true, "duration": "2h", "string": "hello", "uint": 23 }`,
			`"true" "2h0m0s" "hello" "23"`,
			"",
		},
		// Type mismatches and out-of-range values must fail with a
		// recognizable message.
		{
			`{ "bool": "nope" }`,
			"",
			"got 'string'",
		},
		{
			`{ "duration": "nope" }`,
			"",
			"invalid duration nope",
		},
		{
			`{ "string": 123 }`,
			"",
			"got 'float64'",
		},
		{
			`{ "uint": -1 }`,
			"",
			"value cannot be negative",
		},
		{
			`{ "uint": 4294967296 }`,
			"",
			"value is too large",
		},
	}
	for i, c := range cases {
		// Decode the JSON into a generic value first; mapstructure consumes
		// the generic form.
		var raw interface{}
		dec := json.NewDecoder(bytes.NewBufferString(c.in))
		if err := dec.Decode(&raw); err != nil {
			t.Fatalf("(case %d) err: %v", i, err)
		}

		var r config
		msdec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook:  ConfigDecodeHook,
			Result:      &r,
			ErrorUnused: true,
		})
		if err != nil {
			t.Fatalf("(case %d) err: %v", i, err)
		}

		err = msdec.Decode(raw)
		if c.failure != "" {
			// Failure case: the error must mention the expected cause.
			if err == nil || !strings.Contains(err.Error(), c.failure) {
				t.Fatalf("(case %d) err: %v", i, err)
			}
			continue
		}
		if err != nil {
			t.Fatalf("(case %d) err: %v", i, err)
		}

		actual := fmt.Sprintf("%q %q %q %q", r.B.String(), r.D.String(), r.S.String(), r.U.String())
		if actual != c.success {
			t.Fatalf("(case %d) bad: %s", i, actual)
		}
	}
}
explode_data.jsonl/49063
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 846 }
[ 2830, 3393, 2648, 2742, 62, 6227, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 13158, 2193, 2036, 341, 197, 12791, 12608, 1130, 257, 1565, 2186, 7837, 2974, 2641, 8805, 197, 10957, 21045, 1130, 1565, 2186, 7837, 2974, 17021, 88...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestNewRollupTargetV2ProtoInvalidStoragePoliciesProto(t *testing.T) { proto := &rulepb.RollupTargetV2{ Pipeline: &pipelinepb.Pipeline{ Ops: []pipelinepb.PipelineOp{ { Type: pipelinepb.PipelineOp_TRANSFORMATION, Transformation: &pipelinepb.TransformationOp{ Type: transformationpb.TransformationType_ABSOLUTE, }, }, }, }, StoragePolicies: []*policypb.StoragePolicy{ &policypb.StoragePolicy{ Resolution: policypb.Resolution{Precision: 1234}, Retention: policypb.Retention{Period: 5678}, }, }, } _, err := newRollupTargetFromV2Proto(proto) require.Error(t, err) }
explode_data.jsonl/8366
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 277 }
[ 2830, 3393, 3564, 32355, 454, 6397, 53, 17, 31549, 7928, 5793, 47, 42038, 31549, 1155, 353, 8840, 836, 8, 341, 197, 15110, 1669, 609, 12937, 16650, 71212, 454, 6397, 53, 17, 515, 197, 10025, 8790, 25, 609, 51258, 16650, 1069, 8790, 51...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMaterializerNoDDL(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ Workflow: "workflow", SourceKeyspace: "sourceks", TargetKeyspace: "targetks", TableSettings: []*vtctldatapb.TableMaterializeSettings{{ TargetTable: "t1", SourceExpression: "select * from t1", CreateDdl: "", }}, } env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t1") env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) err := env.wr.Materialize(context.Background(), ms) require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") require.Equal(t, env.tmc.getSchemaRequestCount(100), 0) require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) }
explode_data.jsonl/61874
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 323 }
[ 2830, 3393, 13415, 3135, 2753, 58781, 1155, 353, 8840, 836, 8, 341, 47691, 1669, 609, 9708, 302, 507, 266, 391, 65, 44253, 551, 6086, 515, 197, 197, 62768, 25, 981, 330, 56249, 756, 197, 197, 3608, 8850, 1306, 25, 330, 2427, 2787, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCommandsAreSorted(t *testing.T) { EnableCommandSorting = true originalNames := []string{"middle", "zlast", "afirst"} expectedNames := []string{"afirst", "middle", "zlast"} var tmpCommand = &Command{Use: "tmp"} for _, name := range originalNames { tmpCommand.AddCommand(&Command{Use: name}) } for i, c := range tmpCommand.Commands() { if expectedNames[i] != c.Name() { t.Errorf("expected: %s, got: %s", expectedNames[i], c.Name()) } } EnableCommandSorting = true }
explode_data.jsonl/47431
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 177 }
[ 2830, 3393, 30479, 11526, 51051, 1155, 353, 8840, 836, 8, 341, 197, 11084, 4062, 71681, 284, 830, 271, 197, 9889, 7980, 1669, 3056, 917, 4913, 19656, 497, 330, 89, 4259, 497, 330, 2577, 864, 16707, 42400, 7980, 1669, 3056, 917, 4913, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestBuildFailed(t *testing.T) { folder := testlib.Mktmp(t) writeGoodMain(t, folder) config := config.Project{ Builds: []config.Build{ { ID: "buildid", Flags: []string{"-flag-that-dont-exists-to-force-failure"}, Targets: []string{ runtimeTarget, }, GoBinary: "go", }, }, } ctx := context.New(config) ctx.Git.CurrentTag = "5.6.7" err := Default.Build(ctx, ctx.Config.Builds[0], api.Options{ Target: "darwin_amd64", }) assertContainsError(t, err, `flag provided but not defined: -flag-that-dont-exists-to-force-failure`) require.Empty(t, ctx.Artifacts.List()) }
explode_data.jsonl/54144
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 274 }
[ 2830, 3393, 11066, 9408, 1155, 353, 8840, 836, 8, 341, 1166, 2018, 1669, 1273, 2740, 1321, 74, 5173, 1155, 340, 24945, 15216, 6202, 1155, 11, 8527, 340, 25873, 1669, 2193, 30944, 515, 197, 197, 11066, 82, 25, 3056, 1676, 25212, 515, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestNearestIdxForSpan exercises NearestIdxForSpan over finite spans,
// spans with infinite or NaN endpoints, reversed spans, and out-of-range or
// infinite query values, checking the returned index. It also verifies that
// a span of fewer than two points panics.
func TestNearestIdxForSpan(t *testing.T) {
	t.Parallel()
	for i, test := range []struct {
		length int     // number of points in the span
		lower  float64 // span start
		upper  float64 // span end
		value  float64 // query value
		idx    int     // expected nearest index
	}{
		// Finite increasing span, including values below/above the span and
		// values straddling the midpoint between adjacent indices.
		{length: 13, lower: 7, upper: 8.2, value: 6, idx: 0},
		{length: 13, lower: 7, upper: 8.2, value: 10, idx: 12},
		{length: 13, lower: 7, upper: 8.2, value: 7.19, idx: 2},
		{length: 13, lower: 7, upper: 8.2, value: 7.21, idx: 2},
		{length: 13, lower: 7, upper: 8.2, value: 7.2, idx: 2},
		{length: 13, lower: 7, upper: 8.2, value: 7.151, idx: 2},
		{length: 13, lower: 7, upper: 8.2, value: 7.249, idx: 2},
		// Both endpoints infinite.
		{length: 4, lower: math.Inf(-1), upper: math.Inf(1), value: math.Copysign(0, -1), idx: 0},
		{length: 5, lower: math.Inf(-1), upper: math.Inf(1), value: 0, idx: 2},
		{length: 5, lower: math.Inf(-1), upper: math.Inf(1), value: math.Inf(-1), idx: 0},
		{length: 5, lower: math.Inf(-1), upper: math.Inf(1), value: math.Inf(1), idx: 3},
		{length: 4, lower: math.Inf(-1), upper: math.Inf(1), value: 0, idx: 2},
		{length: 4, lower: math.Inf(-1), upper: math.Inf(1), value: math.Inf(1), idx: 2},
		{length: 4, lower: math.Inf(-1), upper: math.Inf(1), value: math.Inf(-1), idx: 0},
		// Degenerate and NaN endpoints.
		{length: 5, lower: math.Inf(1), upper: math.Inf(1), value: 1, idx: 0},
		{length: 5, lower: math.NaN(), upper: math.NaN(), value: 1, idx: 0},
		{length: 5, lower: 0, upper: 1, value: math.NaN(), idx: 0},
		{length: 5, lower: math.NaN(), upper: 1, value: 0, idx: 4},
		// One infinite endpoint.
		{length: 5, lower: math.Inf(-1), upper: 1, value: math.Inf(-1), idx: 0},
		{length: 5, lower: math.Inf(-1), upper: 1, value: 0, idx: 4},
		{length: 5, lower: math.Inf(1), upper: 1, value: math.Inf(1), idx: 0},
		{length: 5, lower: math.Inf(1), upper: 1, value: 0, idx: 4},
		{length: 5, lower: 100, upper: math.Inf(-1), value: math.Inf(-1), idx: 4},
		{length: 5, lower: 100, upper: math.Inf(-1), value: 200, idx: 0},
		{length: 5, lower: 100, upper: math.Inf(1), value: math.Inf(1), idx: 4},
		{length: 5, lower: 100, upper: math.Inf(1), value: 200, idx: 0},
		// Finite spans (increasing and decreasing) with infinite queries.
		{length: 5, lower: -1, upper: 2, value: math.Inf(-1), idx: 0},
		{length: 5, lower: -1, upper: 2, value: math.Inf(1), idx: 4},
		{length: 5, lower: 1, upper: -2, value: math.Inf(-1), idx: 4},
		{length: 5, lower: 1, upper: -2, value: math.Inf(1), idx: 0},
		// Decreasing span with out-of-range finite queries.
		{length: 5, lower: 2, upper: 0, value: 3, idx: 0},
		{length: 5, lower: 2, upper: 0, value: -1, idx: 4},
	} {
		if idx := NearestIdxForSpan(test.length, test.lower, test.upper, test.value); test.idx != idx {
			t.Errorf("Case %v mismatch: Want: %v, Got: %v", i, test.idx, idx)
		}
	}
	// Spans need at least two points; a length of 1 must panic.
	if !Panics(func() { NearestIdxForSpan(1, 0, 1, 0.5) }) {
		t.Errorf("Expected panic for short span length")
	}
}
explode_data.jsonl/1233
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2162 }
[ 2830, 3393, 8813, 15432, 11420, 2461, 12485, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 2023, 600, 11, 1273, 1669, 2088, 3056, 1235, 341, 197, 49046, 526, 198, 197, 8810, 1202, 220, 2224, 21, 19, 198, 197, 197, 13574, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUnmarshal_WithEmbeddedStruct(t *testing.T) { type TestEmbStructA struct { A string } testUnmarshal(t, []testcase{ { data: `a = "value"`, expect: &struct { TestEmbStructA A string }{ A: "value", }, }, { data: `a = "value"`, expect: &struct { A string TestEmbStructA }{ A: "value", }, }, }) }
explode_data.jsonl/52959
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 189 }
[ 2830, 3393, 1806, 27121, 62, 2354, 83466, 9422, 1155, 353, 8840, 836, 8, 341, 13158, 3393, 98786, 9422, 32, 2036, 341, 197, 22985, 914, 198, 197, 532, 18185, 1806, 27121, 1155, 11, 3056, 1944, 5638, 515, 197, 197, 515, 298, 8924, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRecordsAddListeners(t *testing.T) { list := NewRecordList() ev := NewEvent("foo") l1 := newMockListener() l2 := newMockListener() assert.Len(t, list.getList(), 0) list.Add(ev, l1) list1 := list.getList() assert.Len(t, list1, 1) assertListenersEqual(t, list1[0], ev, "foo", []Listener{l1}) list.Add(ev, l2) list2 := list.getList() assert.Len(t, list2, 1) assertListenersEqual(t, list2[0], ev, "foo", []Listener{l2, l1}) }
explode_data.jsonl/25272
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 192 }
[ 2830, 3393, 25876, 2212, 31570, 1155, 353, 8840, 836, 8, 341, 14440, 1669, 1532, 6471, 852, 741, 74837, 1669, 1532, 1556, 445, 7975, 1138, 8810, 16, 1669, 501, 11571, 2743, 741, 8810, 17, 1669, 501, 11571, 2743, 741, 6948, 65819, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestProcessorExit checks that when the changefeed is administratively
// stopped, the processor's Tick returns ErrReactorFinished and leaves an
// error-free task position behind.
func TestProcessorExit(t *testing.T) {
	ctx := cdcContext.NewBackendContext4Test(true)
	p, tester := initProcessor4Test(ctx, t)
	var err error
	// init tick
	_, err = p.Tick(ctx, p.changefeed)
	require.Nil(t, err)
	tester.MustApplyPatches()

	// stop the changefeed: mark AdminStop on both the changefeed status and
	// this capture's task status.
	p.changefeed.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) {
		status.AdminJobType = model.AdminStop
		return status, true, nil
	})
	p.changefeed.PatchTaskStatus(ctx.GlobalVars().CaptureInfo.ID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) {
		status.AdminJobType = model.AdminStop
		return status, true, nil
	})
	tester.MustApplyPatches()
	// The next tick must observe the stop and finish the reactor.
	_, err = p.Tick(ctx, p.changefeed)
	require.True(t, cerror.ErrReactorFinished.Equal(errors.Cause(err)))
	tester.MustApplyPatches()
	// The processor exits without recording an error in its task position.
	require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{
		Error: nil,
	})
}
explode_data.jsonl/81941
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 336 }
[ 2830, 3393, 22946, 15339, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 272, 7628, 1972, 7121, 29699, 1972, 19, 2271, 3715, 340, 3223, 11, 37111, 1669, 2930, 22946, 19, 2271, 7502, 11, 259, 340, 2405, 1848, 1465, 198, 197, 322, 2930, 934...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIsDir(t *testing.T) { err := Mkdir(dirRoot, 0755) if err != nil { panic(err) } fileName := dirRoot + "/test.txt" err = Touch(fileName) if err != nil { panic(err) } t.Cleanup(func() { _ = RemoveWithRecur(dirRoot) }) if !Exists(dirRoot) { t.Error("IsDir test failed!") } if IsDir(dirRoot) != true { t.Error("IsDir test failed!") } if IsDir(fileName) == true { t.Error("IsDir test failed!") } }
explode_data.jsonl/34183
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 192 }
[ 2830, 3393, 3872, 6184, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 386, 12438, 14161, 8439, 11, 220, 15, 22, 20, 20, 340, 743, 1848, 961, 2092, 341, 197, 30764, 3964, 340, 197, 532, 17661, 675, 1669, 5419, 8439, 488, 3521, 1944, 3909...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClient_Base(t *testing.T) { client, err := New("https://dev.azure.com", "org", "proj") if err != nil { t.Error(err) } got, want := client.BaseURL.String(), "https://dev.azure.com/" if got != want { t.Errorf("Want Client URL %q, got %q", want, got) } }
explode_data.jsonl/82298
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 113 }
[ 2830, 3393, 2959, 33982, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 1532, 445, 2428, 1110, 3583, 70240, 905, 497, 330, 1775, 497, 330, 30386, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 3964, 340, 197, 532, 3174, 354, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestValidatePath(t *testing.T) { testCases := map[string]struct { input interface{} want error }{ "not a string": { input: 123, want: errValueNotAString, }, "empty string": { input: "", want: errValueEmpty, }, "invalid path": { input: "../Dockerfile", want: errValueNotAValidPath, }, "returns nil if valid absolute path": { input: "frontend/Dockerfile", want: nil, }, "returns nil if valid relative path": { input: "frontend/../backend/Dockerfile", want: nil, }, } for path, tc := range testCases { t.Run(path, func(t *testing.T) { // GIVEN fs := &afero.Afero{Fs: afero.NewMemMapFs()} fs.MkdirAll("frontend", 0755) fs.MkdirAll("backend", 0755) afero.WriteFile(fs, "frontend/Dockerfile", []byte("FROM nginx"), 0644) afero.WriteFile(fs, "backend/Dockerfile", []byte("FROM nginx"), 0644) // WHEN got := validatePath(fs, tc.input) // THEN if tc.want == nil { require.Nil(t, got) } else { require.EqualError(t, tc.want, got.Error()) } }) } }
explode_data.jsonl/34547
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 486 }
[ 2830, 3393, 17926, 1820, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 22427, 3749, 16094, 197, 50780, 220, 1465, 198, 197, 59403, 197, 197, 1, 1921, 264, 914, 788, 341, 298, 22427, 25, 220, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTxIndexByHash(t *testing.T) { for i := 0; i < 20; i++ { txs := makeTxs(15, 60) for j := 0; j < len(txs); j++ { tx := txs[j] idx := txs.IndexByHash(tx.Hash()) assert.Equal(t, j, idx) } assert.Equal(t, -1, txs.IndexByHash(nil)) assert.Equal(t, -1, txs.IndexByHash(Tx("foodnwkf").Hash())) } }
explode_data.jsonl/57149
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 166 }
[ 2830, 3393, 31584, 1552, 1359, 6370, 1155, 353, 8840, 836, 8, 341, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 17, 15, 26, 600, 1027, 341, 197, 3244, 18561, 1669, 1281, 51, 18561, 7, 16, 20, 11, 220, 21, 15, 340, 197, 2023, 502...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFieldSetFloatValue(t *testing.T) { recordBuf := make([]byte, 20) f := newField("NAME", "N", 8, 2) f.Offset = 5 f.setFloatValue(recordBuf, 123.45) require.Equal(t, []byte(" 123.45"), recordBuf[5:13]) }
explode_data.jsonl/79459
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 93 }
[ 2830, 3393, 1877, 1649, 5442, 1130, 1155, 353, 8840, 836, 8, 341, 71952, 15064, 1669, 1281, 10556, 3782, 11, 220, 17, 15, 340, 1166, 1669, 501, 1877, 445, 7535, 497, 330, 45, 497, 220, 23, 11, 220, 17, 340, 1166, 61958, 284, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAutoNonExistentZone(t *testing.T) { t.Parallel() tmpdir, err := ioutil.TempDir(os.TempDir(), "coredns") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) corefile := `.:0 { auto { directory ` + tmpdir + ` (.*) {1} reload 0.01s } errors stdout }` i, err := CoreDNSServer(corefile) if err != nil { t.Fatalf("Could not get CoreDNS serving instance: %s", err) } udp, _ := CoreDNSServerPorts(i, 0) if udp == "" { t.Fatal("Could not get UDP listening port") } defer i.Stop() m := new(dns.Msg) m.SetQuestion("example.org.", dns.TypeA) resp, err := dns.Exchange(m, udp) if err != nil { t.Fatal("Expected to receive reply, but didn't") } if resp.Rcode != dns.RcodeServerFailure { t.Fatalf("Expected reply to be a SERVFAIL, got %d", resp.Rcode) } }
explode_data.jsonl/10917
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 352 }
[ 2830, 3393, 13253, 8121, 840, 18128, 15363, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 20082, 3741, 11, 1848, 1669, 43144, 65009, 6184, 9638, 65009, 6184, 1507, 330, 2153, 45226, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestMySQLClusterService_Update(t *testing.T) { asst := assert.New(t) entity, err := createMySQLCluster() asst.Nil(err, common.CombineMessageWithError("test Update() failed", err)) s := initNewMySQLService() err = s.Update(entity.Identity(), map[string]interface{}{clusterNameStruct: testUpdateClusterName}) asst.Nil(err, common.CombineMessageWithError("test Update() failed", err)) err = s.GetByID(entity.Identity()) asst.Nil(err, common.CombineMessageWithError("test Update() failed", err)) mysqlClusterName := s.GetMySQLClusters()[constant.ZeroInt].GetClusterName() asst.Equal(testUpdateClusterName, mysqlClusterName) // delete err = deleteMySQLClusterByID(s.MySQLClusters[0].Identity()) asst.Nil(err, common.CombineMessageWithError("test Update() failed", err)) }
explode_data.jsonl/6157
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 276 }
[ 2830, 3393, 59224, 28678, 1860, 47393, 1155, 353, 8840, 836, 8, 341, 60451, 267, 1669, 2060, 7121, 1155, 692, 52987, 11, 1848, 1669, 1855, 59224, 28678, 741, 60451, 267, 59678, 3964, 11, 4185, 31124, 2052, 66102, 445, 1944, 5549, 368, 4...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGenesisState_Validate(t *testing.T) { for _, tc := range []struct { desc string genState *types.GenesisState valid bool }{ { desc: "default is valid", genState: types.DefaultGenesis(), valid: true, }, { desc: "valid genesis state", genState: &types.GenesisState{ // this line is used by starport scaffolding # types/genesis/validField }, valid: true, }, // this line is used by starport scaffolding # types/genesis/testcase } { t.Run(tc.desc, func(t *testing.T) { err := tc.genState.Validate() if tc.valid { require.NoError(t, err) } else { require.Error(t, err) } }) } }
explode_data.jsonl/1545
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 294 }
[ 2830, 3393, 84652, 1397, 62, 17926, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17130, 1669, 2088, 3056, 1235, 341, 197, 41653, 257, 914, 198, 197, 82281, 1397, 353, 9242, 65384, 13774, 1397, 198, 197, 56322, 262, 1807, 198, 197, 59403, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPanickingCatchAll(t *testing.T) { defer func() { if recover() != nil { t.Error("panics should not fly thru") } }() panickingCatchAll() }
explode_data.jsonl/23197
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 64 }
[ 2830, 3393, 35693, 16272, 57760, 2403, 1155, 353, 8840, 836, 8, 341, 16867, 2915, 368, 341, 197, 743, 11731, 368, 961, 2092, 341, 298, 3244, 6141, 445, 848, 1211, 1265, 537, 11466, 40078, 1138, 197, 197, 532, 197, 69826, 197, 848, 162...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestIAMKeySource(t *testing.T) { tests := []struct { name string givenIAMErr bool wantErr bool }{ { name: "normal success", }, { name: "iam error", givenIAMErr: true, wantErr: true, }, } for _, test := range tests { const tokenValue = "iam-signed-jwt" t.Run(test.name, func(t *testing.T) { iamSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { t.Log(r.URL.Path) if strings.Contains(r.URL.Path, "keys") { json.NewEncoder(w).Encode(iam.ListServiceAccountKeysResponse{ Keys: []*iam.ServiceAccountKey{ {Name: "8289d54280b76712de41cd2ef95972b123be9ac0"}, }, }) } else { json.NewEncoder(w).Encode(iam.ServiceAccountKey{ PublicKeyData: pubKey, }) } })) if test.givenIAMErr { iamSvr.Close() } else { defer iamSvr.Close() } defaultTokenSource = func(ctx context.Context, scopes ...string) (oauth2.TokenSource, error) { return nil, nil } defer func() { defaultTokenSource = google.DefaultTokenSource }() cfg := IAMConfig{ IAMAddress: iamSvr.URL, } ctx := context.Background() hc := func(_ context.Context) *http.Client { return http.DefaultClient } src, err := NewIAMPublicKeySource(ctx, cfg, hc) if (err != nil) != test.wantErr { t.Errorf("expected error? %t but got %s", test.wantErr, err) } if src == nil { return } got, err := src.Get(ctx) if (err != nil) != test.wantErr { t.Errorf("expected error? %t but got %s", test.wantErr, err) } if len(got.Keys) == 0 { t.Errorf("expected keys to be generated but got none") } }) } }
explode_data.jsonl/44401
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 796 }
[ 2830, 3393, 73707, 1592, 3608, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 3056, 1235, 341, 197, 11609, 286, 914, 198, 197, 3174, 2071, 40, 2729, 634, 1807, 271, 197, 50780, 7747, 1807, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetServiceKeyByGuid(t *testing.T) { Convey("Get service key by guid", t, func() { setup(MockRoute{"GET", "/v2/service_keys", getServiceKeyPayload, "", 200, "q=service_instance_guid:ecf26687-e176-4784-b181-b3c942fecb62", nil}, t) defer teardown() c := &Config{ ApiAddress: server.URL, Token: "foobar", } client, err := NewClient(c) So(err, ShouldBeNil) serviceKey, err := client.GetServiceKeyByInstanceGuid("ecf26687-e176-4784-b181-b3c942fecb62") So(err, ShouldBeNil) So(serviceKey, ShouldNotBeNil) So(serviceKey.Name, ShouldEqual, "test01_key") So(serviceKey.ServiceInstanceGuid, ShouldEqual, "ecf26687-e176-4784-b181-b3c942fecb62") So(serviceKey.Credentials, ShouldNotEqual, nil) So(serviceKey.ServiceInstanceUrl, ShouldEqual, "/v2/service_instances/fcf26687-e176-4784-b181-b3c942fecb62") }) }
explode_data.jsonl/66020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 374 }
[ 2830, 3393, 1949, 1860, 1592, 1359, 16686, 1155, 353, 8840, 836, 8, 341, 93070, 5617, 445, 1949, 2473, 1376, 553, 9643, 497, 259, 11, 2915, 368, 341, 197, 84571, 66436, 4899, 4913, 3806, 497, 3521, 85, 17, 34186, 12631, 497, 85869, 15...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateGlobalConfiguration(t *testing.T) { globalConfiguration := v1alpha1.GlobalConfiguration{ Spec: v1alpha1.GlobalConfigurationSpec{ Listeners: []v1alpha1.Listener{ { Name: "tcp-listener", Port: 53, Protocol: "TCP", }, { Name: "udp-listener", Port: 53, Protocol: "UDP", }, }, }, } gcv := createGlobalConfigurationValidator() err := gcv.ValidateGlobalConfiguration(&globalConfiguration) if err != nil { t.Errorf("ValidateGlobalConfiguration() returned error %v for valid input", err) } }
explode_data.jsonl/11993
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 245 }
[ 2830, 3393, 17926, 11646, 7688, 1155, 353, 8840, 836, 8, 341, 18842, 7688, 1669, 348, 16, 7141, 16, 27381, 7688, 515, 197, 7568, 992, 25, 348, 16, 7141, 16, 27381, 7688, 8327, 515, 298, 10675, 18223, 25, 3056, 85, 16, 7141, 16, 6409...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestParser_ParseGeneralApiInfoExtensions(t *testing.T) { // should be return an error because extension value is not a valid json func() { expected := "annotation @x-google-endpoints need a valid json value" gopath := os.Getenv("GOPATH") assert.NotNil(t, gopath) p := New() err := p.ParseGeneralAPIInfo("testdata/extensionsFail1.go") if assert.Error(t, err) { assert.Equal(t, expected, err.Error()) } }() // should be return an error because extension don't have a value func() { expected := "annotation @x-google-endpoints need a value" gopath := os.Getenv("GOPATH") assert.NotNil(t, gopath) p := New() err := p.ParseGeneralAPIInfo("testdata/extensionsFail2.go") if assert.Error(t, err) { assert.Equal(t, expected, err.Error()) } }() }
explode_data.jsonl/63546
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 295 }
[ 2830, 3393, 6570, 77337, 15415, 6563, 1731, 31282, 1155, 353, 8840, 836, 8, 341, 197, 322, 1265, 387, 470, 458, 1465, 1576, 8894, 897, 374, 537, 264, 2697, 2951, 198, 29244, 368, 341, 197, 42400, 1669, 330, 24674, 569, 87, 62645, 1306...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSetExternalID(t *testing.T) { tests := []struct { descrip string ID string isSet bool }{ { "sets the external ID if not empty", "should-be-set", true, }, { "external ID not set if empty", "", false, }, } for _, l := range tests { test := l t.Run(test.descrip, func(t *testing.T) { f := setExternalID(test.ID) p := &stscreds.AssumeRoleProvider{} f(p) if test.isSet { if *p.ExternalID != test.ID { t.Fail() } } }) } }
explode_data.jsonl/18781
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 254 }
[ 2830, 3393, 1649, 25913, 915, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 52912, 740, 79, 914, 198, 197, 29580, 414, 914, 198, 197, 19907, 1649, 256, 1807, 198, 197, 59403, 197, 197, 515, 298, 197, 1, 4917, 279,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSocketWriter_tcp(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) sw := newSocketWriter() sw.Address = "tcp://" + listener.Addr().String() err = sw.Connect() require.NoError(t, err) lconn, err := listener.Accept() require.NoError(t, err) testSocketWriter_stream(t, sw, lconn) }
explode_data.jsonl/34017
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 136 }
[ 2830, 3393, 10286, 6492, 45562, 1155, 353, 8840, 836, 8, 341, 14440, 798, 11, 1848, 1669, 4179, 68334, 445, 27161, 497, 330, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, 15, 1138, 17957, 35699, 1155, 11, 1848, 692, 77295, 1669, 501, 10...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMuxRegexp2(t *testing.T) { r := NewRouter() r.Get("/foo-{suffix:[a-z]{2,3}}.json", HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte(URLParam(rc, "suffix"))) })) ts := NewTestServer(r) defer ts.Close() if _, body := testRequest(t, ts, "GET", "/foo-.json", nil); body != "" { t.Fatalf(body) } if _, body := testRequest(t, ts, "GET", "/foo-abc.json", nil); body != "abc" { t.Fatalf(body) } }
explode_data.jsonl/47965
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 202 }
[ 2830, 3393, 44, 2200, 3477, 4580, 17, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 9523, 741, 7000, 2234, 4283, 7975, 63347, 26786, 7259, 64, 9141, 15370, 17, 11, 18, 3417, 13, 2236, 497, 19954, 9626, 18552, 7502, 2266, 9328, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, _, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error) go func() { _, err := io.Copy(ioutil.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") } }
explode_data.jsonl/81978
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 234 }
[ 2830, 3393, 15613, 3027, 34253, 22748, 615, 1155, 353, 8840, 836, 8, 341, 25920, 1669, 3883, 12714, 445, 927, 497, 6523, 66, 497, 330, 631, 421, 23286, 3583, 14, 14154, 17065, 28, 16, 74, 1760, 28, 16, 15, 15, 15, 315, 23286, 3583, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_RepoCreate(t *testing.T) { http := &httpmock.Registry{} client := NewClient(ReplaceTripper(http)) http.StubResponse(200, bytes.NewBufferString(`{}`)) input := RepoCreateInput{ Description: "roasted chesnuts", HomepageURL: "http://example.com", } _, err := RepoCreate(client, input) if err != nil { t.Fatalf("unexpected error: %v", err) } if len(http.Requests) != 1 { t.Fatalf("expected 1 HTTP request, seen %d", len(http.Requests)) } var reqBody struct { Query string Variables struct { Input map[string]interface{} } } bodyBytes, _ := ioutil.ReadAll(http.Requests[0].Body) _ = json.Unmarshal(bodyBytes, &reqBody) if description := reqBody.Variables.Input["description"].(string); description != "roasted chesnuts" { t.Errorf("expected description to be %q, got %q", "roasted chesnuts", description) } if homepage := reqBody.Variables.Input["homepageUrl"].(string); homepage != "http://example.com" { t.Errorf("expected homepageUrl to be %q, got %q", "http://example.com", homepage) } }
explode_data.jsonl/74332
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 389 }
[ 2830, 3393, 62, 25243, 4021, 1155, 353, 8840, 836, 8, 341, 28080, 1669, 609, 1254, 16712, 89142, 16094, 25291, 1669, 1532, 2959, 7, 23107, 21884, 6922, 19886, 4390, 28080, 7758, 392, 2582, 7, 17, 15, 15, 11, 5820, 7121, 4095, 703, 580...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestRenderWriter(t *testing.T) { bufBytes := bytes.Buffer{} bufWriter := bufio.NewWriter(&bufBytes) writer := renderWriter{} assert := assert.New(t) // WriteString data := "foobar" writer.WriteString(bufWriter, data) assert.NoError(bufWriter.Flush()) assert.Equal(bufBytes.String(), data) assert.Equal(writer.lastWrittenByte, data[len(data)-1]) assert.NoError(writer.err) bufBytes.Reset() // Write data2 := []byte("raboof") writer.Write(bufWriter, data2) assert.NoError(bufWriter.Flush()) assert.Equal(bufBytes.Bytes(), data2) assert.Equal(writer.lastWrittenByte, data2[len(data2)-1]) assert.NoError(writer.err) // Write with error errString := "test error" errWriter := errorBuf{lastError: fmt.Errorf(errString), numWritten: 1} data3 := "zxyq" writer.WriteString(&errWriter, data3) assert.EqualError(writer.err, errString) assert.Equal(string(writer.lastWrittenByte), string(data3[errWriter.numWritten-1])) bufBytes.Reset() // Further writes are no-ops writer.WriteString(bufWriter, data) writer.Write(bufWriter, data2) assert.NoError(bufWriter.Flush()) assert.EqualError(writer.err, errString) assert.Equal(bufBytes.Bytes(), []byte{}) assert.Equal(string(writer.lastWrittenByte), string(data3[errWriter.numWritten-1])) // Reset clears error state writer.Reset() assert.NoError(writer.err) assert.Equal(writer.lastWrittenByte, byte(0)) }
explode_data.jsonl/50822
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 500 }
[ 2830, 3393, 6750, 6492, 1155, 353, 8840, 836, 8, 341, 26398, 7078, 1669, 5820, 22622, 16094, 26398, 6492, 1669, 96917, 7121, 6492, 2099, 5909, 7078, 340, 38959, 1669, 3141, 6492, 16094, 6948, 1669, 2060, 7121, 1155, 692, 197, 322, 9645, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHeaderCompare(t *testing.T) { //t.SkipNow() h := Header{"A": {"foo"}, "B": {"bar"}} hh := http.Header{"A": {"foo", "bar"}, "C": {"helloworld"}, "B": {"bar"}} if err := h.Compare(hh); err != nil { t.Errorf("got err %v, want <nil>", err) } h = Header{"X": {"foo"}, "B": {"baz"}} want := []string{ fmt.Sprintf(`Header["X"] got = %s""%s, want = %s"foo"%s`, RedColor, StopColor, RedColor, StopColor), fmt.Sprintf(`Header["B"] got = %s"bar"%s, want = %s"baz"%s`, RedColor, StopColor, RedColor, StopColor), } if err := h.Compare(hh); err != nil { for _, w := range want { if !strings.Contains(err.Error(), w) { t.Errorf("error got %v, should contain %q", err, w) } } } else { t.Error("got err <nil>, want err") } }
explode_data.jsonl/776
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 330 }
[ 2830, 3393, 4047, 27374, 1155, 353, 8840, 836, 8, 341, 197, 322, 83, 57776, 7039, 741, 9598, 1669, 12104, 4913, 32, 788, 5212, 7975, 14345, 330, 33, 788, 5212, 2257, 95642, 9598, 71, 1669, 1758, 15753, 4913, 32, 788, 5212, 7975, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestGetDNSinfo(t *testing.T) { d := setup() dn, err := d.GetDNSinfo() notErr(t, err) assert(t, dn.PriIPv4, "44.147.45.53") assert(t, dn.SecIPv4, "44.147.45.28") assert(t, dn.Suffix, "home.gan") }
explode_data.jsonl/21120
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 109 }
[ 2830, 3393, 1949, 61088, 2733, 1155, 353, 8840, 836, 8, 341, 2698, 1669, 6505, 741, 2698, 77, 11, 1848, 1669, 294, 2234, 61088, 2733, 741, 97266, 7747, 1155, 11, 1848, 340, 6948, 1155, 11, 47488, 1069, 461, 58056, 19, 11, 330, 19, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFuseSidecarEnabled(t *testing.T) { type testCase struct { name string annotations map[string]string expect bool } testcases := []testCase{ { name: "enable_fuse", annotations: map[string]string{ common.InjectFuseSidecar: "true", }, expect: true, }, { name: "disable_fuse", annotations: map[string]string{ common.InjectFuseSidecar: "false", }, expect: false, }, { name: "no_fuse", annotations: map[string]string{ "test": "false", }, expect: false, }, } for _, testcase := range testcases { got := FuseSidecarEnabled(testcase.annotations) if got != testcase.expect { t.Errorf("The testcase %s's failed due to expect %v but got %v", testcase.name, testcase.expect, got) } } }
explode_data.jsonl/35573
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 339 }
[ 2830, 3393, 48600, 16384, 6918, 5462, 1155, 353, 8840, 836, 8, 341, 13158, 54452, 2036, 341, 197, 11609, 286, 914, 198, 197, 197, 39626, 2415, 14032, 30953, 198, 197, 24952, 414, 1807, 198, 197, 630, 18185, 23910, 1669, 3056, 66194, 515...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCreateBucket_AnotherIndexBuilding(t *testing.T) { log.Printf("In TestCreateBucket_AnotherIndexBuilding()") e := secondaryindex.DropAllSecondaryIndexes(indexManagementAddress) FailTestIfError(e, "Error in DropAllSecondaryIndexes", t) index1 := "buck1_idx" index2 := "buck2_idx" bucket1 := "default" bucket2 := "multi_buck2" kvutility.FlushBucket(bucket1, "", clusterconfig.Username, clusterconfig.Password, kvaddress) kvutility.EditBucket(bucket1, "", clusterconfig.Username, clusterconfig.Password, kvaddress, "256") kvutility.DeleteBucket(bucket2, "", clusterconfig.Username, clusterconfig.Password, kvaddress) secondaryindex.RemoveClientForBucket(kvaddress, bucket2) tc.ClearMap(docs) time.Sleep(bucketOpWaitDur * time.Second) log.Printf("Setting JSON docs in KV") bucket1docs := generateDocs(200000, "test.prod") bucket2docs := generateDocs(10000, "test.prod") kvutility.SetKeyValues(bucket1docs, bucket1, "", clusterconfig.KVAddress) err := secondaryindex.CreateSecondaryIndexAsync(index1, bucket1, indexManagementAddress, "", []string{"company"}, false, nil, true, nil) FailTestIfError(err, "Error in creating the index1", t) kvutility.CreateBucket(bucket2, "sasl", "", clusterconfig.Username, clusterconfig.Password, kvaddress, "256", "11213") time.Sleep(bucketOpWaitDur * time.Second) kvutility.SetKeyValues(bucket2docs, bucket2, "", clusterconfig.KVAddress) err = secondaryindex.CreateSecondaryIndexAsync(index2, bucket2, indexManagementAddress, "", []string{"age"}, false, nil, true, nil) FailTestIfError(err, "Error in creating the index1", t) client, err := secondaryindex.GetOrCreateClient(indexManagementAddress, "test4client") FailTestIfError(err, "Error while creating client", t) defn1, _ := secondaryindex.GetDefnID(client, bucket1, index1) defn2, _ := secondaryindex.GetDefnID(client, bucket2, index2) e = secondaryindex.WaitTillIndexActive(defn1, client, defaultIndexActiveTimeout) if e != nil { FailTestIfError(e, "Error in WaitTillIndexActive for index1", t) } e = 
secondaryindex.WaitTillIndexActive(defn2, client, defaultIndexActiveTimeout) if e != nil { FailTestIfError(e, "Error in WaitTillIndexActive for index2", t) } docScanResults := datautility.ExpectedScanAllResponse(bucket2docs, "age") scanResults, err := secondaryindex.ScanAll(index2, bucket2, indexScanAddress, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan index2", t) err = tv.Validate(docScanResults, scanResults) FailTestIfError(err, "Error in scan result validation", t) log.Printf("Number of docScanResults and scanResults = %v and %v", len(docScanResults), len(scanResults)) docScanResults = datautility.ExpectedScanAllResponse(bucket1docs, "company") scanResults, err = secondaryindex.ScanAll(index1, bucket1, indexScanAddress, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan index1", t) err = tv.Validate(docScanResults, scanResults) FailTestIfError(err, "Error in scan result validation", t) log.Printf("Number of docScanResults and scanResults = %v and %v", len(docScanResults), len(scanResults)) kvutility.DeleteBucket(bucket2, "", clusterconfig.Username, clusterconfig.Password, kvaddress) secondaryindex.RemoveClientForBucket(kvaddress, bucket2) kvutility.FlushBucket(bucket1, "", clusterconfig.Username, clusterconfig.Password, kvaddress) tc.ClearMap(docs) }
explode_data.jsonl/59146
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1130 }
[ 2830, 3393, 4021, 36018, 32699, 1575, 1552, 30133, 1155, 353, 8840, 836, 8, 341, 6725, 19367, 445, 641, 3393, 4021, 36018, 32699, 1575, 1552, 30133, 368, 5130, 7727, 1669, 14246, 1252, 21688, 2403, 48963, 62229, 7195, 22237, 4286, 340, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestChangeStream_resumableError(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } skipIfBelow36(t) if os.Getenv("TOPOLOGY") != "replica_set" { t.Skip() } coll := createTestCollection(t, nil, nil) // Ensure the database is created. _, err := coll.InsertOne(context.Background(), bson.NewDocument(bson.EC.Int32("y", 1))) require.NoError(t, err) changes, err := coll.Watch(context.Background(), nil) require.NoError(t, err) // Create a context that will expire before the operation can finish. ctx, cancel := context.WithTimeout(context.Background(), 100*time.Nanosecond) // "Use" the cancel function, which go vet complains if we throw away. func(context.CancelFunc) {}(cancel) require.False(t, changes.Next(ctx)) err = changes.Err() require.Error(t, err) require.False(t, isServerError(err)) // If the ResumeAfter option is present, the the operation attempted to resume. hasResume := false for _, opt := range changes.(*changeStream).options { if _, ok := opt.(option.OptResumeAfter); ok { hasResume = true break } } require.True(t, hasResume) }
explode_data.jsonl/68240
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 391 }
[ 2830, 3393, 4072, 3027, 4918, 372, 480, 1454, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 741, 197, 532, 1903, 13389, 2679, 38214, 18, 21, 1155, 692, 743, 2643, 64883, 445, 5207...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDuplicateMempool(t *testing.T) { q, mem := initEnv(0) defer q.Close() defer mem.Close() // add 10 txs err := add10Tx(mem.client) if err != nil { t.Error("add tx error", err.Error()) return } require.Equal(t, mem.Size(), 10) msg := mem.client.NewMessage("mempool", types.EventGetMempool, nil) mem.client.Send(msg, true) reply, err := mem.client.Wait(msg) if err != nil { t.Error(err) return } if len(reply.GetData().(*types.ReplyTxList).GetTxs()) != 10 || mem.Size() != 10 { t.Error("TestDuplicateMempool failed") } }
explode_data.jsonl/16825
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 229 }
[ 2830, 3393, 53979, 44, 3262, 1749, 1155, 353, 8840, 836, 8, 341, 18534, 11, 1833, 1669, 2930, 14359, 7, 15, 340, 16867, 2804, 10421, 741, 16867, 1833, 10421, 2822, 197, 322, 912, 220, 16, 15, 9854, 82, 198, 9859, 1669, 912, 16, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestPNCounter_SubtractAndGet(t *testing.T) { it.PNCounterTester(t, func(t *testing.T, pn *hz.PNCounter) { v, err := pn.SubtractAndGet(context.Background(), 1) if err != nil { t.Fatal(err) } assert.Equal(t, int64(-1), v) v, err = pn.SubtractAndGet(context.Background(), 10) if err != nil { t.Fatal(err) } assert.Equal(t, int64(-11), v) }) }
explode_data.jsonl/48304
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 178 }
[ 2830, 3393, 17896, 14099, 36359, 2144, 97726, 1155, 353, 8840, 836, 8, 341, 23374, 1069, 45, 14099, 58699, 1155, 11, 2915, 1155, 353, 8840, 836, 11, 43050, 353, 37259, 1069, 45, 14099, 8, 341, 197, 5195, 11, 1848, 1669, 43050, 12391, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestLookupFieldOrMethod(t *testing.T) { // Test cases assume a lookup of the form a.f or x.f, where a stands for an // addressable value, and x for a non-addressable value (even though a variable // for ease of test case writing). // // Should be kept in sync with TestMethodSet. var tests = []struct { src string found bool index []int indirect bool }{ // field lookups {"var x T; type T struct{}", false, nil, false}, {"var x T; type T struct{ f int }", true, []int{0}, false}, {"var x T; type T struct{ a, b, f, c int }", true, []int{2}, false}, // method lookups {"var a T; type T struct{}; func (T) f() {}", true, []int{0}, false}, {"var a *T; type T struct{}; func (T) f() {}", true, []int{0}, true}, {"var a T; type T struct{}; func (*T) f() {}", true, []int{0}, false}, {"var a *T; type T struct{}; func (*T) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false? // collisions {"type ( E1 struct{ f int }; E2 struct{ f int }; x struct{ E1; *E2 })", false, []int{1, 0}, false}, {"type ( E1 struct{ f int }; E2 struct{}; x struct{ E1; *E2 }); func (E2) f() {}", false, []int{1, 0}, false}, // outside methodset // (*T).f method exists, but value of type T is not addressable {"var x T; type T struct{}; func (*T) f() {}", false, nil, true}, } for _, test := range tests { pkg, err := pkgFor("test", "package p;"+test.src, nil) if err != nil { t.Errorf("%s: incorrect test case: %s", test.src, err) continue } obj := pkg.Scope().Lookup("a") if obj == nil { if obj = pkg.Scope().Lookup("x"); obj == nil { t.Errorf("%s: incorrect test case - no object a or x", test.src) continue } } f, index, indirect := LookupFieldOrMethod(obj.Type(), obj.Name() == "a", pkg, "f") if (f != nil) != test.found { if f == nil { t.Errorf("%s: got no object; want one", test.src) } else { t.Errorf("%s: got object = %v; want none", test.src, f) } } if !sameSlice(index, test.index) { t.Errorf("%s: got index = %v; want %v", test.src, index, test.index) } if indirect != 
test.indirect { t.Errorf("%s: got indirect = %v; want %v", test.src, indirect, test.indirect) } } }
explode_data.jsonl/55549
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 899 }
[ 2830, 3393, 34247, 1877, 2195, 3523, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 5048, 9658, 264, 18615, 315, 279, 1352, 264, 833, 476, 856, 833, 11, 1380, 264, 13352, 369, 458, 198, 197, 322, 2621, 480, 897, 11, 323, 856, 369, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestGetLDAPStatusAPIEndpoint(t *testing.T) { pingResult = []*multildap.ServerStatus{ {Host: "10.0.0.3", Port: 361, Available: true, Error: nil}, {Host: "10.0.0.3", Port: 362, Available: true, Error: nil}, {Host: "10.0.0.5", Port: 361, Available: false, Error: errors.New("something is awfully wrong")}, } getLDAPConfig = func(*setting.Cfg) (*ldap.Config, error) { return &ldap.Config{}, nil } newLDAP = func(_ []*ldap.ServerConfig) multildap.IMultiLDAP { return &LDAPMock{} } sc := getLDAPStatusContext(t) require.Equal(t, http.StatusOK, sc.resp.Code) expected := ` [ { "host": "10.0.0.3", "port": 361, "available": true, "error": "" }, { "host": "10.0.0.3", "port": 362, "available": true, "error": "" }, { "host": "10.0.0.5", "port": 361, "available": false, "error": "something is awfully wrong" } ] ` assert.JSONEq(t, expected, sc.resp.Body.String()) }
explode_data.jsonl/34371
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 370 }
[ 2830, 3393, 1949, 93497, 2522, 7082, 27380, 1155, 353, 8840, 836, 8, 341, 3223, 287, 2077, 284, 29838, 25875, 695, 391, 22997, 2522, 515, 197, 197, 90, 9296, 25, 330, 16, 15, 13, 15, 13, 15, 13, 18, 497, 5776, 25, 220, 18, 21, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReconcileWithTaskResults(t *testing.T) { names.TestingSeed() ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` metadata: name: test-pipeline namespace: foo spec: tasks: - name: a-task taskRef: name: a-task - name: b-task params: - name: bParam value: $(tasks.a-task.results.aResult) taskRef: name: b-task `)} prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` metadata: name: test-pipeline-run-different-service-accs namespace: foo spec: pipelineRef: name: test-pipeline serviceAccountName: test-sa-0 `)} ts := []*v1beta1.Task{ parse.MustParseTask(t, ` metadata: name: a-task namespace: foo spec: {} `), parse.MustParseTask(t, ` metadata: name: b-task namespace: foo spec: params: - name: bParam type: string `), } trs := []*v1beta1.TaskRun{mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-different-service-accs-a-task-xxyyy", "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "a-task", true), ` spec: resources: {} serviceAccountName: test-sa taskRef: name: hello-world timeout: 1h0m0s status: conditions: - lastTransitionTime: null status: "True" type: Succeeded taskResults: - name: aResult value: aResultValue `)} d := test.Data{ PipelineRuns: prs, Pipelines: ps, Tasks: ts, TaskRuns: trs, } prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", []string{}, false) expectedTaskRunName := "test-pipeline-run-different-service-accs-b-task" expectedTaskRun := mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-different-service-accs-b-task", "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "b-task", false), ` spec: params: - name: bParam value: aResultValue resources: {} serviceAccountName: test-sa-0 taskRef: name: b-task timeout: 1h0m0s `) // Check that the expected TaskRun was created actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{ 
LabelSelector: "tekton.dev/pipelineTask=b-task,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs", Limit: 1, }) if err != nil { t.Fatalf("Failure to list TaskRun's %s", err) } if len(actual.Items) != 1 { t.Fatalf("Expected 1 TaskRuns got %d", len(actual.Items)) } actualTaskRun := actual.Items[0] if d := cmp.Diff(&actualTaskRun, expectedTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) } }
explode_data.jsonl/27319
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1133 }
[ 2830, 3393, 693, 40446, 457, 2354, 6262, 9801, 1155, 353, 8840, 836, 8, 341, 93940, 8787, 287, 41471, 741, 35009, 1669, 29838, 85, 16, 19127, 16, 1069, 8790, 90, 6400, 50463, 14463, 34656, 1155, 11, 22074, 17637, 510, 220, 829, 25, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestApplicationCRUD(t *testing.T) { testingConfig := "config.testing.yml" httpClient := Client{} httpClient.SetHTTPClient(module.NewHTTPClient(20)) httpClient.SetAPIKey("") // LoadConfigFile t.Run("LoadConfigFile", func(t *testing.T) { fs := module.FileSystem{} dir, _ := os.Getwd() configFile := fmt.Sprintf("%s/%s", dir, testingConfig) for { if fs.FileExists(configFile) { break } dir = filepath.Dir(dir) configFile = fmt.Sprintf("%s/%s", dir, testingConfig) } t.Logf("Load Config File %s", configFile) configUnparsed, _ := ioutil.ReadFile(configFile) configParsed, _ := envsubst.EvalEnv(string(configUnparsed)) viper.SetConfigType("yaml") viper.ReadConfig(bytes.NewBuffer([]byte(configParsed))) }) // TestGetApplications t.Run("TestGetApplications", func(t *testing.T) { srv := pkg.ServerMock( "/api/v1/cluster/production/namespace/default/app", `{"applications":[{"id":"toad","name":"Toad App","format":"clivern/toad:release-[.Release]","containers":[{"name":"toad","image":"clivern/toad:release-0.2.3","version":"0.2.3","deployment":{"name":"toad-deployment","uid":"0f77903a-ce69-4aa5-a025-cad4b4a3209e"}}]}]}`, http.StatusOK, ) defer srv.Close() httpClient.SetAPIURL(srv.URL) result, err := httpClient.GetApplications(context.TODO(), "production", "default") pkg.Expect(t, nil, err) pkg.Expect(t, result, model.Applications{ Applications: []model.Application{ model.Application{ ID: "toad", Name: "Toad App", Format: "clivern/toad:release-[.Release]", Containers: []model.Container{ model.Container{ Name: "toad", Image: "clivern/toad:release-0.2.3", Version: "0.2.3", Deployment: model.Deployment{ Name: "toad-deployment", UID: "0f77903a-ce69-4aa5-a025-cad4b4a3209e", }, }, }, }, }, }) }) // TestGetApplication t.Run("TestGetApplication", func(t *testing.T) { srv := pkg.ServerMock( "/api/v1/cluster/production/namespace/default/app/toad", `{"id":"toad","name":"Toad 
App","format":"clivern/toad:release-[.Release]","containers":[{"name":"toad","image":"clivern/toad:release-0.2.3","version":"0.2.3","deployment":{"name":"toad-deployment","uid":"0f77903a-ce69-4aa5-a025-cad4b4a3209e"}}]}`, http.StatusOK, ) defer srv.Close() httpClient.SetAPIURL(srv.URL) result, err := httpClient.GetApplication(context.TODO(), "production", "default", "toad") pkg.Expect(t, nil, err) pkg.Expect(t, result, model.Application{ ID: "toad", Name: "Toad App", Format: "clivern/toad:release-[.Release]", Containers: []model.Container{ model.Container{ Name: "toad", Image: "clivern/toad:release-0.2.3", Version: "0.2.3", Deployment: model.Deployment{ Name: "toad-deployment", UID: "0f77903a-ce69-4aa5-a025-cad4b4a3209e", }, }, }, }) }) }
explode_data.jsonl/71965
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1330 }
[ 2830, 3393, 4988, 8973, 4656, 1155, 353, 8840, 836, 8, 341, 197, 8840, 2648, 1669, 330, 1676, 45056, 33936, 1837, 28080, 2959, 1669, 8423, 16094, 28080, 2959, 4202, 9230, 2959, 19123, 7121, 9230, 2959, 7, 17, 15, 1171, 28080, 2959, 4202...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMigrateOldConfigFromFile(t *testing.T) { tests := []struct { desc string oldCfg string expectedKinds []string expectErr bool }{ { desc: "empty file produces empty result", oldCfg: "", expectErr: false, }, { desc: "bad config produces error", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s `, kubeadmapiv1old.SchemeGroupVersion.String())), expectErr: true, }, { desc: "InitConfiguration only gets migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, }, expectErr: false, }, { desc: "ClusterConfiguration only gets migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: ClusterConfiguration `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, }, expectErr: false, }, { desc: "JoinConfiguration only gets migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: JoinConfiguration discovery: bootstrapToken: token: abcdef.0123456789abcdef apiServerEndpoint: kube-apiserver:6443 unsafeSkipCAVerification: true `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.JoinConfigurationKind, }, expectErr: false, }, { desc: "Init + Cluster Configurations are migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration --- apiVersion: %[1]s kind: ClusterConfiguration `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, }, expectErr: false, }, { desc: "Init + Join Configurations are migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration --- apiVersion: %[1]s kind: JoinConfiguration discovery: bootstrapToken: token: abcdef.0123456789abcdef apiServerEndpoint: kube-apiserver:6443 unsafeSkipCAVerification: true `, 
kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind, }, expectErr: false, }, { desc: "Cluster + Join Configurations are migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: ClusterConfiguration --- apiVersion: %[1]s kind: JoinConfiguration discovery: bootstrapToken: token: abcdef.0123456789abcdef apiServerEndpoint: kube-apiserver:6443 unsafeSkipCAVerification: true `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind, }, expectErr: false, }, { desc: "Init + Cluster + Join Configurations are migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration --- apiVersion: %[1]s kind: ClusterConfiguration --- apiVersion: %[1]s kind: JoinConfiguration discovery: bootstrapToken: token: abcdef.0123456789abcdef apiServerEndpoint: kube-apiserver:6443 unsafeSkipCAVerification: true `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind, }, expectErr: false, }, { desc: "component configs are not migrated", oldCfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration --- apiVersion: %[1]s kind: ClusterConfiguration --- apiVersion: %[1]s kind: JoinConfiguration discovery: bootstrapToken: token: abcdef.0123456789abcdef apiServerEndpoint: kube-apiserver:6443 unsafeSkipCAVerification: true --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration `, kubeadmapiv1old.SchemeGroupVersion.String())), expectedKinds: []string{ constants.InitConfigurationKind, constants.ClusterConfigurationKind, constants.JoinConfigurationKind, }, expectErr: false, }, } for _, test := range tests { 
t.Run(test.desc, func(t *testing.T) { b, err := MigrateOldConfig([]byte(test.oldCfg)) if test.expectErr { if err == nil { t.Fatalf("unexpected success:\n%s", b) } } else { if err != nil { t.Fatalf("unexpected failure: %v", err) } gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b) if err != nil { t.Fatalf("unexpected error returned by GroupVersionKindsFromBytes: %v", err) } if len(gvks) != len(test.expectedKinds) { t.Fatalf("length mismatch between resulting gvks and expected kinds:\n\tlen(gvks)=%d\n\tlen(expectedKinds)=%d", len(gvks), len(test.expectedKinds)) } for _, expectedKind := range test.expectedKinds { if !kubeadmutil.GroupVersionKindsHasKind(gvks, expectedKind) { t.Fatalf("migration failed to produce config kind: %s", expectedKind) } } } }) } }
explode_data.jsonl/10228
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2338 }
[ 2830, 3393, 44, 34479, 18284, 2648, 43633, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 41653, 688, 914, 198, 197, 61828, 42467, 286, 914, 198, 197, 42400, 42, 8673, 3056, 917, 198, 197, 24952, 7747, 257, 1807, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestDealCard(t *testing.T) { cases := []struct { inCards []int expected []int }{ {[]int{1, 2, 3}, []int{1, 2, 3}}, } for _, c := range cases { hand := NewHandNoCards() // test for _, card := range c.inCards { hand.DealCard(card) } result := hand.GetCards() ok := len(c.expected) == len(result) if ok { for i := 0; i < len(result); i++ { if ok && result[i] != c.expected[i] { ok = false } } } if !ok { t.Errorf("DealCard() == %v, expected %v", result, c.expected) } } }
explode_data.jsonl/27792
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 256 }
[ 2830, 3393, 72841, 5770, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 17430, 28448, 220, 3056, 396, 198, 197, 42400, 3056, 396, 198, 197, 59403, 197, 197, 90, 1294, 396, 90, 16, 11, 220, 17, 11, 220, 18, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestBackupExclude(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() testRunInit(t, env.gopts) datadir := filepath.Join(env.base, "testdata") for _, filename := range backupExcludeFilenames { fp := filepath.Join(datadir, filename) rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) f, err := os.Create(fp) rtest.OK(t, err) fmt.Fprint(f, filename) rtest.OK(t, f.Close()) } snapshots := make(map[string]struct{}) opts := BackupOptions{} testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) files := testRunLs(t, env.gopts, snapshotID) rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"), "expected file %q in first snapshot, but it's not included", "foo.tar.gz") opts.Excludes = []string{"*.tar.gz"} testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) files = testRunLs(t, env.gopts, snapshotID) rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), "expected file %q not in first snapshot, but it's included", "foo.tar.gz") opts.Excludes = []string{"*.tar.gz", "private/secret"} testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) files = testRunLs(t, env.gopts, snapshotID) rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), "expected file %q not in first snapshot, but it's included", "foo.tar.gz") rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"), "expected file %q not in first snapshot, but it's included", "passwords.txt") }
explode_data.jsonl/43546
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 686 }
[ 2830, 3393, 56245, 95239, 1155, 353, 8840, 836, 8, 341, 57538, 11, 21290, 1669, 448, 2271, 12723, 1155, 340, 16867, 21290, 2822, 18185, 6727, 3803, 1155, 11, 6105, 1302, 10518, 692, 2698, 266, 41173, 1669, 26054, 22363, 16978, 8928, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSearchHashtagPosts(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() th.LoginBasic() Client := th.Client message := "#sgtitlereview with space" assert.NotNil(t, th.CreateMessagePost(message)) message = "#sgtitlereview\n with return" assert.NotNil(t, th.CreateMessagePost(message)) message = "no hashtag" assert.NotNil(t, th.CreateMessagePost(message)) posts, resp := Client.SearchPosts(th.BasicTeam.Id, "#sgtitlereview", false) CheckNoError(t, resp) require.Len(t, posts.Order, 2, "wrong search results") Client.Logout() _, resp = Client.SearchPosts(th.BasicTeam.Id, "#sgtitlereview", false) CheckUnauthorizedStatus(t, resp) }
explode_data.jsonl/5258
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 236 }
[ 2830, 3393, 5890, 10281, 34311, 19631, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 741, 70479, 32499, 15944, 741, 71724, 1669, 270, 11716, 271, 24753, 1669, 5869, 1991, 16903,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestProcessHashtable(t *testing.T) { ht := &processHashtable{} p1 := model.NewProcess("s1", []model.KeyValue{ model.String("host", "google.com"), }) p1dup := model.NewProcess("s1", []model.KeyValue{ model.String("host", "google.com"), }) p2 := model.NewProcess("s2", []model.KeyValue{ model.String("host", "facebook.com"), }) assert.Equal(t, "p1", ht.getKey(p1)) assert.Equal(t, "p1", ht.getKey(p1)) assert.Equal(t, "p1", ht.getKey(p1dup)) assert.Equal(t, "p2", ht.getKey(p2)) expectedMapping := map[string]*model.Process{ "p1": p1, "p2": p2, } assert.Equal(t, expectedMapping, ht.getMapping()) }
explode_data.jsonl/11520
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 283 }
[ 2830, 3393, 7423, 93020, 1155, 353, 8840, 836, 8, 341, 197, 426, 1669, 609, 4630, 93020, 31483, 3223, 16, 1669, 1614, 7121, 7423, 445, 82, 16, 497, 3056, 2528, 9610, 1130, 515, 197, 19727, 6431, 445, 3790, 497, 330, 17485, 905, 4461, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateAuthorizeTokensUpdate(t *testing.T) { valid := &oapi.OAuthAuthorizeToken{ ObjectMeta: metav1.ObjectMeta{Name: "authorizeTokenNameWithMinimumLength", ResourceVersion: "1"}, ClientName: "myclient", UserName: "myusername", UserUID: "myuseruid", Scopes: []string{`user:info`}, } errs := ValidateAuthorizeTokenUpdate(valid, valid) if len(errs) != 0 { t.Errorf("expected success: %v", errs) } errorCases := map[string]struct { Token oapi.OAuthAuthorizeToken Change func(*oapi.OAuthAuthorizeToken) T field.ErrorType F string }{ "change name": { Token: *valid, Change: func(obj *oapi.OAuthAuthorizeToken) { obj.Name = "" }, T: field.ErrorTypeInvalid, F: "metadata.name", }, "change userUID": { Token: *valid, Change: func(obj *oapi.OAuthAuthorizeToken) { obj.UserUID = "" }, T: field.ErrorTypeInvalid, F: "[]", }, } for k, v := range errorCases { copied, _ := api.Scheme.Copy(&v.Token) newToken := copied.(*oapi.OAuthAuthorizeToken) v.Change(newToken) errs := ValidateAuthorizeTokenUpdate(newToken, &v.Token) if len(errs) == 0 { t.Errorf("expected failure %s for %v", k, v.Token) continue } for i := range errs { if errs[i].Type != v.T { t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) } if errs[i].Field != v.F { t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) } } } }
explode_data.jsonl/78252
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 647 }
[ 2830, 3393, 17926, 37483, 29300, 4289, 1155, 353, 8840, 836, 8, 341, 56322, 1669, 609, 78, 2068, 8382, 5087, 37483, 3323, 515, 197, 23816, 12175, 25, 77520, 16, 80222, 63121, 25, 330, 52022, 3323, 675, 2354, 28695, 4373, 497, 11765, 563...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTransportEventTraceRealDNS(t *testing.T) { skipIfDNSHijacked(t) defer afterTest(t) tr := &Transport{} defer tr.CloseIdleConnections() c := &Client{Transport: tr} var mu sync.Mutex // guards buf var buf bytes.Buffer logf := func(format string, args ...interface{}) { mu.Lock() defer mu.Unlock() fmt.Fprintf(&buf, format, args...) buf.WriteByte('\n') } req, _ := NewRequest("GET", "http://dns-should-not-resolve.golang:80", nil) trace := &httptrace.ClientTrace{ DNSStart: func(e httptrace.DNSStartInfo) { logf("DNSStart: %+v", e) }, DNSDone: func(e httptrace.DNSDoneInfo) { logf("DNSDone: %+v", e) }, ConnectStart: func(network, addr string) { logf("ConnectStart: %s %s", network, addr) }, ConnectDone: func(network, addr string, err error) { logf("ConnectDone: %s %s %v", network, addr, err) }, } req = req.WithContext(httptrace.WithClientTrace(context.Background(), trace)) resp, err := c.Do(req) if err == nil { resp.Body.Close() t.Fatal("expected error during DNS lookup") } mu.Lock() got := buf.String() mu.Unlock() wantSub := func(sub string) { if !strings.Contains(got, sub) { t.Errorf("expected substring %q in output.", sub) } } wantSub("DNSStart: {Host:dns-should-not-resolve.golang}") wantSub("DNSDone: {Addrs:[] Err:") if strings.Contains(got, "ConnectStart") || strings.Contains(got, "ConnectDone") { t.Errorf("should not see Connect events") } if t.Failed() { t.Errorf("Output:\n%s", got) } }
explode_data.jsonl/14162
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 605 }
[ 2830, 3393, 27560, 1556, 6550, 12768, 61088, 1155, 353, 8840, 836, 8, 341, 1903, 13389, 2679, 61088, 39, 3172, 11191, 1155, 340, 16867, 1283, 2271, 1155, 340, 25583, 1669, 609, 27560, 16094, 16867, 489, 10421, 41370, 54751, 741, 1444, 166...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCrUnmergedMakeFilesInRenamedDir(t *testing.T) { test(t, users("alice", "bob"), as(alice, mkdir("a/b"), ), as(bob, disableUpdates(), ), as(alice, rename("a/b", "b"), ), as(bob, noSync(), write("a/b/c", "hello"), write("a/b/d", "goodbye"), reenableUpdates(), lsdir("a", m{}), lsdir("b", m{"c": "FILE", "d": "FILE"}), read("b/c", "hello"), read("b/d", "goodbye"), ), as(alice, lsdir("a", m{}), lsdir("b", m{"c": "FILE", "d": "FILE"}), read("b/c", "hello"), read("b/d", "goodbye"), ), ) }
explode_data.jsonl/31371
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 305 }
[ 2830, 3393, 16001, 1806, 40354, 8078, 10809, 641, 34625, 3606, 6184, 1155, 353, 8840, 836, 8, 341, 18185, 1155, 345, 197, 90896, 445, 63195, 497, 330, 47086, 4461, 197, 60451, 17643, 558, 345, 298, 88650, 445, 64, 3470, 4461, 197, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_Match_False(t *testing.T) { // Arrange ass := assert.New(t) data := generateRandomStringSlice(size, 50) bloom := NewBloomFilter(data) m := NewExactMatch(data) all := NewMatchAll(bloom, m) s := data[size/2] // Act ok := all.Match(s) // Assert ass.True(ok) }
explode_data.jsonl/13364
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 1245, 754, 1400, 710, 1155, 353, 8840, 836, 8, 341, 197, 322, 40580, 198, 197, 395, 1669, 2060, 7121, 1155, 340, 8924, 1669, 6923, 13999, 703, 33236, 6856, 11, 220, 20, 15, 340, 2233, 18474, 1669, 1532, 33, 18474, 5632, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInitialize(t *testing.T) { discovery := &ConsulDiscoveryService{} discovery.Initialize("127.0.0.1:8500/path", 0) assert.Equal(t, discovery.prefix, "path/") }
explode_data.jsonl/63833
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 67 }
[ 2830, 3393, 9928, 1155, 353, 8840, 836, 8, 341, 34597, 7449, 1669, 609, 15220, 360, 67400, 1860, 16094, 34597, 7449, 45829, 445, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, 23, 20, 15, 15, 50976, 497, 220, 15, 340, 6948, 12808, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestFindStepStatusByServiceRequestIdAndStatusOrderByCreatedAtDesc(t *testing.T) { stepsStatusReq := prepareStepsStatus() findStepStatusByServiceRequestIDAndStatusMock = func(serviceRequestId uuid.UUID, status models.Status) (statuses []*models.StepsStatus, err error) { step1Time := time.Date(2020, time.April, 07, 16, 32, 00, 00, time.UTC) statuses = make([]*models.StepsStatus, 1) statuses[0] = &models.StepsStatus{} statuses[0].WorkflowName = "testWF" statuses[0].ID = "1" statuses[0].Status = models.StatusStarted statuses[0].StepName = "step1" statuses[0].TotalTimeInMs = 10 statuses[0].CreatedAt = step1Time return statuses, err } stepsStatuses, err := FindStepStatusByServiceRequestIDAndStatus(stepsStatusReq.ServiceRequestID, models.StatusStarted) stepsStatus := stepsStatuses[0] assert.Nil(t, err) assert.Equal(t, stepsStatusReq.WorkflowName, stepsStatus.WorkflowName) assert.Equal(t, models.StatusStarted, stepsStatus.Status) assert.Equal(t, int64(10), stepsStatus.TotalTimeInMs) assert.NotNil(t, stepsStatus.ServiceRequestID) assert.Equal(t, "step1", stepsStatus.StepName) assert.Equal(t, stepsStatusReq.TotalTimeInMs, stepsStatus.TotalTimeInMs) findStepStatusByServiceRequestIDAndStatusMock = func(serviceRequestId uuid.UUID, status models.Status) (statuses []*models.StepsStatus, err error) { return statuses, errors.New("select query failed") } _, err = FindStepStatusByServiceRequestIDAndStatus(stepsStatusReq.ServiceRequestID, models.StatusStarted) assert.NotNil(t, err) }
explode_data.jsonl/42477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 538 }
[ 2830, 3393, 9885, 8304, 2522, 1359, 1860, 61774, 3036, 2522, 34605, 70387, 11065, 1155, 353, 8840, 836, 8, 341, 18388, 7124, 2522, 27234, 1669, 10549, 33951, 2522, 741, 80603, 8304, 2522, 1359, 1860, 1900, 915, 3036, 2522, 11571, 284, 291...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1