text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestCSVRegex(t *testing.T) { csvRegex, err := regexp.Compile("(\\-?[0-9\\.]\\,)+(\\-?[0-9\\.])") if err != nil { t.Errorf("%v", err) } csvStings := []string{ "1,2,3,4", "1.3,-3.2,132,32", "33,-33", } for _, s := range csvStings { if !csvRegex.MatchString(s) { t.Errorf("%s is not matched", s) } } nonCSVStings := []string{ "100", "-100", "023,", ",123", "1.23", } for _, s := range nonCSVStings { if csvRegex.MatchString(s) { t.Errorf("%s should not be matched", s) } } }
explode_data.jsonl/24026
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 278 }
[ 2830, 3393, 44209, 32464, 1155, 353, 8840, 836, 8, 341, 1444, 3492, 32464, 11, 1848, 1669, 41877, 89323, 31732, 3422, 12, 60764, 15, 12, 24, 3422, 24719, 3422, 11, 41794, 3422, 12, 60764, 15, 12, 24, 67157, 2467, 1138, 743, 1848, 961,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestReplicateGroupIDAssignedNodes(t *testing.T) { defer base.SetUpTestLogging(base.LevelInfo, base.KeyAll)() tb := base.GetTestBucket(t) defer tb.Close() // Set up SG Configs cfgDefault, err := base.NewCfgSG(tb, "") require.NoError(t, err) cfgGroupA, err := base.NewCfgSG(tb, "GroupA") require.NoError(t, err) cfgGGroupB, err := base.NewCfgSG(tb, "GroupB") require.NoError(t, err) // Set up replicators dbDefault, err := NewDatabaseContext("default", tb, false, DatabaseContextOptions{GroupID: ""}) require.NoError(t, err) managerDefault, err := NewSGReplicateManager(dbDefault, cfgDefault) require.NoError(t, err) err = managerDefault.RegisterNode("nodeDefault") require.NoError(t, err) err = managerDefault.AddReplication(&ReplicationCfg{ ReplicationConfig: ReplicationConfig{ ID: "repl", InitialState: ReplicationStateStopped, }, }) require.NoError(t, err) dbGroupA, err := NewDatabaseContext("groupa", tb, false, DatabaseContextOptions{GroupID: "GroupA"}) require.NoError(t, err) managerGroupA, err := NewSGReplicateManager(dbGroupA, cfgGroupA) require.NoError(t, err) err = managerGroupA.RegisterNode("nodeGroupA") require.NoError(t, err) err = managerGroupA.AddReplication(&ReplicationCfg{ ReplicationConfig: ReplicationConfig{ ID: "repl", InitialState: ReplicationStateStopped, }, }) require.NoError(t, err) dbGroupB, err := NewDatabaseContext("groupb", tb, false, DatabaseContextOptions{GroupID: "GroupB"}) require.NoError(t, err) managerGroupB, err := NewSGReplicateManager(dbGroupB, cfgGGroupB) require.NoError(t, err) err = managerGroupB.RegisterNode("nodeGroupB") require.NoError(t, err) err = managerGroupB.AddReplication(&ReplicationCfg{ ReplicationConfig: ReplicationConfig{ ID: "repl", InitialState: ReplicationStateStopped, }, }) require.NoError(t, err) // Check replications are assigned to correct nodes replications, err := managerDefault.GetReplications() require.NoError(t, err) assert.Len(t, replications, 1) cfg, exists := replications["repl"] require.True(t, exists, 
"Replicator not found") assert.Equal(t, "nodeDefault", cfg.AssignedNode) replications, err = managerGroupA.GetReplications() require.NoError(t, err) assert.Len(t, replications, 1) cfg, exists = replications["repl"] require.True(t, exists, "Replicator not found") assert.Equal(t, "nodeGroupA", cfg.AssignedNode) replications, err = managerGroupB.GetReplications() require.NoError(t, err) assert.Len(t, replications, 1) cfg, exists = replications["repl"] require.True(t, exists, "Replicator not found") assert.Equal(t, "nodeGroupB", cfg.AssignedNode) }
explode_data.jsonl/73345
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 981 }
[ 2830, 3393, 18327, 48795, 2808, 915, 94025, 12288, 1155, 353, 8840, 836, 8, 341, 16867, 2331, 4202, 2324, 2271, 34575, 12663, 25259, 1731, 11, 2331, 9610, 2403, 8, 741, 62842, 1669, 2331, 2234, 2271, 36018, 1155, 340, 16867, 16363, 10421,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEth_chainId(t *testing.T) { rpcRes := call(t, "eth_chainId", []string{}) var res hexutil.Uint err := res.UnmarshalJSON(rpcRes.Result) require.NoError(t, err) require.NotEqual(t, "0x0", res.String()) }
explode_data.jsonl/841
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 93 }
[ 2830, 3393, 65390, 30583, 764, 1155, 353, 8840, 836, 8, 341, 7000, 3992, 1061, 1669, 1618, 1155, 11, 330, 769, 30583, 764, 497, 3056, 917, 6257, 692, 2405, 592, 12371, 1314, 71869, 198, 9859, 1669, 592, 38097, 5370, 2601, 3992, 1061, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func Test_GetAuthURL(t *testing.T) { t.Parallel() a := assert.New(t) s := &digitalocean.Session{} _, err := s.GetAuthURL() a.Error(err) s.AuthURL = "/foo" url, _ := s.GetAuthURL() a.Equal(url, "/foo") }
explode_data.jsonl/52642
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 99 }
[ 2830, 3393, 13614, 5087, 3144, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 11323, 1669, 2060, 7121, 1155, 340, 1903, 1669, 609, 57269, 78, 11206, 20674, 31483, 197, 6878, 1848, 1669, 274, 2234, 5087, 3144, 741, 11323, 6141, 39...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHead_LogRollback(t *testing.T) { dir, err := ioutil.TempDir("", "wal_rollback") testutil.Ok(t, err) defer func() { testutil.Ok(t, os.RemoveAll(dir)) }() w, err := wal.New(nil, nil, dir) testutil.Ok(t, err) defer w.Close() h, err := NewHead(nil, nil, w, 1000) testutil.Ok(t, err) app := h.Appender() _, err = app.Add(labels.FromStrings("a", "b"), 1, 2) testutil.Ok(t, err) testutil.Ok(t, app.Rollback()) recs := readTestWAL(t, w.Dir()) testutil.Equals(t, 1, len(recs)) series, ok := recs[0].([]RefSeries) testutil.Assert(t, ok, "expected series record but got %+v", recs[0]) testutil.Equals(t, []RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series) }
explode_data.jsonl/38172
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 311 }
[ 2830, 3393, 12346, 44083, 32355, 1419, 1155, 353, 8840, 836, 8, 341, 48532, 11, 1848, 1669, 43144, 65009, 6184, 19814, 330, 26397, 62, 33559, 1138, 18185, 1314, 54282, 1155, 11, 1848, 340, 16867, 2915, 368, 341, 197, 18185, 1314, 54282, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEmptyDotNet(t *testing.T) { integration.ProgramTest(t, &integration.ProgramTestOptions{ Dir: filepath.Join("empty", "dotnet"), Quick: true, }) }
explode_data.jsonl/76340
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 62 }
[ 2830, 3393, 3522, 34207, 6954, 1155, 353, 8840, 836, 8, 341, 2084, 17376, 80254, 2271, 1155, 11, 609, 60168, 80254, 2271, 3798, 515, 197, 197, 6184, 25, 256, 26054, 22363, 445, 3194, 497, 330, 16119, 4711, 4461, 197, 197, 24318, 25, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestLockOutputSuccessful(t *testing.T) { env := BuildEnv(t) kbld := Kbld{t, env.Namespace, env.KbldBinaryPath, Logger{}} input := ` images: - image: nginx:1.14.2 - image: sample-app - sidecarImage: sample-app - - sample-app --- apiVersion: kbld.k14s.io/v1alpha1 kind: ImageOverrides overrides: - image: sample-app newImage: nginx:1.15.1 --- apiVersion: kbld.k14s.io/v1alpha1 kind: ImageKeys keys: - sidecarImage --- apiVersion: kbld.k14s.io/v1alpha1 kind: Config searchRules: - keyMatcher: path: [images, {allIndexes: true}, {index: 0}] ` path := "/tmp/kbld-test-lock-output-successful" defer os.RemoveAll(path) out, _ := kbld.RunWithOpts([]string{"-f", "-", "--images-annotation=false", "--lock-output=" + path}, RunOpts{ StdinReader: strings.NewReader(input), }) expectedOut := `--- images: - image: index.docker.io/library/nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d - image: index.docker.io/library/nginx@sha256:4a5573037f358b6cdfa2f3e8a9c33a5cf11bcd1675ca72ca76fbe5bd77d0d682 - sidecarImage: index.docker.io/library/nginx@sha256:4a5573037f358b6cdfa2f3e8a9c33a5cf11bcd1675ca72ca76fbe5bd77d0d682 - - index.docker.io/library/nginx@sha256:4a5573037f358b6cdfa2f3e8a9c33a5cf11bcd1675ca72ca76fbe5bd77d0d682 ` if out != expectedOut { t.Fatalf("Expected >>>%s<<< to match >>>%s<<<", out, expectedOut) } expectedFileContents := strings.ReplaceAll(`apiVersion: kbld.k14s.io/v1alpha1 kind: Config minimumRequiredVersion: __ver__ overrides: - image: nginx:1.14.2 newImage: index.docker.io/library/nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d preresolved: true - image: sample-app newImage: index.docker.io/library/nginx@sha256:4a5573037f358b6cdfa2f3e8a9c33a5cf11bcd1675ca72ca76fbe5bd77d0d682 preresolved: true searchRules: - keyMatcher: name: sidecarImage - keyMatcher: path: - images - allIndexes: true - index: 0 `, "__ver__", version.Version) bs, err := ioutil.ReadFile(path) if err != nil { t.Fatalf("Failed while reading " + path) 
} if string(bs) != expectedFileContents { t.Fatalf("Expected >>>%s<<< to match >>>%s<<<", bs, expectedFileContents) } out, _ = kbld.RunWithOpts([]string{"-f", "-", "--images-annotation=false", "-f", path}, RunOpts{ StdinReader: strings.NewReader(input), }) if out != expectedOut { t.Fatalf("Expected >>>%s<<< to match >>>%s<<<", out, expectedOut) } }
explode_data.jsonl/33765
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1064 }
[ 2830, 3393, 11989, 5097, 36374, 1155, 353, 8840, 836, 8, 341, 57538, 1669, 7854, 14359, 1155, 340, 16463, 65, 507, 1669, 98086, 507, 90, 83, 11, 6105, 46011, 11, 6105, 11352, 65, 507, 21338, 1820, 11, 9514, 6257, 630, 22427, 1669, 220...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestJsonEmpty(t *testing.T) { assert := assert.New(t) settings := testutil.Settings().With(&print.Settings{ ShowHeader: false, ShowProviders: false, ShowInputs: false, ShowOutputs: false, }).Build() expected, err := testutil.GetExpected("json", "json-Empty") assert.Nil(err) options, err := module.NewOptions().WithOverwrite(&module.Options{ HeaderFromFile: "bad.tf", }) options.ShowHeader = false // Since we don't show the header, the file won't be loaded at all assert.Nil(err) module, err := testutil.GetModule(options) assert.Nil(err) printer := NewJSON(settings) actual, err := printer.Print(module, settings) assert.Nil(err) assert.Equal(expected, actual) }
explode_data.jsonl/40860
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 253 }
[ 2830, 3393, 5014, 3522, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 340, 62930, 1669, 1273, 1314, 27000, 1005, 2354, 2099, 1350, 27000, 515, 197, 197, 7812, 4047, 25, 262, 895, 345, 197, 197, 7812, 37351, 25, 895, 345,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSplitDomain(t *testing.T) { cases := map[string][]string{ "http://zh.wikipedia.org": {"zh", "wikipedia", "org"}, "zh.wikipedia.org": {"zh", "wikipedia", "org"}, "https://zh.wikipedia.org/wiki/%E5%9F%9F%E5%90%8D": {"zh", "wikipedia", "org"}, "wikipedia.org": {"wikipedia", "org"}, ".org": {"org"}, "org": nil, "a.b.c.d.wikipedia.org": {"a", "b", "c", "d", "wikipedia", "org"}, } for url, array := range cases { arrVal := SplitDomain(url) if !reflect.DeepEqual(array, arrVal) { t.Errorf("Url (%q) return %v for SplitDomain, bug %v was expected", url, arrVal, array) } } }
explode_data.jsonl/30828
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 460 }
[ 2830, 3393, 20193, 13636, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 2415, 14032, 45725, 917, 515, 197, 197, 76932, 1110, 23815, 33366, 2659, 788, 3502, 5212, 23815, 497, 330, 86, 14939, 497, 330, 1775, 7115, 197, 197, 1, 23815, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestTokenStructure(t *testing.T) { t.Run("when JSON is valid", func(t *testing.T) { server := makeServer(t) dest := &pb.GetDestination{ContextToken: "{\"ns\":\"ns-1\",\"nodeName\":\"node-1\"}\n"} token := server.parseContextToken(dest.ContextToken) if token.Ns != "ns-1" { t.Fatalf("Expected token namespace to be %s got %s", "ns-1", token.Ns) } if token.NodeName != "node-1" { t.Fatalf("Expected token nodeName to be %s got %s", "node-1", token.NodeName) } }) t.Run("when JSON is invalid and old token format used", func(t *testing.T) { server := makeServer(t) dest := &pb.GetDestination{ContextToken: "ns:ns-2"} token := server.parseContextToken(dest.ContextToken) if token.Ns != "ns-2" { t.Fatalf("Expected %s got %s", "ns-2", token.Ns) } }) t.Run("when invalid JSON and invalid old format", func(t *testing.T) { server := makeServer(t) dest := &pb.GetDestination{ContextToken: "123fa-test"} token := server.parseContextToken(dest.ContextToken) if token.Ns != "" || token.NodeName != "" { t.Fatalf("Expected context token to be empty, got %v", token) } }) }
explode_data.jsonl/75643
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 436 }
[ 2830, 3393, 3323, 22952, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 9309, 4718, 374, 2697, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 41057, 1669, 1281, 5475, 1155, 340, 197, 49616, 1669, 609, 16650, 2234, 33605, 90, 1972, 332...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDeleteMigrationsNoMigration(t *testing.T) { cmdArgs := []string{"delete", "migrations", "-c", "migration1"} expected := "No resources found.\n" testCommon(t, cmdArgs, nil, expected, false) }
explode_data.jsonl/18260
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 6435, 44, 17824, 2753, 20168, 1155, 353, 8840, 836, 8, 341, 25920, 4117, 1669, 3056, 917, 4913, 4542, 497, 330, 76, 17824, 497, 6523, 66, 497, 330, 80227, 16, 63159, 42400, 1669, 330, 2753, 4963, 1730, 7110, 77, 698, 18185...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestBadgerEngine(t *testing.T) { var val int64 config, _ := c.LoadConf("") badger := New(config) err := badger.Init() assert.Nil(t, err) badger.Reset() badger.AddTotalCount(10) val = badger.GetTotalCount() assert.Equal(t, int64(10), val) badger.AddTotalCount(10) val = badger.GetTotalCount() assert.Equal(t, int64(20), val) badger.AddIosSuccess(20) val = badger.GetIosSuccess() assert.Equal(t, int64(20), val) badger.AddIosError(30) val = badger.GetIosError() assert.Equal(t, int64(30), val) badger.AddAndroidSuccess(40) val = badger.GetAndroidSuccess() assert.Equal(t, int64(40), val) badger.AddAndroidError(50) val = badger.GetAndroidError() assert.Equal(t, int64(50), val) // test reset db badger.Reset() val = badger.GetAndroidError() assert.Equal(t, int64(0), val) assert.NoError(t, badger.Close()) }
explode_data.jsonl/81186
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 357 }
[ 2830, 3393, 17082, 1389, 4571, 1155, 353, 8840, 836, 8, 341, 2405, 1044, 526, 21, 19, 271, 25873, 11, 716, 1669, 272, 13969, 15578, 445, 5130, 2233, 329, 1389, 1669, 1532, 8754, 340, 9859, 1669, 3873, 1389, 26849, 741, 6948, 59678, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAddItemWithEnclosureGUIDSet(t *testing.T) { t.Parallel() // arrange theLink := "http://someotherurl.com/story.html" theGUID := "someGUID" length := 3 p := podcast.New("title", "link", "description", nil, nil) i := podcast.Item{ Title: "title", Description: "desc", GUID: theGUID, } i.AddEnclosure(theLink, podcast.MP3, int64(length)) // act added, err := p.AddItem(i) // assert assert.EqualValues(t, 1, added) assert.NoError(t, err) assert.Len(t, p.Items, 1) assert.EqualValues(t, theGUID, p.Items[0].GUID) assert.EqualValues(t, length, p.Items[0].Enclosure.Length) }
explode_data.jsonl/73081
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 262 }
[ 2830, 3393, 2212, 1234, 2354, 7408, 11653, 41778, 1649, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 322, 30893, 198, 32088, 3939, 1669, 330, 1254, 1110, 14689, 1575, 1085, 905, 78389, 2564, 698, 32088, 41778, 1669, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCascadingDeletion(t *testing.T) { ctx := setup(t, 5) defer ctx.tearDown() gc, clientSet := ctx.gc, ctx.clientSet ns := createNamespaceOrDie("gc-cascading-deletion", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name)) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name)) if err != nil { t.Fatalf("Failed to create replication controller: %v", err) } rcs, err := rcClient.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list replication controllers: %v", err) } if len(rcs.Items) != 2 { t.Fatalf("Expect only 2 replication controller") } // this pod should be cascadingly deleted. pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}}) _, err = podClient.Create(pod) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } // this pod shouldn't be cascadingly deleted, because it has a valid reference. pod = newPod(oneValidOwnerPodName, ns.Name, []metav1.OwnerReference{ {UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}, {UID: remainingRC.ObjectMeta.UID, Name: remainingRCName}, }) _, err = podClient.Create(pod) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } // this pod shouldn't be cascadingly deleted, because it doesn't have an owner. 
pod = newPod(independentPodName, ns.Name, []metav1.OwnerReference{}) _, err = podClient.Create(pod) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } // set up watch pods, err := podClient.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list pods: %v", err) } if len(pods.Items) != 3 { t.Fatalf("Expect only 3 pods") } // delete one of the replication controller if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil { t.Fatalf("failed to delete replication controller: %v", err) } // sometimes the deletion of the RC takes long time to be observed by // the gc, so wait for the garbage collector to observe the deletion of // the toBeDeletedRC if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { return !gc.GraphHasUID(toBeDeletedRC.ObjectMeta.UID), nil }); err != nil { t.Fatal(err) } if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 1*time.Second, 30*time.Second); err != nil { t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err) } // checks the garbage collect doesn't delete pods it shouldn't delete. if _, err := podClient.Get(independentPodName, metav1.GetOptions{}); err != nil { t.Fatal(err) } if _, err := podClient.Get(oneValidOwnerPodName, metav1.GetOptions{}); err != nil { t.Fatal(err) } }
explode_data.jsonl/18175
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1096 }
[ 2830, 3393, 34, 5061, 2228, 1912, 52625, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 6505, 1155, 11, 220, 20, 340, 16867, 5635, 31853, 59342, 2822, 3174, 66, 11, 2943, 1649, 1669, 5635, 65554, 11, 5635, 6581, 1649, 271, 84041, 1669, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRequestSetsNamespace(t *testing.T) { r := (&Request{ baseURL: &url.URL{ Path: "/", }, }).Namespace("foo") if r.namespace == "" { t.Errorf("namespace should be set: %#v", r) } if s := r.URL().String(); s != "namespaces/foo" { t.Errorf("namespace should be in path: %s", s) } }
explode_data.jsonl/13251
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 132 }
[ 2830, 3393, 1900, 30175, 22699, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 15899, 1900, 515, 197, 24195, 3144, 25, 609, 1085, 20893, 515, 298, 69640, 25, 3521, 756, 197, 197, 1583, 197, 16630, 22699, 445, 7975, 1138, 743, 435, 50409, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMuxer(t *testing.T) { videoTrack, err := gortsplib.NewTrackH264(96, &gortsplib.TrackConfigH264{SPS: []byte{0x07, 0x01, 0x02, 0x03}, PPS: []byte{0x08}}) require.NoError(t, err) audioTrack, err := gortsplib.NewTrackAAC(97, &gortsplib.TrackConfigAAC{Type: 2, SampleRate: 44100, ChannelCount: 2}) require.NoError(t, err) m, err := NewMuxer(3, 1*time.Second, videoTrack, audioTrack) require.NoError(t, err) defer m.Close() // group without IDR err = m.WriteH264(1*time.Second, [][]byte{ {0x06}, {0x07}, }) require.NoError(t, err) // group with IDR err = m.WriteH264(2*time.Second, [][]byte{ {5}, // IDR {9}, // AUD {8}, // PPS {7}, // SPS }) require.NoError(t, err) err = m.WriteAAC(3*time.Second, [][]byte{ {0x01, 0x02, 0x03, 0x04}, {0x05, 0x06, 0x07, 0x08}, }) require.NoError(t, err) // group without IDR err = m.WriteH264(4*time.Second, [][]byte{ {6}, {7}, }) require.NoError(t, err) time.Sleep(2 * time.Second) // group with IDR err = m.WriteH264(6*time.Second, [][]byte{ {5}, // IDR }) require.NoError(t, err) byts, err := ioutil.ReadAll(m.PrimaryPlaylist()) require.NoError(t, err) require.Equal(t, "#EXTM3U\n"+ "#EXT-X-STREAM-INF:BANDWIDTH=200000,CODECS=\"avc1.010203,mp4a.40.2\"\n"+ "stream.m3u8\n", string(byts)) byts, err = ioutil.ReadAll(m.StreamPlaylist()) require.NoError(t, err) re := regexp.MustCompile(`^#EXTM3U\n` + `#EXT-X-VERSION:3\n` + `#EXT-X-ALLOW-CACHE:NO\n` + `#EXT-X-TARGETDURATION:4\n` + `#EXT-X-MEDIA-SEQUENCE:0\n` + `#EXTINF:4,\n` + `([0-9]+\.ts)\n$`) ma := re.FindStringSubmatch(string(byts)) require.NotEqual(t, 0, len(ma)) byts, err = ioutil.ReadAll(m.Segment(ma[1])) require.NoError(t, err) checkTSPacket(t, byts, 0, 1) byts = byts[188:] checkTSPacket(t, byts, 4096, 1) byts = byts[188:] checkTSPacket(t, byts, 256, 3) byts = byts[4+145+15:] require.Equal(t, []byte{ 0, 0, 0, 1, 9, 240, // AUD 0, 0, 0, 1, 7, 1, 2, 3, // SPS 0, 0, 0, 1, 8, // PPS 0, 0, 0, 1, 5, // IDR }, byts[:24], ) }
explode_data.jsonl/24142
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1053 }
[ 2830, 3393, 44, 2200, 261, 1155, 353, 8840, 836, 8, 341, 96947, 15667, 11, 1848, 1669, 342, 18955, 81682, 7121, 15667, 39, 17, 21, 19, 7, 24, 21, 345, 197, 197, 5, 70, 18955, 81682, 89090, 2648, 39, 17, 21, 19, 90, 4592, 50, 25,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreatePostDeduplicate(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() t.Run("duplicate create post is idempotent", func(t *testing.T) { pendingPostId := model.NewId() post, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "message", PendingPostId: pendingPostId, }, "", true) require.Nil(t, err) require.Equal(t, "message", post.Message) duplicatePost, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "message", PendingPostId: pendingPostId, }, "", true) require.Nil(t, err) require.Equal(t, post.Id, duplicatePost.Id, "should have returned previously created post id") require.Equal(t, "message", duplicatePost.Message) }) t.Run("post rejected by plugin leaves cache ready for non-deduplicated try", func(t *testing.T) { setupPluginApiTest(t, ` package main import ( "github.com/mattermost/mattermost-server/v5/plugin" "github.com/mattermost/mattermost-server/v5/model" ) type MyPlugin struct { plugin.MattermostPlugin allow bool } func (p *MyPlugin) MessageWillBePosted(c *plugin.Context, post *model.Post) (*model.Post, string) { if !p.allow { p.allow = true return nil, "rejected" } return nil, "" } func main() { plugin.ClientMain(&MyPlugin{}) } `, `{"id": "testrejectfirstpost", "backend": {"executable": "backend.exe"}}`, "testrejectfirstpost", th.App, th.Context) pendingPostId := model.NewId() post, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "message", PendingPostId: pendingPostId, }, "", true) require.NotNil(t, err) require.Equal(t, "Post rejected by plugin. 
rejected", err.Id) require.Nil(t, post) duplicatePost, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "message", PendingPostId: pendingPostId, }, "", true) require.Nil(t, err) require.Equal(t, "message", duplicatePost.Message) }) t.Run("slow posting after cache entry blocks duplicate request", func(t *testing.T) { setupPluginApiTest(t, ` package main import ( "github.com/mattermost/mattermost-server/v5/plugin" "github.com/mattermost/mattermost-server/v5/model" "time" ) type MyPlugin struct { plugin.MattermostPlugin instant bool } func (p *MyPlugin) MessageWillBePosted(c *plugin.Context, post *model.Post) (*model.Post, string) { if !p.instant { p.instant = true time.Sleep(3 * time.Second) } return nil, "" } func main() { plugin.ClientMain(&MyPlugin{}) } `, `{"id": "testdelayfirstpost", "backend": {"executable": "backend.exe"}}`, "testdelayfirstpost", th.App, th.Context) var post *model.Post pendingPostId := model.NewId() wg := sync.WaitGroup{} // Launch a goroutine to make the first CreatePost call that will get delayed // by the plugin above. wg.Add(1) go func() { defer wg.Done() var appErr *model.AppError post, appErr = th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "plugin delayed", PendingPostId: pendingPostId, }, "", true) require.Nil(t, appErr) require.Equal(t, post.Message, "plugin delayed") }() // Give the goroutine above a chance to start and get delayed by the plugin. time.Sleep(2 * time.Second) // Try creating a duplicate post duplicatePost, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "plugin delayed", PendingPostId: pendingPostId, }, "", true) require.NotNil(t, err) require.Equal(t, "api.post.deduplicate_create_post.pending", err.Id) require.Nil(t, duplicatePost) // Wait for the first CreatePost to finish to ensure assertions are made. 
wg.Wait() }) t.Run("duplicate create post after cache expires is not idempotent", func(t *testing.T) { pendingPostId := model.NewId() post, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "message", PendingPostId: pendingPostId, }, "", true) require.Nil(t, err) require.Equal(t, "message", post.Message) time.Sleep(PendingPostIDsCacheTTL) duplicatePost, err := th.App.CreatePostAsUser(th.Context, &model.Post{ UserId: th.BasicUser.Id, ChannelId: th.BasicChannel.Id, Message: "message", PendingPostId: pendingPostId, }, "", true) require.Nil(t, err) require.NotEqual(t, post.Id, duplicatePost.Id, "should have created new post id") require.Equal(t, "message", duplicatePost.Message) }) }
explode_data.jsonl/26423
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2110 }
[ 2830, 3393, 4021, 4133, 35, 291, 14070, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 2822, 3244, 16708, 445, 63826, 1855, 1736, 374, 877, 3262, 63532, 497, 2915, 1155, 353, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMediaTypes(t *testing.T) { mediaTypeTest(t, MediaTypeManifestList, "", true) mediaTypeTest(t, MediaTypeManifestList, MediaTypeManifestList, false) mediaTypeTest(t, MediaTypeManifestList, MediaTypeManifestList+"XXX", true) mediaTypeTest(t, v1.MediaTypeImageIndex, "", false) mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex, false) mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex+"XXX", true) }
explode_data.jsonl/11756
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 154 }
[ 2830, 3393, 12661, 4173, 1155, 353, 8840, 836, 8, 341, 197, 7399, 929, 2271, 1155, 11, 50423, 38495, 852, 11, 7342, 830, 340, 197, 7399, 929, 2271, 1155, 11, 50423, 38495, 852, 11, 50423, 38495, 852, 11, 895, 340, 197, 7399, 929, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConnectToMongoErrorNoUsername(t *testing.T) { originalMongoRootUsername := os.Getenv("MONGO_INITDB_ROOT_USERNAME") os.Setenv("MONGO_INITDB_ROOT_USERNAME", "") _, _, _, err := ConnectToMongo(mongoTestsHostURL, "", "") expectedError := errors.New("is not possible store notifications results in mongodb no value provided for MongoDB username") if err.Error() != expectedError.Error() { log.Fatal("Error expected is not equal to error obtained") } defer os.Setenv("MONGO_INITDB_ROOT_USERNAME", originalMongoRootUsername) }
explode_data.jsonl/71135
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 186 }
[ 2830, 3393, 14611, 1249, 54998, 1454, 2753, 11115, 1155, 353, 8840, 836, 8, 341, 197, 9889, 54998, 8439, 11115, 1669, 2643, 64883, 445, 44, 63984, 14446, 3506, 16197, 42696, 1138, 25078, 4202, 3160, 445, 44, 63984, 14446, 3506, 16197, 426...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecsWithDefaultAndCustomReporters(t, "Webhook Suite", []Reporter{printer.NewlineReporter{}}) }
explode_data.jsonl/9344
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 61 }
[ 2830, 3393, 7082, 82, 1155, 353, 8840, 836, 8, 341, 79096, 19524, 3050, 7832, 604, 692, 85952, 8327, 16056, 3675, 3036, 10268, 10361, 388, 1155, 345, 197, 197, 1, 5981, 20873, 20977, 756, 197, 197, 1294, 52766, 90, 62956, 7121, 1056, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestEditAddedFilmFilmNotFound(t *testing.T) { testTelegramClientInst, answerChan, _ := NewTestMovieBot("./test_data/test_data.sql") updates := make(chan tgbotapi.Update) go testTelegramClientInst.EditAddedFilm(updates) updates <- tgbotapi.Update{Message: &tgbotapi.Message{Text: "", Chat: &tgbotapi.Chat{ID: 1}}} answer := <-answerChan expectedAnswer := "Напишите название редактируемого фильма." if answer != expectedAnswer { t.Errorf(fmt.Sprintf("Not expected bot answer: %s, expected: %s", answer, expectedAnswer)) return } searchedFims := "NotExistingFilm" updates <- tgbotapi.Update{Message: &tgbotapi.Message{Text: searchedFims, Chat: &tgbotapi.Chat{ID: 1}}} answer = <-answerChan expectedAnswer = fmt.Sprintf("Фильм %s, не найден в шляпе", searchedFims) if answer != expectedAnswer { t.Errorf(fmt.Sprintf("Not expected bot answer: %s, expected: %s", answer, expectedAnswer)) return } t.Logf("TestEditAddedFilmFilmNotFound complete") }
explode_data.jsonl/19228
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 391 }
[ 2830, 3393, 4036, 19337, 51487, 51487, 10372, 1155, 353, 8840, 836, 8, 341, 18185, 72244, 2959, 8724, 11, 4226, 46019, 11, 716, 1669, 1532, 2271, 19668, 23502, 13988, 1944, 1769, 12697, 1769, 10045, 1138, 197, 49661, 1669, 1281, 35190, 53...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestParsesEnvInner(t *testing.T) { os.Setenv("innervar", "someinnervalue") defer os.Clearenv() cfg := ParentStruct{ InnerStruct: &InnerStruct{}, unexported: &InnerStruct{}, } assert.NoError(t, env.Parse(&cfg)) assert.Equal(t, "someinnervalue", cfg.InnerStruct.Inner) }
explode_data.jsonl/7478
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 47, 1561, 288, 14359, 31597, 1155, 353, 8840, 836, 8, 341, 25078, 4202, 3160, 445, 6130, 648, 277, 497, 330, 14689, 6130, 648, 540, 1138, 16867, 2643, 727, 273, 9151, 85, 741, 50286, 1669, 17022, 9422, 515, 197, 197, 31597...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPriorityQueuePull(t *testing.T) { item := struct{}{} pq := New(true) pq.Push(item, 1) pulled, err := pq.Pull() if err != nil { t.Error(err) } quickAssert(pulled, item, t) }
explode_data.jsonl/16773
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 88 }
[ 2830, 3393, 20555, 7554, 36068, 1155, 353, 8840, 836, 8, 341, 22339, 1669, 2036, 6257, 16094, 3223, 80, 1669, 1532, 3715, 340, 3223, 80, 34981, 5393, 11, 220, 16, 340, 3223, 91022, 11, 1848, 1669, 39639, 97357, 741, 743, 1848, 961, 20...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestNativeCallWithRuntimeParameter(t *testing.T) { vm := New() vm.Set("f", func(_ FunctionCall, r *Runtime) Value { if r == vm { return valueTrue } return valueFalse }) ret, err := vm.RunString(`f()`) if err != nil { t.Fatal(err) } if ret != valueTrue { t.Fatal(ret) } }
explode_data.jsonl/10522
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 127 }
[ 2830, 3393, 20800, 7220, 2354, 15123, 4971, 1155, 353, 8840, 836, 8, 341, 54879, 1669, 1532, 741, 54879, 4202, 445, 69, 497, 2915, 2490, 5712, 7220, 11, 435, 353, 15123, 8, 5162, 341, 197, 743, 435, 621, 10995, 341, 298, 853, 897, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestAddGCLive(t *testing.T) { r := &repo.Mock{ C: config.Config{ Identity: config.Identity{ PeerID: testPeerID, // required by offline node }, }, D: syncds.MutexWrap(datastore.NewMapDatastore()), } node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r}) if err != nil { t.Fatal(err) } out := make(chan interface{}) adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG) if err != nil { t.Fatal(err) } adder.Out = out dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA")) rfa := files.NewReaderFile("a", "a", dataa, nil) // make two files with pipes so we can 'pause' the add for timing of the test piper, pipew := io.Pipe() hangfile := files.NewReaderFile("b", "b", piper, nil) datad := ioutil.NopCloser(bytes.NewBufferString("testfileD")) rfd := files.NewReaderFile("d", "d", datad, nil) slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd}) addDone := make(chan struct{}) go func() { defer close(addDone) defer close(out) err := adder.AddFile(slf) if err != nil { t.Fatal(err) } }() addedHashes := make(map[string]struct{}) select { case o := <-out: addedHashes[o.(*AddedObject).Hash] = struct{}{} case <-addDone: t.Fatal("add shouldnt complete yet") } var gcout <-chan gc.Result gcstarted := make(chan struct{}) go func() { defer close(gcstarted) gcout = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) }() // gc shouldnt start until we let the add finish its current file. 
pipew.Write([]byte("some data for file b")) select { case <-gcstarted: t.Fatal("gc shouldnt have started yet") default: } time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock // finish write and unblock gc pipew.Close() // receive next object from adder o := <-out addedHashes[o.(*AddedObject).Hash] = struct{}{} <-gcstarted for r := range gcout { if r.Error != nil { t.Fatal(err) } if _, ok := addedHashes[r.KeyRemoved.String()]; ok { t.Fatal("gc'ed a hash we just added") } } var last *cid.Cid for a := range out { // wait for it to finish c, err := cid.Decode(a.(*AddedObject).Hash) if err != nil { t.Fatal(err) } last = c } ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() set := cid.NewSet() err = dag.EnumerateChildren(ctx, dag.GetLinksWithDAG(node.DAG), last, set.Visit) if err != nil { t.Fatal(err) } }
explode_data.jsonl/39683
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1019 }
[ 2830, 3393, 2212, 38, 3140, 533, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 609, 23476, 24664, 515, 197, 6258, 25, 2193, 10753, 515, 298, 197, 18558, 25, 2193, 24423, 515, 571, 10025, 34756, 915, 25, 1273, 30888, 915, 11, 442, 2567, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNewRequestContentTypeIsSet(t *testing.T) { client := newClient() req, err := client.NewRequestWithJSONBody( context.Background(), "GET", "/", nil, []string{}, ) if err != nil { t.Fatal(err) } if req.Header.Get("Content-Type") != "application/json" { t.Fatal("expected different Content-Type here") } }
explode_data.jsonl/60964
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 118 }
[ 2830, 3393, 3564, 1900, 29504, 3872, 1649, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 501, 2959, 741, 24395, 11, 1848, 1669, 2943, 75274, 2354, 5370, 5444, 1006, 197, 28413, 19047, 1507, 330, 3806, 497, 64657, 2092, 11, 3056, 917, 38837...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCreateDaisyInflater_SetsWorkflowNameToGcloudPrefix(t *testing.T) { inflater := createDaisyInflaterForImageSafe(t, ImageImportRequest{ Source: imageSource{uri: "projects/test/uri/image"}, DaisyLogLinePrefix: "disk-1", }) daisyutils.CheckEnvironment(inflater.worker, func(env daisyutils.EnvironmentSettings) { assert.Equal(t, "disk-1-inflate", env.DaisyLogLinePrefix) }) }
explode_data.jsonl/75624
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 157 }
[ 2830, 3393, 4021, 35, 49056, 12342, 1098, 1415, 62768, 675, 1249, 38, 12361, 14335, 1155, 353, 8840, 836, 8, 341, 17430, 11729, 1669, 1855, 35, 49056, 12342, 2461, 1906, 25663, 1155, 11, 4654, 11511, 1900, 515, 197, 197, 3608, 25, 1797,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCheckLeaseWithSig(t *testing.T) { to, path := createCheckerTestObjects(t) defer func() { to.stor.close(t) err := common.CleanTemporaryDirs(path) assert.NoError(t, err, "failed to clean test data dirs") }() tx := createLeaseWithSig(t) info := defaultCheckerInfo(t) tx.Recipient = proto.NewRecipientFromAddress(testGlobal.senderInfo.addr) _, err := to.tc.checkLeaseWithSig(tx, info) assert.Error(t, err, "checkLeaseWithSig did not fail when leasing to self") tx = createLeaseWithSig(t) _, err = to.tc.checkLeaseWithSig(tx, info) assert.NoError(t, err, "checkLeaseWithSig failed with valid lease tx") }
explode_data.jsonl/63088
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 244 }
[ 2830, 3393, 3973, 2304, 519, 2354, 47246, 1155, 353, 8840, 836, 8, 341, 31709, 11, 1815, 1669, 1855, 35188, 2271, 11543, 1155, 692, 16867, 2915, 368, 341, 197, 31709, 1236, 269, 4653, 1155, 692, 197, 9859, 1669, 4185, 727, 2675, 59362, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSinglePolicySinglePod applies a single ingress policy (allowing pod2 to
// reach pod1 on TCP ports 80 and 443) to one pod, then uses a mock renderer
// to check which simulated traffic flows come out allowed, denied, or
// unmatched by any policy.
func TestSinglePolicySinglePod(t *testing.T) {
	gomega.RegisterTestingT(t)
	logger := logrus.DefaultLogger()
	logger.SetLevel(logging.DebugLevel)
	logger.Debug("TestSinglePolicySinglePod")

	// Prepare input data.
	const (
		namespace = "default"
		pod1Name  = "pod1"
		pod2Name  = "pod2"
		pod1IP    = "192.168.1.1"
		pod2IP    = "192.168.1.2"
	)
	pod1 := podmodel.ID{Name: pod1Name, Namespace: namespace}
	pod2 := podmodel.ID{Name: pod2Name, Namespace: namespace}
	// policy1: ingress policy on pod1 permitting TCP 80/443 from pod2 only.
	policy1 := &ContivPolicy{
		ID:   policymodel.ID{Name: "policy1", Namespace: namespace},
		Type: PolicyIngress,
		Matches: []Match{
			{
				Type: MatchIngress,
				Pods: []podmodel.ID{
					pod2,
				},
				Ports: []Port{
					{Protocol: TCP, Number: 80},
					{Protocol: TCP, Number: 443},
				},
			},
		},
	}
	pod1Policies := []*ContivPolicy{policy1}

	// Initialize mocks.
	cache := NewMockPolicyCache()
	cache.AddPodConfig(pod1, pod1IP)
	cache.AddPodConfig(pod2, pod2IP)
	ipam := &ipamMock{}
	ipam.SetNatLoopbackIP(natLoopbackIP)
	renderer := NewMockRenderer("A", logger)

	// Initialize configurator.
	configurator := &PolicyConfigurator{
		Deps: Deps{
			Log:   logger,
			Cache: cache,
			IPAM:  ipam,
		},
	}
	configurator.Init(false)

	// Register one renderer.
	err := configurator.RegisterRenderer(renderer)
	gomega.Expect(err).To(gomega.BeNil())

	// Run single transaction.
	txn := configurator.NewTxn(false)
	txn.Configure(pod1, pod1Policies)
	err = txn.Commit()
	gomega.Expect(err).To(gomega.BeNil())

	// Test IP address provided by the configurator.
	// A /32 mask (IPv4len * 8 bits) means the policy targets exactly pod1's IP.
	ip, masklen := renderer.GetPodIP(pod1)
	gomega.Expect(masklen).To(gomega.BeEquivalentTo(net.IPv4len * 8))
	gomega.Expect(ip).To(gomega.BeEquivalentTo(pod1IP))

	// Test with fake traffic.
	// NOTE(review): TestTraffic's numeric args appear to be (src port, dst
	// port) — confirm against the mock renderer's signature.

	// Allowed by policy1.
	action := renderer.TestTraffic(pod1, EgressTraffic, parseIP(pod2IP), parseIP(pod1IP), rendererAPI.TCP, 123, 80)
	gomega.Expect(action).To(gomega.BeEquivalentTo(AllowedTraffic))

	// Allowed by policy1.
	action = renderer.TestTraffic(pod1, EgressTraffic, parseIP(pod2IP), parseIP(pod1IP), rendererAPI.TCP, 456, 443)
	gomega.Expect(action).To(gomega.BeEquivalentTo(AllowedTraffic))

	// Always allowed from NAT-loopback.
	action = renderer.TestTraffic(pod1, EgressTraffic, parseIP(natLoopbackIP), parseIP(pod1IP), rendererAPI.TCP, 456, 100)
	gomega.Expect(action).To(gomega.BeEquivalentTo(AllowedTraffic))
	action = renderer.TestTraffic(pod1, EgressTraffic, parseIP(natLoopbackIP), parseIP(pod1IP), rendererAPI.OTHER, 0, 0)
	gomega.Expect(action).To(gomega.BeEquivalentTo(AllowedTraffic))

	// Not covered by any policy.
	action = renderer.TestTraffic(pod1, IngressTraffic, parseIP(pod1IP), parseIP(pod2IP), rendererAPI.TCP, 123, 456)
	gomega.Expect(action).To(gomega.BeEquivalentTo(UnmatchedTraffic))

	// Not covered by any policy.
	action = renderer.TestTraffic(pod1, IngressTraffic, parseIP(pod1IP), parseIP(pod2IP), rendererAPI.OTHER, 0, 0)
	gomega.Expect(action).To(gomega.BeEquivalentTo(UnmatchedTraffic))

	// Blocked by policy1 - TCP:100 not allowed.
	action = renderer.TestTraffic(pod1, EgressTraffic, parseIP(pod2IP), parseIP(pod1IP), rendererAPI.TCP, 789, 100)
	gomega.Expect(action).To(gomega.BeEquivalentTo(DeniedTraffic))

	// Blocked by policy1 - UDP not allowed.
	action = renderer.TestTraffic(pod1, EgressTraffic, parseIP(pod2IP), parseIP(pod1IP), rendererAPI.UDP, 123, 80)
	gomega.Expect(action).To(gomega.BeEquivalentTo(DeniedTraffic))

	// Blocked by policy1 - other protocols not allowed.
	action = renderer.TestTraffic(pod1, EgressTraffic, parseIP(pod2IP), parseIP(pod1IP), rendererAPI.OTHER, 0, 0)
	gomega.Expect(action).To(gomega.BeEquivalentTo(DeniedTraffic))
}
explode_data.jsonl/19615
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1505 }
[ 2830, 3393, 10888, 13825, 10888, 23527, 1155, 353, 8840, 836, 8, 341, 3174, 32696, 19983, 16451, 51, 1155, 340, 17060, 1669, 1487, 20341, 13275, 7395, 741, 17060, 4202, 4449, 51687, 20345, 4449, 340, 17060, 20345, 445, 2271, 10888, 13825, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestServerRootWithToken(t *testing.T) { token := "verysecret" badToken := "Verysecret" ucfg, err := common.NewConfigFrom(map[string]interface{}{"secret_token": token}) assert.NoError(t, err) apm, teardown, err := setupServer(t, ucfg, nil) require.NoError(t, err) defer teardown() baseUrl, client := apm.client(false) rootRequest := func(token *string) *http.Response { req, err := http.NewRequest(http.MethodGet, baseUrl+"/", nil) require.NoError(t, err, "Failed to create test request object: %v", err) if token != nil { req.Header.Add("Authorization", "Bearer "+*token) } res, err := client.Do(req) require.NoError(t, err) return res } noToken := body(t, rootRequest(nil)) withToken := body(t, rootRequest(&token)) assert.NotEqual(t, token, badToken) withBadToken := body(t, rootRequest(&badToken)) assert.Equal(t, 0, len(noToken), noToken) assert.True(t, len(withToken) > 0, withToken) assert.NotEqual(t, noToken, withToken) assert.Equal(t, noToken, withBadToken) }
explode_data.jsonl/4938
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 386 }
[ 2830, 3393, 5475, 8439, 2354, 3323, 1155, 353, 8840, 836, 8, 341, 43947, 1669, 330, 1204, 20474, 698, 2233, 329, 3323, 1669, 330, 25756, 20474, 698, 197, 1754, 4817, 11, 1848, 1669, 4185, 7121, 2648, 3830, 9147, 14032, 31344, 6257, 4913...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCRUDOfApplicationConfigWithMatchSpecificationResourceWithMockServer(t *testing.T) { httpServer := createMockHttpServerForResource(restapi.ApplicationConfigsResourcePath, serverResponseWithMatchSpecificationTemplate) httpServer.Start() defer httpServer.Close() resource.UnitTest(t, resource.TestCase{ Providers: testProviders, Steps: []resource.TestStep{ createApplicationConfigWithMatchSpecificationResourceTestStep(httpServer.GetPort(), 0), testStepImport(testApplicationConfigDefinition), createApplicationConfigWithMatchSpecificationResourceTestStep(httpServer.GetPort(), 1), testStepImport(testApplicationConfigDefinition), }, }) }
explode_data.jsonl/64914
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 187 }
[ 2830, 3393, 8973, 4656, 2124, 4988, 2648, 2354, 8331, 56139, 4783, 2354, 11571, 5475, 1155, 353, 8840, 836, 8, 341, 28080, 5475, 1669, 1855, 11571, 2905, 5475, 90100, 62036, 2068, 17521, 84905, 4783, 1820, 11, 3538, 2582, 2354, 8331, 5613...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNasTypePDUSESSIONRELEASEREQUESTMessageIdentityGetSetMessageType(t *testing.T) { a := nasType.NewPDUSESSIONRELEASEREQUESTMessageIdentity() for _, table := range nasTypePDUSESSIONRELEASEREQUESTMessageIdentityTable { a.SetMessageType(table.in) assert.Equal(t, table.out, a.GetMessageType()) } }
explode_data.jsonl/11126
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 109 }
[ 2830, 3393, 45, 300, 929, 47, 21547, 6302, 787, 867, 1911, 42172, 6671, 2052, 18558, 1949, 1649, 82107, 1155, 353, 8840, 836, 8, 341, 11323, 1669, 17141, 929, 7121, 47, 21547, 6302, 787, 867, 1911, 42172, 6671, 2052, 18558, 741, 2023, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// Test_carbonreceiver_New exercises the carbon receiver constructor across a
// table of configurations, checking the returned error against expectations
// and, on success, that the receiver shuts down cleanly.
func Test_carbonreceiver_New(t *testing.T) {
	defaultConfig := createDefaultConfig().(*Config)
	type args struct {
		config       Config
		nextConsumer consumer.Metrics
	}
	// Each case pairs a constructor input with the exact error expected
	// (nil wantErr means construction must succeed).
	tests := []struct {
		name    string
		args    args
		wantErr error
	}{
		{
			name: "default_config",
			args: args{
				config:       *defaultConfig,
				nextConsumer: consumertest.NewNop(),
			},
		},
		{
			// A nil Parser in the config must be tolerated by New.
			name: "zero_value_parser",
			args: args{
				config: Config{
					ReceiverSettings: defaultConfig.ReceiverSettings,
					NetAddr: confignet.NetAddr{
						Endpoint:  defaultConfig.Endpoint,
						Transport: defaultConfig.Transport,
					},
					TCPIdleTimeout: defaultConfig.TCPIdleTimeout,
				},
				nextConsumer: consumertest.NewNop(),
			},
		},
		{
			name: "nil_nextConsumer",
			args: args{
				config: *defaultConfig,
			},
			wantErr: errNilNextConsumer,
		},
		{
			name: "empty_endpoint",
			args: args{
				config: Config{
					ReceiverSettings: config.ReceiverSettings{},
				},
				nextConsumer: consumertest.NewNop(),
			},
			wantErr: errEmptyEndpoint,
		},
		{
			name: "invalid_transport",
			args: args{
				config: Config{
					ReceiverSettings: config.ReceiverSettings{
						NameVal: "invalid_transport_rcv",
					},
					NetAddr: confignet.NetAddr{
						Endpoint:  "localhost:2003",
						Transport: "unknown_transp",
					},
					Parser: &protocol.Config{
						Type:   "plaintext",
						Config: &protocol.PlaintextConfig{},
					},
				},
				nextConsumer: consumertest.NewNop(),
			},
			wantErr: errors.New("unsupported transport \"unknown_transp\" for receiver \"invalid_transport_rcv\""),
		},
		{
			name: "regex_parser",
			args: args{
				config: Config{
					ReceiverSettings: config.ReceiverSettings{
						NameVal: "regex_parser_rcv",
					},
					NetAddr: confignet.NetAddr{
						Endpoint:  "localhost:2003",
						Transport: "tcp",
					},
					Parser: &protocol.Config{
						Type: "regex",
						Config: &protocol.RegexParserConfig{
							Rules: []*protocol.RegexRule{
								{
									Regexp: `(?P<key_root>[^.]*)\.test`,
								},
							},
						},
					},
				},
				nextConsumer: consumertest.NewNop(),
			},
		},
		{
			name: "negative_tcp_idle_timeout",
			args: args{
				config: Config{
					ReceiverSettings: config.ReceiverSettings{
						NameVal: "negative_tcp_idle_timeout",
					},
					NetAddr: confignet.NetAddr{
						Endpoint:  "localhost:2003",
						Transport: "tcp",
					},
					TCPIdleTimeout: -1 * time.Second,
					Parser: &protocol.Config{
						Type:   "plaintext",
						Config: &protocol.PlaintextConfig{},
					},
				},
				nextConsumer: consumertest.NewNop(),
			},
			wantErr: errors.New("invalid idle timeout: -1s"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := New(zap.NewNop(), tt.args.config, tt.args.nextConsumer)
			// Errors are compared by value (assert.Equal), so the expected
			// errors above must match the constructor's messages exactly.
			assert.Equal(t, tt.wantErr, err)
			if err == nil {
				require.NotNil(t, got)
				assert.NoError(t, got.Shutdown(context.Background()))
			} else {
				assert.Nil(t, got)
			}
		})
	}
}
explode_data.jsonl/81121
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1450 }
[ 2830, 3393, 26616, 5970, 37553, 39582, 1155, 353, 8840, 836, 8, 341, 11940, 2648, 1669, 1855, 3675, 2648, 1005, 4071, 2648, 340, 13158, 2827, 2036, 341, 197, 25873, 981, 5532, 198, 197, 28144, 29968, 11502, 1321, 13468, 198, 197, 532, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSpread(t *testing.T) { t.Parallel() // Scenario: inst1 initiates to inst2, inst3 inst4 and each have items 0-100. inst5 also has the same items but isn't selected // Expected outcome: each responder (inst2, inst3 and inst4) is chosen at least once (the probability for not choosing each of them is slim) // inst5 isn't selected at all peers := make(map[string]*pullTestInstance) inst1 := newPushPullTestInstance("p1", peers) inst2 := newPushPullTestInstance("p2", peers) inst3 := newPushPullTestInstance("p3", peers) inst4 := newPushPullTestInstance("p4", peers) inst5 := newPushPullTestInstance("p5", peers) defer inst1.stop() defer inst2.stop() defer inst3.stop() defer inst4.stop() defer inst5.stop() chooseCounters := make(map[string]int) chooseCounters["p2"] = 0 chooseCounters["p3"] = 0 chooseCounters["p4"] = 0 chooseCounters["p5"] = 0 lock := &sync.Mutex{} addToCounters := func(dest string) func(m interface{}) { return func(m interface{}) { if _, isReq := m.(*reqMsg); isReq { lock.Lock() chooseCounters[dest]++ lock.Unlock() } } } inst2.hook(addToCounters("p2")) inst3.hook(addToCounters("p3")) inst4.hook(addToCounters("p4")) inst5.hook(addToCounters("p5")) for i := 0; i < 100; i++ { item := fmt.Sprintf("%d", i) inst2.Add(item) inst3.Add(item) inst4.Add(item) } inst1.setNextPeerSelection([]string{"p2", "p3", "p4"}) time.Sleep(time.Duration(2000) * time.Millisecond) lock.Lock() for pI, counter := range chooseCounters { if pI == "p5" { assert.Equal(t, 0, counter) } else { assert.True(t, counter > 0, "%s was not selected!", pI) } } lock.Unlock() }
explode_data.jsonl/56508
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 671 }
[ 2830, 3393, 46486, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 322, 58663, 25, 1761, 16, 12672, 973, 311, 1761, 17, 11, 1761, 18, 1761, 19, 323, 1817, 614, 3589, 220, 15, 12, 16, 15, 15, 13, 1761, 20, 1083, 702, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMapPinMultiple(t *testing.T) { tmp := testutils.TempBPFFS(t) c := qt.New(t) spec := spec1.Copy() m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) if err != nil { t.Fatal("Can't create map:", err) } defer m1.Close() pinned := m1.IsPinned() c.Assert(pinned, qt.Equals, true) newPath := filepath.Join(tmp, "bar") err = m1.Pin(newPath) c.Assert(err, qt.IsNil) oldPath := filepath.Join(tmp, spec.Name) if _, err := os.Stat(oldPath); err == nil { t.Fatal("Previous pinned map path still exists:", err) } m2, err := LoadPinnedMap(newPath, nil) c.Assert(err, qt.IsNil) defer m2.Close() }
explode_data.jsonl/21652
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 276 }
[ 2830, 3393, 2227, 19861, 32089, 1155, 353, 8840, 836, 8, 341, 20082, 1669, 1273, 6031, 65009, 26095, 1748, 50, 1155, 340, 1444, 1669, 38949, 7121, 1155, 692, 98100, 1669, 1398, 16, 31770, 2822, 2109, 16, 11, 1848, 1669, 1532, 2227, 7423...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetKeyWhenLabelIsWrong(t *testing.T) { s, ctx := newSessionWithMock() pubKey := &rsa.PublicKey{N: big.NewInt(1), E: 1} rightLabel := "label" var objectsToReturn []pkcs11.ObjectHandle ctx.FindObjectsInitFunc = func(_ pkcs11.SessionHandle, attr []*pkcs11.Attribute) error { objectsToReturn = []pkcs11.ObjectHandle{1} for _, a := range attr { if a.Type == pkcs11.CKA_LABEL && !bytes.Equal(a.Value, []byte(rightLabel)) { objectsToReturn = nil } } return nil } ctx.FindObjectsFunc = func(_ pkcs11.SessionHandle, _ int) ([]pkcs11.ObjectHandle, bool, error) { return objectsToReturn, false, nil } ctx.FindObjectsFinalFunc = func(_ pkcs11.SessionHandle) error { return nil } _, err := s.NewSigner("wrong-label", pubKey) test.AssertError(t, err, "newSigner didn't fail when label was a mismatch for public key") expected := "no objects found matching provided template" if !strings.Contains(err.Error(), expected) { t.Errorf("expected error to contain %q but it was %q", expected, err) } }
explode_data.jsonl/1161
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 392 }
[ 2830, 3393, 1949, 1592, 4498, 2476, 3872, 29185, 1155, 353, 8840, 836, 8, 341, 1903, 11, 5635, 1669, 501, 5283, 2354, 11571, 741, 62529, 1592, 1669, 609, 60869, 49139, 1592, 90, 45, 25, 2409, 7121, 1072, 7, 16, 701, 468, 25, 220, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestContractRequestHandler(t *testing.T) { dbConn, err := sql.Open("sqlite3", constants.AccountsTable) if err != nil { t.Errorf("failed to open database connection : %v", err) } defer func() { if err := dbConn.Close(); err != nil { t.Errorf("failed to close database connection : %v", err) } if err := os.Remove(constants.AccountsTable); err != nil { t.Errorf("failed to remove database : %v", err) } }() statement, _ := dbConn.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE) statement.Exec() pMap := pendingpool.NewPendingMap() contractChan := make(chan contracts.Contract, 2) pLock := new(sync.Mutex) handler := http.HandlerFunc(HandleContractRequest(dbConn, contractChan, pMap, pLock)) senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) encodedSenderPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey) encodedSenderStr := hex.EncodeToString(hashing.New(encodedSenderPublicKey)) recipientPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) encodedRecipientPublicKey, _ := publickey.Encode(&recipientPrivateKey.PublicKey) var recipientWalletAddress = hashing.New(encodedRecipientPublicKey) sender2PrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) encodedSender2PublicKey, _ := publickey.Encode(&sender2PrivateKey.PublicKey) encodedSender2Str := hex.EncodeToString(hashing.New(encodedSender2PublicKey)) var walletAddress2 = hashing.New(encodedSender2PublicKey) if err := accountstable.InsertAccountIntoAccountBalanceTable(dbConn, walletAddress2, 5000); err != nil { t.Errorf("failed to insert sender account") } testContract, req, err := createContractNReq(1, senderPrivateKey, recipientWalletAddress, 25, 1) if err != nil { t.Errorf("failed to make contract : %v", err) } testContract2, req2, err := createContractNReq(1, senderPrivateKey, recipientWalletAddress, 59, 2) if err != nil { t.Errorf("failed to make contract : %v", err) } invalidNonceContract, invalidNonceReq, err := createContractNReq(1, senderPrivateKey, 
recipientWalletAddress, 10, 4) if err != nil { t.Errorf("failed to make contract : %v", err) } invalidBalanceContract, invalidBalanceReq, err := createContractNReq(1, senderPrivateKey, recipientWalletAddress, 100000, 3) if err != nil { t.Errorf("failed to make contract : %v", err) } testContract3, req3, err := createContractNReq(1, senderPrivateKey, recipientWalletAddress, 100, 3) if err != nil { t.Errorf("failed to make contract : %v", err) } diffSenderContract, diffSenderReq, err := createContractNReq(1, sender2PrivateKey, recipientWalletAddress, 10, 1) if err != nil { t.Errorf("failed to make contract : %v", err) } rr := httptest.NewRecorder() handler.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusBadRequest { t.Errorf("handler returned with wrong status code: got %v want %v", status, http.StatusBadRequest) t.Logf("%s", rr.Body.String()) } var walletAddress = hashing.New(encodedSenderPublicKey) if err := accountstable.InsertAccountIntoAccountBalanceTable(dbConn, walletAddress, 1337); err != nil { t.Errorf("failed to insert sender account") } req, err = requests.NewContractRequest("", *testContract) if err != nil { t.Errorf("failed to create new contract request: %v", err) } tests := []struct { name string c *contracts.Contract req *http.Request wantBal uint64 wantNonce uint64 key string status int }{ { "valid contract", testContract, req, 1312, 1, encodedSenderStr, http.StatusOK, }, { "valid contract2", testContract2, req2, 1337 - 25 - 59, 2, encodedSenderStr, http.StatusOK, }, { "invalid nonce contract", invalidNonceContract, invalidNonceReq, 1337 - 25 - 59, 2, encodedSenderStr, http.StatusBadRequest, }, { "invalid balance contract", invalidBalanceContract, invalidBalanceReq, 1337 - 25 - 59, 2, encodedSenderStr, http.StatusBadRequest, }, { "valid contract3", testContract3, req3, 1337 - 25 - 59 - 100, 3, encodedSenderStr, http.StatusOK, }, { "Diff sender contract", diffSenderContract, diffSenderReq, 5000 - 10, 1, encodedSender2Str, http.StatusOK, 
}, } var wG sync.WaitGroup for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { rr = httptest.NewRecorder() wG.Add(1) go func() { handler.ServeHTTP(rr, tt.req) wG.Done() }() wG.Wait() status := rr.Code if status != tt.status { t.Errorf("handler returned with wrong status code: got %v want %v", status, tt.status) } if status == http.StatusOK { channelledContract := <-contractChan if !tt.c.Equals(channelledContract) { t.Errorf("contracts do not match: got %+v want %+v", *tt.c, channelledContract) } if pMap.Sender[tt.key].PendingBal != tt.wantBal { t.Errorf("balance do not match") } if pMap.Sender[tt.key].PendingNonce != tt.wantNonce { t.Errorf("state nonce do not match") } } if i < 5 { if l := len(pMap.Sender); l != 1 { t.Errorf("number of key-value pairs in map does not match: got %v want %v", l, 1) } } else { if l := len(pMap.Sender); l != 2 { t.Errorf("number of key-value pairs in map does not match: got %v want %v", l, 2) } } }) } }
explode_data.jsonl/9365
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2153 }
[ 2830, 3393, 14067, 1900, 3050, 1155, 353, 8840, 836, 8, 341, 20939, 9701, 11, 1848, 1669, 5704, 12953, 445, 37042, 18, 497, 18021, 30877, 82, 2556, 340, 743, 1848, 961, 2092, 341, 197, 3244, 13080, 445, 16091, 311, 1787, 4625, 3633, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDNSNameNotValidTLD(t *testing.T) { inputPath := "dnsNameNotValidTLD.pem" expected := lint.Error out := test.TestLint("e_dnsname_not_valid_tld", inputPath) if out.Status != expected { t.Errorf("%s: expected %s, got %s", inputPath, expected, out.Status) } }
explode_data.jsonl/24842
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 110 }
[ 2830, 3393, 61088, 675, 2623, 4088, 51, 12335, 1155, 353, 8840, 836, 8, 341, 22427, 1820, 1669, 330, 45226, 675, 2623, 4088, 51, 12335, 49373, 698, 42400, 1669, 57920, 6141, 198, 13967, 1669, 1273, 8787, 47556, 445, 68, 71125, 606, 7913...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLocalStorageEnvWithFeatureGate(t *testing.T) { testCases := []core.EnvVar{ { Name: "ephemeral-storage-limits", ValueFrom: &core.EnvVarSource{ ResourceFieldRef: &core.ResourceFieldSelector{ ContainerName: "test-container", Resource: "limits.ephemeral-storage", }, }, }, { Name: "ephemeral-storage-requests", ValueFrom: &core.EnvVarSource{ ResourceFieldRef: &core.ResourceFieldSelector{ ContainerName: "test-container", Resource: "requests.ephemeral-storage", }, }, }, } // Enable alpha feature LocalStorageCapacityIsolation err := utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=true") if err != nil { t.Errorf("Failed to enable feature gate for LocalStorageCapacityIsolation: %v", err) return } for _, testCase := range testCases { if errs := validateEnvVarValueFrom(testCase, field.NewPath("field")); len(errs) != 0 { t.Errorf("expected success, got: %v", errs) } } // Disable alpha feature LocalStorageCapacityIsolation err = utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=false") if err != nil { t.Errorf("Failed to disable feature gate for LocalStorageCapacityIsolation: %v", err) return } for _, testCase := range testCases { if errs := validateEnvVarValueFrom(testCase, field.NewPath("field")); len(errs) == 0 { t.Errorf("expected failure for %v", testCase.Name) } } }
explode_data.jsonl/27070
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 537 }
[ 2830, 3393, 90464, 14359, 2354, 13859, 42318, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 2153, 81214, 3962, 515, 197, 197, 515, 298, 21297, 25, 330, 23544, 336, 3253, 62795, 2852, 22866, 756, 298, 47399, 3830, 25, 609, 2153...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestStatusListToJson(t *testing.T) { statuses := []*Status{{NewId(), STATUS_ONLINE, true, 0, "123"}, {NewId(), STATUS_OFFLINE, true, 0, ""}} jsonStatuses := StatusListToJson(statuses) var dat []map[string]interface{} if err := json.Unmarshal([]byte(jsonStatuses), &dat); err != nil { panic(err) } assert.Equal(t, len(dat), 2) _, ok := dat[0]["active_channel"] assert.False(t, ok) assert.Equal(t, statuses[0].ActiveChannel, "123") assert.Equal(t, statuses[0].UserId, dat[0]["user_id"]) assert.Equal(t, statuses[1].UserId, dat[1]["user_id"]) }
explode_data.jsonl/67569
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 236 }
[ 2830, 3393, 2522, 852, 78967, 1155, 353, 8840, 836, 8, 341, 23847, 288, 1669, 29838, 2522, 2979, 3564, 764, 1507, 24014, 11077, 8265, 11, 830, 11, 220, 15, 11, 330, 16, 17, 18, 14345, 314, 3564, 764, 1507, 24014, 11987, 8265, 11, 83...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestQemuLauncherHandle(t *testing.T) { launcher := &QemuLauncher{Pid: 4141, TmpDir: "/tmp/woof"} handle, err := NewHandleFromObjects(launcher) if err != nil { t.Fatalf("error creating handle: %s", err) } // Note: we don't serialize here because that is covered by handle tests build, _ := newMockBuild() reloadedLauncher, err := loadLauncherFromHandle(build, handle) if err != nil { t.Fatalf("error loading launcher from handle: %s", err) } ql, ok := reloadedLauncher.(*QemuLauncher) if !ok { t.Fatalf("incorrect launcher type") } if diff := cmp.Diff(launcher, ql, cmpopts.IgnoreUnexported(QemuLauncher{})); diff != "" { t.Fatalf("incorrect data in reloaded launcher (-want +got):\n%s", diff) } }
explode_data.jsonl/21704
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 274 }
[ 2830, 3393, 48, 33063, 91176, 6999, 1155, 353, 8840, 836, 8, 341, 197, 77191, 1669, 609, 48, 33063, 91176, 90, 32339, 25, 220, 19, 16, 19, 16, 11, 350, 1307, 6184, 25, 3521, 5173, 14, 1126, 1055, 63159, 53822, 11, 1848, 1669, 1532, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestRSSyncExpectations(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 2) manager.podControl = &fakePodControl labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(2, labelMap) informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod") informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0]) postExpectationsPod := pods.Items[1] manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{ controller.NewControllerExpectations(), true, func() { // If we check active pods before checking expectataions, the // ReplicaSet will create a new replica because it doesn't see // this pod, but has fulfilled its expectations. informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod) }, }) manager.syncReplicaSet(GetKey(rsSpec, t)) validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0) }
explode_data.jsonl/7980
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 437 }
[ 2830, 3393, 68980, 1721, 17536, 804, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 2943, 746, 7121, 2461, 2648, 2195, 18175, 2099, 3927, 2972, 10753, 90, 9296, 25, 7342, 8883, 2648, 25, 2732, 2972, 12614, 2648, 90, 2808, 5637, 25, 609, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGocloak_Login(t *testing.T) { t.Parallel() cfg := GetConfig(t) client := NewClientWithDebug(t) SetUpTestUser(t, client) _, err := client.Login( cfg.GoCloak.ClientID, cfg.GoCloak.ClientSecret, cfg.GoCloak.Realm, cfg.GoCloak.UserName, cfg.GoCloak.Password) FailIfErr(t, err, "Login failed") }
explode_data.jsonl/79517
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 140 }
[ 2830, 3393, 38, 509, 385, 585, 79232, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 50286, 1669, 2126, 2648, 1155, 340, 25291, 1669, 1532, 2959, 2354, 7939, 1155, 340, 22212, 2324, 2271, 1474, 1155, 11, 2943, 340, 197, 6878, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCStore_Load(t *testing.T) { type args struct { key string value string } tests := []struct { name string args args want args present bool }{ {"key_value_ok", args{"testKey", "testValue"}, args{"testKey", "testValue"}, true}, {"value_empty", args{"testKey", ""}, args{"testKey", ""}, true}, {"key_empty", args{"", "testValue"}, args{"", "testValue"}, true}, {"both_empty", args{"", ""}, args{"", ""}, true}, {"nothing_to_load", args{"", ""}, args{"testKey", ""}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cs := New() cs.store[tt.args.key] = tt.args.value val, ok := cs.Load(tt.want.key) if ok != tt.present { t.Errorf("expected %t for ok, is %t instead.", tt.present, ok) } if !ok { return } if val != tt.want.value { t.Errorf("loaded key %q while expecting %q", val, tt.want.value) } }) } }
explode_data.jsonl/59218
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 397 }
[ 2830, 3393, 34, 6093, 19553, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 23634, 256, 914, 198, 197, 16309, 914, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 262, 914, 198, 197, 31215, 262, 2827, 198, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestSize(t *testing.T) { m := NewSimpleLongConcurrentHashMap() if m.Size() != 0 { t.Error("map should be empty") } alma := Girl{"Alma"} alva := Girl{"Alva"} m.Put(1, alma) m.Put(2, alva) if m.Size() != 2 { t.Error("map should just contain only two elements") } }
explode_data.jsonl/19659
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 1695, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 1532, 16374, 6583, 1109, 3231, 18497, 2822, 743, 296, 2465, 368, 961, 220, 15, 341, 197, 3244, 6141, 445, 2186, 1265, 387, 4287, 1138, 197, 630, 69571, 1728, 1669, 11363, 491...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestWriterInterface(t *testing.T) { t.Parallel() var iface Writer iface = csv.NewWriter(new(bytes.Buffer)) iface = csv.NewDialectWriter(new(bytes.Buffer), csv.Dialect{}) iface = csv.NewWriter(new(bytes.Buffer)) // To get rid of compile-time warning that this variable is not used. iface.Flush() }
explode_data.jsonl/63005
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 113 }
[ 2830, 3393, 6492, 5051, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 2405, 49313, 29404, 198, 743, 578, 284, 13147, 7121, 6492, 1755, 23158, 22622, 1171, 743, 578, 284, 13147, 7121, 35, 55056, 6492, 1755, 23158, 22622, 701, 13...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTLS12OnlyCipherSuites(t *testing.T) { // Test that a Server doesn't select a TLS 1.2-only cipher suite when // the client negotiates TLS 1.1. clientHello := &clientHelloMsg{ vers: VersionTLS11, random: make([]byte, 32), cipherSuites: []uint16{ // The Server, by default, will use the client's // preference order. So the GCM cipher suite // will be selected unless it's excluded because // of the version in this ClientHello. TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_RC4_128_SHA, }, compressionMethods: []uint8{compressionNone}, supportedCurves: []CurveID{CurveP256, CurveP384, CurveP521}, supportedPoints: []uint8{pointFormatUncompressed}, } c, s := localPipe(t) replyChan := make(chan interface{}) go func() { cli := Client(c, testConfig) cli.vers = clientHello.vers cli.writeRecord(recordTypeHandshake, clientHello.marshal()) reply, err := cli.readHandshake() c.Close() if err != nil { replyChan <- err } else { replyChan <- reply } }() config := testConfig.Clone() config.CipherSuites = clientHello.cipherSuites Server(s, config).Handshake() s.Close() reply := <-replyChan if err, ok := reply.(error); ok { t.Fatal(err) } serverHello, ok := reply.(*serverHelloMsg) if !ok { t.Fatalf("didn't get ServerHello message in reply. Got %v\n", reply) } if s := serverHello.cipherSuite; s != TLS_RSA_WITH_RC4_128_SHA { t.Fatalf("bad cipher suite from server: %x", s) } }
explode_data.jsonl/36316
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 578 }
[ 2830, 3393, 45439, 16, 17, 7308, 79460, 62898, 288, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 429, 264, 8422, 3171, 944, 3293, 264, 41654, 220, 16, 13, 17, 15382, 31088, 16182, 979, 198, 197, 322, 279, 2943, 11642, 42298, 41654, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestStagingEmailLength(t *testing.T) { var errs []error for _, g := range cfg.Groups { if strings.HasPrefix(g.EmailId, "k8s-infra-staging-") { projectName := strings.TrimSuffix(strings.TrimPrefix(g.EmailId, "k8s-infra-staging-"), "@knative.team") len := utf8.RuneCountInString(projectName) if len > 18 { errs = append(errs, fmt.Errorf("Number of characters in project name \"%s\" should not exceed 18; is: %d", projectName, len)) } } } if len(errs) > 0 { for _, err := range errs { t.Error(err) } } }
explode_data.jsonl/24788
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 233 }
[ 2830, 3393, 623, 4118, 4781, 4373, 1155, 353, 8840, 836, 8, 341, 2405, 70817, 3056, 841, 198, 2023, 8358, 342, 1669, 2088, 13286, 59800, 341, 197, 743, 9069, 94357, 3268, 24066, 764, 11, 330, 74, 23, 82, 3419, 42289, 5477, 4118, 12, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestMapStore_Save(t *testing.T) { t.Parallel() store := mapstore.New() if len(store.List()) != 0 { t.Fatalf("Save(): expected len(store) 0, got %d", len(store.List())) } err := store.Save(buildStorageItem(1)) if err != nil { t.Fatalf("Save(): expected no error, got %v", err) } if len(store.List()) != 1 { t.Fatalf("Save(): expected len(store) 1, got %d", len(store.List())) } err = store.Save(buildStorageItem(2)) if err != nil { t.Fatalf("Save(): expected no error, got %v", err) } if len(store.List()) != 2 { t.Fatalf("Save(): expected len(store) 2, got %d", len(store.List())) } }
explode_data.jsonl/37892
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 249 }
[ 2830, 3393, 2227, 6093, 78746, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 57279, 1669, 2415, 4314, 7121, 2822, 743, 2422, 31200, 5814, 2140, 961, 220, 15, 341, 197, 3244, 30762, 445, 8784, 4555, 3601, 2422, 31200, 8, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestWSJWTWithAllowedConnectionTypes(t *testing.T) { o := testWSOptions() setupAddTrusted(o) s := RunServer(o) buildMemAccResolver(s) defer s.Shutdown() for _, test := range []struct { name string connectionTypes []string expectedAnswer string }{ {"not allowed", []string{jwt.ConnectionTypeStandard}, "-ERR"}, {"allowed", []string{jwt.ConnectionTypeStandard, strings.ToLower(jwt.ConnectionTypeWebsocket)}, "+OK"}, {"allowed with unknown", []string{jwt.ConnectionTypeWebsocket, "SomeNewType"}, "+OK"}, {"not allowed with unknown", []string{"SomeNewType"}, "-ERR"}, } { t.Run(test.name, func(t *testing.T) { nuc := newJWTTestUserClaims() nuc.AllowedConnectionTypes = test.connectionTypes claimOpt := testClaimsOptions{ nuc: nuc, expectAnswer: test.expectedAnswer, } _, c, _, _ := testWSWithClaims(t, s, testWSClientOptions{host: o.Websocket.Host, port: o.Websocket.Port}, claimOpt) c.Close() }) } }
explode_data.jsonl/42735
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 384 }
[ 2830, 3393, 7433, 55172, 2354, 35382, 4526, 4173, 1155, 353, 8840, 836, 8, 341, 22229, 1669, 1273, 7433, 3798, 741, 84571, 2212, 1282, 27145, 10108, 340, 1903, 1669, 6452, 5475, 10108, 340, 69371, 18816, 14603, 18190, 1141, 340, 16867, 27...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHashJoinerError(t *testing.T) { defer leaktest.AfterTest(t)() v := [10]sqlbase.EncDatum{} for i := range v { v[i] = sqlbase.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(i))) } testCases := joinerErrorTestCases() ctx := context.Background() st := cluster.MakeTestingClusterSettings() tempEngine, err := engine.NewTempEngine(engine.DefaultStorageEngine, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec) if err != nil { t.Fatal(err) } defer tempEngine.Close() evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(ctx) diskMonitor := mon.MakeMonitor( "test-disk", mon.DiskResource, nil, /* curCount */ nil, /* maxHist */ -1, /* increment: use default block size */ math.MaxInt64, st, ) diskMonitor.Start(ctx, nil /* pool */, mon.MakeStandaloneBudget(math.MaxInt64)) defer diskMonitor.Stop(ctx) for _, c := range testCases { // testFunc is a helper function that runs a hashJoin with the current // test case after running the provided setup function. testFunc := func(t *testing.T, setup func(h *hashJoiner)) error { leftInput := distsqlutils.NewRowBuffer(c.leftTypes, c.leftInput, distsqlutils.RowBufferArgs{}) rightInput := distsqlutils.NewRowBuffer(c.rightTypes, c.rightInput, distsqlutils.RowBufferArgs{}) out := &distsqlutils.RowBuffer{} flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{ Settings: st, TempStorage: tempEngine, DiskMonitor: &diskMonitor, }, } post := execinfrapb.PostProcessSpec{Projection: true, OutputColumns: c.outCols} spec := &execinfrapb.HashJoinerSpec{ LeftEqColumns: c.leftEqCols, RightEqColumns: c.rightEqCols, Type: c.joinType, OnExpr: c.onExpr, } h, err := newHashJoiner(&flowCtx, 0 /* processorID */, spec, leftInput, rightInput, &post, out) if err != nil { return err } outTypes := h.OutputTypes() setup(h) h.Run(context.Background()) if !out.ProducerClosed() { return errors.New("output RowReceiver not closed") } return checkExpectedRows(outTypes, nil, out) } t.Run(c.description, func(t 
*testing.T) { if err := testFunc(t, func(h *hashJoiner) { h.initialBufferSize = 1024 * 32 }); err == nil { t.Errorf("Expected an error:%s, but found nil", c.expectedErr) } else if err.Error() != c.expectedErr.Error() { t.Errorf("HashJoinerErrorTest: expected\n%s, but found\n%v", c.expectedErr, err) } }) } }
explode_data.jsonl/16324
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1032 }
[ 2830, 3393, 6370, 12292, 261, 1454, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 5195, 1669, 508, 16, 15, 60, 3544, 3152, 26598, 68036, 16094, 2023, 600, 1669, 2088, 348, 341, 197, 5195, 989, 60, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestChangefeedUpdatePrimaryKey(t *testing.T) { defer leaktest.AfterTest(t)() testFn := func(t *testing.T, db *gosql.DB, f testfeedFactory) { sqlDB := sqlutils.MakeSQLRunner(db) // This NOT NULL column checks a regression when used with UPDATE-ing a // primary key column or with DELETE. sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'bar')`) foo := f.Feed(t, `CREATE CHANGEFEED FOR foo`) defer foo.Close(t) assertPayloads(t, foo, []string{ `foo: [0]->{"a": 0, "b": "bar"}`, }) sqlDB.Exec(t, `UPDATE foo SET a = 1`) assertPayloads(t, foo, []string{ `foo: [0]->`, `foo: [1]->{"a": 1, "b": "bar"}`, }) sqlDB.Exec(t, `DELETE FROM foo`) assertPayloads(t, foo, []string{ `foo: [1]->`, }) } t.Run(`sinkless`, sinklessTest(testFn)) t.Run(`enterprise`, enterpriseTest(testFn)) t.Run(`rangefeed`, rangefeedTest(sinklessTest, testFn)) }
explode_data.jsonl/21284
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 416 }
[ 2830, 3393, 1143, 524, 823, 12051, 4289, 25981, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 18185, 24911, 1669, 2915, 1155, 353, 8840, 836, 11, 2927, 353, 34073, 1470, 22537, 11, 282, 1273, 11184, 4153,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidate(t *testing.T) { g := initTestGoogleCaptcha(t) server := testproxyhelpers.FakeCaptchaService(t, testproxyhelpers.DefaultFakeCaptchaHost) server.Start() defer server.Close() tests := []struct { name string testFunc func() }{ { name: "test normal token from google", testFunc: func() { w := httptest.NewRecorder() err := g.GenerateCaptchaJWT(w) require.Nil(t, err) req, err := http.NewRequest(http.MethodPost, testproxyhelpers.DefaultFakeCaptchaURL+testproxyhelpers.DefaultFakeCaptchaEndpoint, nil) require.Nil(t, err) req.Form = url.Values{} req.Form["g-recaptcha-response"] = []string{testproxyhelpers.DefaultFakeGoodCaptcha} src, err := g.Validate(req) require.Nil(t, err) require.Equal(t, CaptchaFromGoogle, src) }, }, { name: "test normal token from client", testFunc: func() { w := httptest.NewRecorder() err := g.GenerateCaptchaJWT(w) require.Nil(t, err) resp := w.Result() defer resp.Body.Close() jwtToken := resp.Header.Get(testCaptchaHeader) require.NotEmpty(t, jwtToken) t.Logf("jwtToken %s", jwtToken) req, err := http.NewRequest(http.MethodPost, testproxyhelpers.DefaultFakeCaptchaURL+testproxyhelpers.DefaultFakeCaptchaEndpoint, nil) require.Nil(t, err) req.Header.Set(testCaptchaHeader, jwtToken) src, err := g.Validate(req) require.Nil(t, err) require.Equal(t, CaptchaFromClient, src) }, }, { name: "test bad token", testFunc: func() { req, err := http.NewRequest(http.MethodPost, testproxyhelpers.DefaultFakeCaptchaURL+testproxyhelpers.DefaultFakeCaptchaEndpoint, nil) require.Nil(t, err) req.Header.Set(testCaptchaHeader, testJWTToken) src, err := g.Validate(req) require.NotNil(t, err) require.Equal(t, CaptchaFromGoogle, src) }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { tt.testFunc() }) } }
explode_data.jsonl/60460
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 845 }
[ 2830, 3393, 17926, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 2930, 2271, 14444, 34, 25431, 1155, 340, 41057, 1669, 1273, 22803, 21723, 991, 726, 34, 25431, 1860, 1155, 11, 1273, 22803, 21723, 13275, 52317, 34, 25431, 9296, 340, 41057, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetDeploymentPayload(t *testing.T) { _, err := platform.GetDeploymentPayload("") if err == nil { t.Fatal("should have failed to product deployment payload due to empty chaincode path") } else if !strings.HasPrefix(err.Error(), "ChaincodeSpec's path cannot be empty") { t.Fatalf("should have returned error about path being empty, but got '%s'", err) } }
explode_data.jsonl/58617
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 1949, 75286, 29683, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1848, 1669, 5339, 2234, 75286, 29683, 31764, 743, 1848, 621, 2092, 341, 197, 3244, 26133, 445, 5445, 614, 4641, 311, 1985, 23172, 7729, 4152, 311, 4287, 8781, 1851...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_layout_maxDist(t *testing.T) { tests := []struct { regexp string want int }{ {"^WNE$", 3}, {"^ENWWW(NEEE|SSE(EE|N))$", 10}, {"^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$", 18}, {"^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$", 23}, {"^WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))$", 31}, } for _, tt := range tests { t.Run(tt.regexp, func(t *testing.T) { model := build(tt.regexp) if got := model.maxDist(); got != tt.want { t.Errorf("maxDist() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/82673
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 302 }
[ 2830, 3393, 14466, 6345, 23356, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 37013, 4580, 914, 198, 197, 50780, 256, 526, 198, 197, 59403, 197, 197, 4913, 61, 54, 3944, 73315, 220, 18, 1583, 197, 197, 4913, 61, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { var mockedService = new(TestExampleImplementation) mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() tt := new(testing.T) assert.False(t, mockedService.AssertExpectations(tt)) // make the call now mockedService.Called(1, 2, 3) assert.False(t, mockedService.AssertExpectations(tt)) mockedService.Called(1, 2, 3) // now assert expectations assert.True(t, mockedService.AssertExpectations(tt)) }
explode_data.jsonl/8604
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 217 }
[ 2830, 3393, 1245, 1176, 62222, 529, 17536, 804, 62, 2354, 50693, 375, 2096, 1403, 1155, 353, 8840, 836, 8, 8022, 2405, 46149, 1860, 284, 501, 31159, 13314, 36850, 7229, 2109, 67385, 1860, 8071, 445, 2271, 1245, 1176, 62222, 529, 17536, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRollbackTransitionOnAfterCallbackError(t *testing.T) { OrderStateMachine.Event(OrderEventCheckout).To(OrderStatePaying).From(OrderStateDraft).After(func(order interface{}, tx *gorm.DB) (err error) { order.(*Order).Address = "an address" return errors.New("intentional error") }) order := &Order{} order.State = OrderStateDraft CreateOrderAndExecuteTransition(order, OrderEventCheckout, t, false) testdb.First(&order, order.Id) if order.State != OrderStateDraft { t.Errorf("state transitioned on Before callback error") } if order.Address != "" { t.Errorf("attribute changed on Before callback error") } }
explode_data.jsonl/44780
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 202 }
[ 2830, 3393, 32355, 1419, 21768, 1925, 6025, 7494, 1454, 1155, 353, 8840, 836, 8, 341, 197, 4431, 94666, 6904, 39692, 1556, 55145, 568, 1249, 39692, 1397, 47, 17270, 568, 3830, 39692, 1397, 50086, 568, 6025, 18552, 19385, 3749, 22655, 9854...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAutoRetryApplyAllDependentModuleRetries(t *testing.T) { t.Parallel() out := new(bytes.Buffer) rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES) modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES) err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all -auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr) assert.Nil(t, err) s := out.String() assert.Contains(t, s, "app1 output") assert.Contains(t, s, "app2 output") assert.Contains(t, s, "app3 output") assert.Contains(t, s, "Apply complete!") }
explode_data.jsonl/10099
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 253 }
[ 2830, 3393, 13253, 51560, 28497, 2403, 7839, 28193, 3332, 12020, 4019, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 13967, 1669, 501, 23158, 22622, 340, 33698, 1820, 1669, 2975, 12723, 1155, 11, 13602, 42635, 41486, 29013, 77924, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetAppDetailsKsonnet(t *testing.T) { service := newService("../..") res, err := service.GetAppDetails(context.Background(), &apiclient.RepoServerAppDetailsQuery{ Repo: &argoappv1.Repository{}, Source: &argoappv1.ApplicationSource{ Path: "./test/e2e/testdata/ksonnet", }, }) assert.NoError(t, err) assert.Equal(t, "Ksonnet", res.Type) assert.NotNil(t, res.Ksonnet) assert.Equal(t, "guestbook", res.Ksonnet.Name) assert.Len(t, res.Ksonnet.Environments, 3) }
explode_data.jsonl/58040
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 200 }
[ 2830, 3393, 1949, 2164, 7799, 42, 930, 4711, 1155, 353, 8840, 836, 8, 341, 52934, 1669, 501, 1860, 17409, 496, 5130, 10202, 11, 1848, 1669, 2473, 2234, 2164, 7799, 5378, 19047, 1507, 609, 391, 292, 1451, 2817, 5368, 5475, 2164, 7799, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestDocCancellation_Panic exercises EngineProcessor.Cancel when the
// processor's documentMgr is nil: Submit panics internally, and Cancel is
// expected to report SubmissionPanic instead of crashing the caller.
func TestDocCancellation_Panic(t *testing.T) {
	cancelCommandPoolMock := new(task.MockedPool)
	ctx := context.NewMockDefault()
	executerMock := executermocks.NewMockExecuter()
	// creator ignores its context argument and always returns the mock.
	creator := func(ctx context.T) executer.Executer { return executerMock }
	// Pool expectations for the cancel path; token acquire/release return
	// empty error codes so submission is allowed to proceed.
	cancelCommandPoolMock.On("Submit", ctx.Log(), "messageID", mock.Anything).Return(nil)
	cancelCommandPoolMock.On("BufferTokensIssued").Return(0)
	cancelCommandPoolMock.On("AcquireBufferToken", mock.Anything).Return(task.PoolErrorCode(""))
	cancelCommandPoolMock.On("ReleaseBufferToken", mock.Anything).Return(task.PoolErrorCode(""))
	processor := EngineProcessor{
		executerCreator:   creator,
		cancelCommandPool: cancelCommandPoolMock,
		context:           ctx,
		documentMgr:       nil, // assigning nil panics Submit()
		startWorker:       NewWorkerProcessorSpec(ctx, 1, contracts.StartSession, 1),
		cancelWorker:      NewWorkerProcessorSpec(ctx, 1, contracts.TerminateSession, 1),
	}
	docState := contracts.DocumentState{}
	docState.DocumentInformation.MessageID = "messageID"
	docState.DocumentType = contracts.TerminateSession
	errorCode := processor.Cancel(docState)
	// The internal panic must surface as the SubmissionPanic error code.
	assert.Equal(t, errorCode, SubmissionPanic)
}
explode_data.jsonl/531
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 408 }
[ 2830, 3393, 9550, 82298, 1088, 31270, 1155, 353, 8840, 836, 8, 341, 84441, 4062, 10551, 11571, 1669, 501, 17483, 24664, 291, 10551, 340, 20985, 1669, 2266, 7121, 11571, 3675, 741, 67328, 27951, 11571, 1669, 23494, 4195, 25183, 7121, 11571, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateTableParsing(t *testing.T) { t.Run("Test valid select parsing", func(t *testing.T) { inputs := []string{ "create table test (id int, name text);", } expectedOutputs := []*CreateTableStatement{ { Name: tokenizer.Token{Value: "test", Kind: tokenizer.IdentifierKind}, Cols: []*ColumnDefinition{ { Name: tokenizer.Token{Value: "id", Kind: tokenizer.IdentifierKind}, Datatype: tokenizer.Token{Value: "int", Kind: tokenizer.TypeKind}, }, { Name: tokenizer.Token{Value: "name", Kind: tokenizer.IdentifierKind}, Datatype: tokenizer.Token{Value: "text", Kind: tokenizer.TypeKind}, }, }, }, } for testCase := range inputs { tokenList := *tokenizer.ParseTokenSequence(inputs[testCase]) actualResult, err := parseCreateTableStatement(tokenList) if err != nil { t.Errorf("Parsing failed on set #%d: %v", testCase, err) } if !actualResult.Equals(expectedOutputs[testCase]) { t.Errorf("Assertion failed. Expected: %s, got: %s", actualResult.String(), expectedOutputs[testCase].String()) } } }) t.Run("Test invalid table creation statement parsing", func(t *testing.T) { inputs := []string{ "create table test (id int, name text)", "create table test id int, name text;", } for testCase := range inputs { tokenList := *tokenizer.ParseTokenSequence(inputs[testCase]) actualResult, err := parseCreateTableStatement(tokenList) if err == nil { t.Errorf("Expected error on set #%d. Values got: %v", testCase, actualResult) } } }) }
explode_data.jsonl/55224
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 684 }
[ 2830, 3393, 4021, 2556, 68839, 1155, 353, 8840, 836, 8, 972, 3244, 16708, 445, 2271, 2697, 3293, 22314, 497, 2915, 1155, 353, 8840, 836, 8, 972, 197, 22427, 82, 1669, 3056, 917, 1666, 298, 197, 1, 3182, 1965, 1273, 320, 307, 526, 11...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestHas_Schema(t *testing.T) { sql, args, err := bob.HasTable("users").WithSchema("private").ToSql() if err != nil { t.Fatal(err.Error()) } result := "SELECT * FROM information_schema.tables WHERE table_name = ? AND table_schema = ?;" if sql != result { t.Fatal("sql is not equal with result:", sql) } argsResult := []interface{}{"users", "private"} if !reflect.DeepEqual(args, argsResult) { t.Fatal("args is not equal with argsResult:", args) } }
explode_data.jsonl/61170
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 173 }
[ 2830, 3393, 10281, 1098, 3416, 1155, 353, 8840, 836, 8, 341, 30633, 11, 2827, 11, 1848, 1669, 35192, 16152, 2556, 445, 4218, 1827, 2354, 8632, 445, 1996, 1827, 1249, 8269, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 6141, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func Test_GET_statuses_home_timeline(t *testing.T) { t.SkipNow() kws := map[string]interface{}{ "uid": "2684726573", } result := new(Statuses) err := api.GET_statuses_home_timeline(kws, result) debugCheckError(err) // debugPrintln(len(*result.Statuses)) // fmt.Println(result.Statuses) }
explode_data.jsonl/64150
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 121 }
[ 2830, 3393, 12702, 83702, 21653, 77560, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 7039, 741, 16463, 8915, 1669, 2415, 14032, 31344, 67066, 197, 197, 1, 2423, 788, 330, 17, 21, 23, 19, 22, 17, 21, 20, 22, 18, 756, 197, 532, 9559, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestService_DeleteRelease drives Service.DeleteRelease through four
// paths using fakes: storage read error (TC#1), helm proxy construction
// error (TC#2), proxy uninstall error (TC#3), and a successful delete that
// must map the fake release into a model.ReleaseInfo (TC#4).
func TestService_DeleteRelease(t *testing.T) {
	tcs := []struct {
		svc         Service
		expectedRes *model.ReleaseInfo
		expectedErr error
	}{
		{
			// TC#1
			svc: Service{
				storage: &storage.Fake{
					GetErr: errFake,
				},
			},
			expectedErr: errFake,
		},
		{
			// TC#2
			svc: Service{
				storage: &storage.Fake{
					Item: []byte("{}"),
				},
				newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
					return nil, errFake
				},
			},
			expectedErr: errFake,
		},
		{
			// TC#3
			svc: Service{
				storage: &storage.Fake{
					Item: []byte("{}"),
				},
				newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
					return &fakeHelmProxy{
						err: errFake,
					}, nil
				},
			},
			expectedErr: errFake,
		},
		{
			// TC#4
			svc: Service{
				storage: &storage.Fake{
					Item: []byte("{}"),
				},
				newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
					return &fakeHelmProxy{
						uninstReleaseResp: &services.UninstallReleaseResponse{
							Release: fakeRls,
						},
					}, nil
				},
			},
			expectedRes: &model.ReleaseInfo{
				Name:         fakeRls.GetName(),
				Namespace:    fakeRls.GetNamespace(),
				Version:      fakeRls.GetVersion(),
				CreatedAt:    timeconv.String(fakeRls.GetInfo().GetFirstDeployed()),
				LastDeployed: timeconv.String(fakeRls.GetInfo().GetLastDeployed()),
				Chart:        fakeRls.GetChart().Metadata.Name,
				ChartVersion: fakeRls.GetChart().Metadata.Version,
				Status:       fakeRls.GetInfo().Status.Code.String(),
			},
		},
	}
	for i, tc := range tcs {
		rls, err := tc.svc.DeleteRelease(context.Background(), "testCluster", "", true)
		// errors.Cause unwraps any wrapping added inside DeleteRelease.
		require.Equalf(t, tc.expectedErr, errors.Cause(err), "TC#%d: check errors", i+1)
		if err == nil {
			require.Equalf(t, tc.expectedRes, rls, "TC#%d: check results", i+1)
		}
	}
}
explode_data.jsonl/2000
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 860 }
[ 2830, 3393, 1860, 57418, 16077, 1155, 353, 8840, 836, 8, 341, 3244, 4837, 1669, 3056, 1235, 341, 197, 1903, 7362, 5362, 271, 197, 42400, 1061, 353, 2528, 58693, 1731, 198, 197, 42400, 7747, 1465, 198, 197, 59403, 197, 197, 90, 442, 24...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLoadConfig(t *testing.T) { factories, err := componenttest.NopFactories() assert.NoError(t, err) factory := NewFactory() factories.Processors[typeStr] = factory // we don't need to use them in this test, but the config has them factories.Exporters["otlp"] = otlpexporter.NewFactory() factories.Exporters["jaeger"] = jaegerexporter.NewFactory() cfg, err := configtest.LoadConfigAndValidate(path.Join(".", "testdata", "config.yaml"), factories) require.NoError(t, err) require.NotNil(t, cfg) parsed := cfg.Processors[config.NewComponentID(typeStr)] assert.Equal(t, parsed, &Config{ ProcessorSettings: config.NewProcessorSettings(config.NewComponentID(typeStr)), DefaultExporters: []string{"otlp"}, FromAttribute: "X-Tenant", Table: []RoutingTableItem{ { Value: "acme", Exporters: []string{"jaeger/acme", "otlp/acme"}, }, { Value: "globex", Exporters: []string{"otlp/globex"}, }, }, }) }
explode_data.jsonl/75150
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 400 }
[ 2830, 3393, 5879, 2648, 1155, 353, 8840, 836, 8, 341, 1166, 52893, 11, 1848, 1669, 3692, 1944, 2067, 453, 17417, 2433, 741, 6948, 35699, 1155, 11, 1848, 692, 1166, 2919, 1669, 1532, 4153, 741, 1166, 52893, 29012, 1087, 21557, 2580, 60, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFileHelper_IsPathFileString_13(t *testing.T) { fh := FileHelper{} pathFile := "..\\...\\" _, _, err := fh.IsPathFileString(pathFile) if err == nil { t.Error("Expected an error return from fh.IsPathFileString(pathFile) " + "because 'pathFile' 3-dots ('...'). However, NO ERROR WAS RETURNED! ") } }
explode_data.jsonl/14502
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 128 }
[ 2830, 3393, 1703, 5511, 31879, 1820, 1703, 703, 62, 16, 18, 1155, 353, 8840, 836, 8, 1476, 220, 36075, 1669, 2887, 5511, 16094, 220, 1815, 1703, 1669, 32213, 3422, 1112, 3422, 1837, 220, 8358, 8358, 1848, 1669, 36075, 4506, 1820, 1703, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSubCloseUnsub(t *testing.T) { // the point of this test is **not** to panic var mux TypeMux mux.Stop() sub := mux.Subscribe(int(0)) sub.Unsubscribe() }
explode_data.jsonl/61572
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 66 }
[ 2830, 3393, 3136, 7925, 1806, 1966, 1155, 353, 8840, 836, 8, 341, 197, 322, 279, 1459, 315, 419, 1273, 374, 3070, 1921, 334, 311, 21975, 198, 2405, 59807, 3990, 44, 2200, 198, 2109, 2200, 30213, 741, 28624, 1669, 59807, 82628, 1548, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestSortedArray_Rands(t *testing.T) { gtest.C(t, func(t *gtest.T) { a1 := []interface{}{"a", "d", "c"} func1 := func(v1, v2 interface{}) int { return strings.Compare(gconv.String(v1), gconv.String(v2)) } array1 := garray.NewSortedArrayFrom(a1, func1) i1 := array1.Rands(2) t.AssertIN(i1, []interface{}{"a", "d", "c"}) t.Assert(len(i1), 2) t.Assert(array1.Len(), 3) i1 = array1.Rands(4) t.Assert(len(i1), 4) }) }
explode_data.jsonl/67024
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 226 }
[ 2830, 3393, 51051, 1857, 2568, 2844, 1155, 353, 8840, 836, 8, 341, 3174, 1944, 727, 1155, 11, 2915, 1155, 353, 82038, 836, 8, 341, 197, 11323, 16, 1669, 3056, 4970, 6257, 4913, 64, 497, 330, 67, 497, 330, 66, 63159, 197, 29244, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRoundUpBytes(t *testing.T) { var sizeInBytes int64 = 1024 actual := RoundUpBytes(sizeInBytes) if actual != 1*GiB { t.Fatalf("Wrong result for RoundUpBytes. Got: %d", actual) } }
explode_data.jsonl/78368
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 73 }
[ 2830, 3393, 27497, 2324, 7078, 1155, 353, 8840, 836, 8, 341, 2405, 1379, 641, 7078, 526, 21, 19, 284, 220, 16, 15, 17, 19, 198, 88814, 1669, 17097, 2324, 7078, 6856, 641, 7078, 340, 743, 5042, 961, 220, 16, 9, 46841, 33, 341, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestValidProxyURL(t *testing.T) { var tests = []validationTest{ { value: "", shouldErr: false, }, { value: "http://foo.com:3128", shouldErr: false, }, { value: "http://127.0.0.1:3128", shouldErr: false, }, { value: "http://foo:bar@test.com:324", shouldErr: false, }, { value: "https://foo:bar@test.com:454", shouldErr: false, }, { value: "https://foo:b@r@test.com:454", shouldErr: false, }, { value: "http://myuser:my%20pass@foo.com:3128", shouldErr: false, }, { value: "htt://foo.com:3128", shouldErr: true, }, { value: "http://:foo.com:3128", shouldErr: true, }, { value: "http://myuser@my pass:foo.com:3128", shouldErr: true, }, { value: "http://foo:bar@test.com:abc", shouldErr: true, }, } runValidations(t, tests, "http-proxy", IsValidProxy) }
explode_data.jsonl/77932
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 490 }
[ 2830, 3393, 4088, 16219, 3144, 1155, 353, 8840, 836, 8, 1476, 2405, 7032, 284, 3056, 12284, 2271, 515, 197, 197, 515, 298, 16309, 25, 257, 8324, 298, 197, 5445, 7747, 25, 895, 345, 197, 197, 1583, 197, 197, 515, 298, 16309, 25, 257,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestConfigClients_Update_Error exercises the config-client update
// endpoint's error paths: unknown client, non-admin persona, and a
// non-master realm. In every case the update must be rejected with the
// expected status, hydra must not receive an update call, and the stored
// config must remain unchanged.
func TestConfigClients_Update_Error(t *testing.T) {
	s, cfg, _, h, iss, err := setupHydraTest(false)
	if err != nil {
		t.Fatalf("setupHydraTest() failed: %v", err)
	}
	clientName := "test_client"
	// Update the client RedirectUris.
	cli := &cpb.Client{
		RedirectUris: []string{"http://client.example.com"},
	}
	tests := []struct {
		name       string
		persona    string
		clientName string
		realm      string
		status     int
	}{
		{
			name:       "client not exists",
			persona:    "admin",
			clientName: "invalid",
			realm:      "master",
			status:     http.StatusNotFound,
		},
		{
			name:       "not admin",
			persona:    "non-admin",
			clientName: clientName,
			realm:      "master",
			status:     http.StatusUnauthorized,
		},
		{
			name:       "not master realm",
			persona:    "admin",
			clientName: clientName,
			realm:      "test",
			status:     http.StatusForbidden,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Reset the fake hydra and stage a response it would return if
			// (incorrectly) called.
			h.Clear()
			h.UpdateClientResp = &hydraapi.Client{
				ClientID:      test.TestClientID,
				Name:          clientName,
				Secret:        "secret",
				RedirectURIs:  cli.RedirectUris,
				Scope:         defaultScope,
				GrantTypes:    defaultGrantTypes,
				ResponseTypes: defaultResponseTypes,
			}
			resp := sendConfigClientsUpdate(t, tc.persona, tc.clientName, tc.realm, test.TestClientID, test.TestClientSecret, cli, s, iss)
			if resp.StatusCode != tc.status {
				t.Errorf("resp.StatusCode = %d, wants %d", resp.StatusCode, tc.status)
			}
			if h.UpdateClientReq != nil {
				t.Errorf("should not call Update client to hydra")
			}
			conf, err := s.loadConfig(nil, "master")
			if err != nil {
				t.Fatalf("s.loadConfig() failed %v", err)
			}
			// The persisted config must be identical to the pre-call snapshot.
			if diff := cmp.Diff(cfg, conf, protocmp.Transform()); len(diff) != 0 {
				t.Errorf("config should not update, (-want, +got): %s", diff)
			}
		})
	}
}
explode_data.jsonl/18522
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 858 }
[ 2830, 3393, 2648, 47174, 47393, 28651, 1155, 353, 8840, 836, 8, 341, 1903, 11, 13286, 11, 8358, 305, 11, 2369, 11, 1848, 1669, 6505, 30816, 22248, 2271, 3576, 340, 743, 1848, 961, 2092, 341, 197, 3244, 30762, 445, 15188, 30816, 22248, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestSensitiveValidateSingleword(t *testing.T) { filter := New() filter.AddWord("东") testcases := []struct { Text string ExpectPass bool ExpectFirst string }{ {"两个东西", false, "东"}, } for _, tc := range testcases { if pass, first := filter.Validate(tc.Text); pass != tc.ExpectPass || first != tc.ExpectFirst { t.Errorf("validate %s, got %v, %s, expect %v, %s", tc.Text, pass, first, tc.ExpectPass, tc.ExpectFirst) } } }
explode_data.jsonl/80992
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 195 }
[ 2830, 3393, 63316, 17926, 10888, 1158, 1155, 353, 8840, 836, 8, 341, 50108, 1669, 1532, 741, 50108, 1904, 10879, 445, 67364, 5130, 18185, 23910, 1669, 3056, 1235, 341, 197, 49635, 286, 914, 198, 197, 35911, 12187, 220, 1807, 198, 197, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestListOrgs(t *testing.T) { Convey("List Orgs", t, func() { setup(MockRoute{"GET", "/v2/organizations", []string{listOrgsPayload, listOrgsPayloadPage2}, "", 200, "", nil}, t) defer teardown() c := &Config{ ApiAddress: server.URL, Token: "foobar", } client, err := NewClient(c) So(err, ShouldBeNil) orgs, err := client.ListOrgs() So(err, ShouldBeNil) So(len(orgs), ShouldEqual, 4) So(orgs[0].Guid, ShouldEqual, "a537761f-9d93-4b30-af17-3d73dbca181b") So(orgs[0].Name, ShouldEqual, "demo") }) }
explode_data.jsonl/4426
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 254 }
[ 2830, 3393, 852, 42437, 82, 1155, 353, 8840, 836, 8, 341, 93070, 5617, 445, 852, 33706, 82, 497, 259, 11, 2915, 368, 341, 197, 84571, 66436, 4899, 4913, 3806, 497, 3521, 85, 17, 14, 69253, 497, 3056, 917, 90, 1607, 42437, 82, 29683,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNonEmptyStr(t *testing.T) { var err = errors.New("err") assert.Equal(t, err, validx.NonEmptyStr("", validx.Err(err))()) assert.Nil(t, validx.NonEmptyStr("foo", validx.Err(err))()) }
explode_data.jsonl/46880
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 81 }
[ 2830, 3393, 8121, 3522, 2580, 1155, 353, 8840, 836, 8, 341, 2405, 1848, 284, 5975, 7121, 445, 615, 1138, 6948, 12808, 1155, 11, 1848, 11, 2697, 87, 30706, 3522, 2580, 19814, 2697, 87, 27862, 3964, 593, 2398, 6948, 59678, 1155, 11, 269...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestContainerIndexer verifies the container indexer emits nothing for a
// pod with empty container statuses, then one metadata/index entry per
// container and init container once statuses are set.
func TestContainerIndexer(t *testing.T) {
	var testConfig = common.NewConfig()
	conIndexer, err := NewContainerIndexer(*testConfig, metagen)
	assert.Nil(t, err)
	podName := "testpod"
	ns := "testns"
	container := "container"
	initContainer := "initcontainer"
	pod := Pod{
		Metadata: ObjectMeta{
			Name:      podName,
			Namespace: ns,
			Labels: map[string]string{
				"labelkey": "labelvalue",
			},
		},
		Status: PodStatus{
			ContainerStatuses:     make([]PodContainerStatus, 0),
			InitContainerStatuses: make([]PodContainerStatus, 0),
		},
	}
	// No container statuses yet: nothing should be indexed.
	indexers := conIndexer.GetMetadata(&pod)
	indices := conIndexer.GetIndexes(&pod)
	assert.Equal(t, len(indexers), 0)
	assert.Equal(t, len(indices), 0)
	// Shared expectation for the pod-level part of each entry's metadata.
	expected := common.MapStr{
		"pod": common.MapStr{
			"name": "testpod",
		},
		"namespace": "testns",
		"labels": common.MapStr{
			"labelkey": "labelvalue",
		},
	}
	pod.Status.ContainerStatuses = []PodContainerStatus{
		{
			Name:        container,
			ContainerID: "docker://abcde",
		},
	}
	pod.Status.InitContainerStatuses = []PodContainerStatus{
		{
			Name:        initContainer,
			ContainerID: "docker://fghij",
		},
	}
	// Index keys are the container IDs with the docker:// prefix stripped,
	// regular containers first, then init containers.
	indexers = conIndexer.GetMetadata(&pod)
	assert.Equal(t, len(indexers), 2)
	assert.Equal(t, indexers[0].Index, "abcde")
	assert.Equal(t, indexers[1].Index, "fghij")
	indices = conIndexer.GetIndexes(&pod)
	assert.Equal(t, len(indices), 2)
	assert.Equal(t, indices[0], "abcde")
	assert.Equal(t, indices[1], "fghij")
	// Each entry's metadata adds its own container name on top of the
	// shared pod-level fields.
	expected["container"] = common.MapStr{
		"name": container,
	}
	assert.Equal(t, expected.String(), indexers[0].Data.String())
	expected["container"] = common.MapStr{
		"name": initContainer,
	}
	assert.Equal(t, expected.String(), indexers[1].Data.String())
}
explode_data.jsonl/80956
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 695 }
[ 2830, 3393, 4502, 1552, 261, 1155, 353, 8840, 836, 8, 341, 2405, 1273, 2648, 284, 4185, 7121, 2648, 2822, 37203, 1552, 261, 11, 1848, 1669, 1532, 4502, 1552, 261, 4071, 1944, 2648, 11, 2270, 8535, 340, 6948, 59678, 1155, 11, 1848, 692...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRouting_connectHandler(t *testing.T) { req, err := http.NewRequest("GET", "/", nil) if err != nil { t.Fatal(err) } rr := httptest.NewRecorder() router.ServeHTTP(rr, req) assert.Equal(t, http.StatusOK, rr.Code, fmt.Sprintf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)) }
explode_data.jsonl/62462
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 130 }
[ 2830, 3393, 24701, 15720, 3050, 1155, 353, 8840, 836, 8, 341, 24395, 11, 1848, 1669, 1758, 75274, 445, 3806, 497, 64657, 2092, 340, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 630, 197, 634, 1669, 54320, 70334, 7121, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLogger_AddContextFields(t *testing.T) { t.Parallel() tests := []struct { name string message string contextFields []Field field []Field expect string }{ {"success()", "test", []Field{String("field", "context")}, []Field{String("field", "onetime")}, `{"severity":"INFO","message":"test","field":"context","field":"onetime"}` + defaultLineSeparator}, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() buf := bytes.NewBuffer(nil) l := Must(New(buf, WithUseTimestampField(false), WithUseCallerField(false))) contextLogger := l.With(tt.contextFields...) contextLogger.Info(tt.message, tt.field...) actual := buf.String() FailIfNotEqual(t, tt.expect, actual) }) } }
explode_data.jsonl/71320
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 322 }
[ 2830, 3393, 7395, 21346, 1972, 8941, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 78216, 1669, 3056, 1235, 341, 197, 11609, 688, 914, 198, 197, 24753, 981, 914, 198, 197, 28413, 8941, 3056, 1877, 198, 197, 39250, 260, 3056, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRecreateSubteam(t *testing.T) { tc, _, admin, _, _, sub := memberSetupSubteam(t) defer tc.Cleanup() // switch to `admin` user tc.G.Logout(context.TODO()) if err := admin.Login(tc.G); err != nil { t.Fatal(err) } if err := Delete(context.Background(), tc.G, &teamsUI{}, sub); err != nil { t.Fatal(err) } // create the subteam again name, err := keybase1.TeamNameFromString(sub) if err != nil { t.Fatal(err) } parent, err := name.Parent() if err != nil { t.Fatal(err) } _, err = CreateSubteam(context.Background(), tc.G, string(name.LastPart()), parent, keybase1.TeamRole_NONE /* addSelfAs */) if err != nil { t.Fatal(err) } }
explode_data.jsonl/26471
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 279 }
[ 2830, 3393, 693, 3182, 3136, 9196, 1155, 353, 8840, 836, 8, 341, 78255, 11, 8358, 3986, 11, 8358, 8358, 1186, 1669, 4462, 21821, 3136, 9196, 1155, 340, 16867, 17130, 727, 60639, 2822, 197, 322, 3398, 311, 1565, 2882, 63, 1196, 198, 78...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestDatasource(t *testing.T) { mysqld, err := NewMysqld(nil) if err != nil { t.Errorf("Failed to start mysqld: %s", err) return } defer mysqld.Stop() dsn := mysqld.Datasource("", "", "", 0) re := "root:@unix\\(/.*mysql\\.sock\\)/test" match, _ := regexp.MatchString(re, dsn) if !match { t.Errorf("DSN %s should match %s", dsn, re) } }
explode_data.jsonl/77527
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 168 }
[ 2830, 3393, 47663, 919, 1155, 353, 8840, 836, 8, 341, 2109, 1047, 80, 507, 11, 1848, 1669, 1532, 44, 1047, 80, 507, 27907, 340, 743, 1848, 961, 2092, 341, 197, 3244, 13080, 445, 9408, 311, 1191, 9717, 80, 507, 25, 1018, 82, 497, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestPropagateMaxDurationProcess checks that a retry node's
// Backoff.MaxDuration is propagated into the execution deadline computed
// by processNodeRetries (node start time + 20s here).
func TestPropagateMaxDurationProcess(t *testing.T) {
	cancel, controller := newController()
	defer cancel()
	assert.NotNil(t, controller)
	wf := unmarshalWF(propagate)
	assert.NotNil(t, wf)
	woc := newWorkflowOperationCtx(wf, controller)
	assert.NotNil(t, woc)
	err := woc.setExecWorkflow()
	assert.NoError(t, err)
	assert.Zero(t, len(woc.wf.Status.Nodes))
	// Add the parent node for retries.
	nodeName := "test-node"
	node := woc.initializeNode(nodeName, wfv1.NodeTypeRetry, "", &wfv1.WorkflowStep{}, "", wfv1.NodeRunning)
	retries := wfv1.RetryStrategy{
		Limit: intstrutil.ParsePtr("2"),
		Backoff: &wfv1.Backoff{
			Duration:    "0",
			Factor:      intstrutil.ParsePtr("1"),
			MaxDuration: "20",
		},
	}
	woc.wf.Status.Nodes[woc.wf.NodeID(nodeName)] = *node
	// A failed child under the retry node triggers retry scheduling.
	childNode := fmt.Sprintf("child-node-%d", 0)
	woc.initializeNode(childNode, wfv1.NodeTypePod, "", &wfv1.WorkflowStep{}, "", wfv1.NodeFailed)
	woc.addChildNode(nodeName, childNode)
	var opts executeTemplateOpts
	n := woc.wf.GetNodeByName(nodeName)
	_, _, err = woc.processNodeRetries(n, retries, &opts)
	if assert.NoError(t, err) {
		// Deadline = StartedAt + MaxDuration (20s), compared at second
		// precision to avoid sub-second flakiness.
		assert.Equal(t, n.StartedAt.Add(20*time.Second).Round(time.Second).String(), opts.executionDeadline.Round(time.Second).String())
	}
}
explode_data.jsonl/71021
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 531 }
[ 2830, 3393, 2008, 46836, 5974, 12945, 7423, 1155, 353, 8840, 836, 8, 341, 84441, 11, 6461, 1669, 501, 2051, 741, 16867, 9121, 741, 6948, 93882, 1155, 11, 6461, 340, 6692, 69, 1669, 650, 27121, 32131, 30638, 46836, 340, 6948, 93882, 1155...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCreateReceiver(t *testing.T) { cfg := createDefaultConfig().(*Config) cfg.Endpoint = "localhost:0" // Endpoint is required, not going to be used here. params := componenttest.NewNopReceiverCreateSettings() tReceiver, err := createMetricsReceiver(context.Background(), params, cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, tReceiver, "receiver creation failed") }
explode_data.jsonl/73107
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 128 }
[ 2830, 3393, 4021, 25436, 1155, 353, 8840, 836, 8, 341, 50286, 1669, 1855, 3675, 2648, 1005, 4071, 2648, 340, 50286, 90409, 284, 330, 8301, 25, 15, 1, 442, 47269, 374, 2567, 11, 537, 2087, 311, 387, 1483, 1588, 382, 25856, 1669, 3692, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMySQLServerRepo_GetByHostInfo(t *testing.T) { asst := assert.New(t) entity, err := mysqlServerRepo.GetByHostInfo(testInitHostIP, testInitPortNum) asst.Nil(err, common.CombineMessageWithError("test GetByHostInfo() failed", err)) hostIP := entity.GetHostIP() asst.Equal(testInitHostIP, hostIP, "test GetByHostInfo() failed") portNum := entity.GetPortNum() asst.Equal(testInitPortNum, portNum, "test GetByHostInfo() failed") }
explode_data.jsonl/80040
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 59224, 5475, 25243, 13614, 1359, 9296, 1731, 1155, 353, 8840, 836, 8, 341, 60451, 267, 1669, 2060, 7121, 1155, 692, 52987, 11, 1848, 1669, 10564, 5475, 25243, 2234, 1359, 9296, 1731, 8623, 3803, 9296, 3298, 11, 1273, 3803, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseKeyBytes(t *testing.T) { testCases := []struct { input string expectedName string expectedTags map[string]string }{ {input: "m,k=v", expectedName: "m", expectedTags: map[string]string{"k": "v"}}, {input: "m\\ q,k=v", expectedName: "m q", expectedTags: map[string]string{"k": "v"}}, {input: "m,k\\ q=v", expectedName: "m", expectedTags: map[string]string{"k q": "v"}}, {input: "m\\ q,k\\ q=v", expectedName: "m q", expectedTags: map[string]string{"k q": "v"}}, } for _, testCase := range testCases { t.Run(testCase.input, func(t *testing.T) { name, tags := models.ParseKeyBytes([]byte(testCase.input)) if !bytes.Equal([]byte(testCase.expectedName), name) { t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) } if !tags.Equal(models.NewTags(testCase.expectedTags)) { t.Errorf("%s produced tags %s but expected %s", testCase.input, tags.String(), models.NewTags(testCase.expectedTags).String()) } }) } }
explode_data.jsonl/16978
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 406 }
[ 2830, 3393, 14463, 1592, 7078, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 22427, 286, 914, 198, 197, 42400, 675, 914, 198, 197, 42400, 15930, 2415, 14032, 30953, 198, 197, 59403, 197, 197, 90, 1355, 25, 33...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFlagStrings(t *testing.T) { f := NewFlagStrings() flag.Var(f, "t", "") flag.CommandLine.Parse([]string{"-t", "1", "-t", "2", "-t", "1"}) //nolint:errcheck if e := (FlagStrings{ Map: map[string]bool{ "1": true, "2": true, }, Slice: &[]string{"1", "2"}, }); !reflect.DeepEqual(e, f) { t.Errorf("expected %+v, got %+v", e, f) } }
explode_data.jsonl/55717
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 174 }
[ 2830, 3393, 12135, 20859, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 1532, 12135, 20859, 741, 30589, 87968, 955, 11, 330, 83, 497, 14676, 30589, 12714, 2460, 8937, 10556, 917, 4913, 12, 83, 497, 330, 16, 497, 6523, 83, 497, 330, 17, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestStartHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) h, err := StartHost(api, defaultMachineConfig) if err != nil { t.Fatal("Error starting host.") } if h.Name != config.GetMachineName() { t.Fatalf("GetMachineName()=%q, want %q", config.GetMachineName(), h.Name) } if exists, _ := api.Exists(h.Name); !exists { t.Fatal("Machine not saved.") } if s, _ := h.Driver.GetState(); s != state.Running { t.Fatalf("Machine not started.") } // Provision regenerates Docker certs. This happens automatically during create, // so we should only call it again if the host already exists. if md.Provisioner.Provisioned { t.Fatalf("Did not expect Provision to be called") } }
explode_data.jsonl/4183
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 290 }
[ 2830, 3393, 3479, 9296, 1155, 353, 8840, 836, 8, 341, 79096, 11571, 11349, 1155, 340, 54299, 1669, 7032, 7121, 11571, 7082, 1155, 692, 84374, 1669, 609, 23841, 24664, 31606, 90, 1336, 13013, 261, 25, 609, 23841, 24664, 1336, 13013, 261, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestTwoChar2(t *testing.T) { expected := "lo" actual := twoChar("hello", 3) if strings.Compare(expected, actual) != 0 { t.Fatalf("expected \"%s\", but actual is \"%s\"", expected, actual) } }
explode_data.jsonl/75025
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 75 }
[ 2830, 3393, 11613, 4768, 17, 1155, 353, 8840, 836, 8, 341, 42400, 1669, 330, 385, 698, 88814, 1669, 1378, 4768, 445, 14990, 497, 220, 18, 340, 743, 9069, 32377, 15253, 11, 5042, 8, 961, 220, 15, 341, 197, 3244, 30762, 445, 7325, 323...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestCollectCallParameters(t *testing.T) { // works opts := &CollectOptions{ TokenID: tokenIDT, ExpectedCurrencyOwed0: core.FromRawAmount(token0T, big.NewInt(0)), ExpectedCurrencyOwed1: core.FromRawAmount(token1T, big.NewInt(0)), Recipient: recipientT, } params, err := CollectCallParameters(opts) assert.NoError(t, err) assert.Equal(t, "0xfc6f78650000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000ffffffffffffffffffffffffffffffff00000000000000000000000000000000ffffffffffffffffffffffffffffffff", hexutil.Encode(params.Calldata)) assert.Equal(t, "0x00", utils.ToHex(params.Value)) // works with eth opts = &CollectOptions{ TokenID: tokenIDT, ExpectedCurrencyOwed0: core.FromRawAmount(token1T, big.NewInt(0)), ExpectedCurrencyOwed1: core.FromRawAmount(ether, big.NewInt(0)), ExpectedTokenOwed0: token1T, ExpectedTokenOwed1: ether, Recipient: recipientT, } params, err = CollectCallParameters(opts) assert.NoError(t, err) assert.Equal(t, 
"0xac9650d8000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001a00000000000000000000000000000000000000000000000000000000000000084fc6f78650000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff00000000000000000000000000000000ffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004449404b7c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000064df2ab5bb00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000", hexutil.Encode(params.Calldata)) assert.Equal(t, "0x00", utils.ToHex(params.Value)) }
explode_data.jsonl/58636
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 601 }
[ 2830, 3393, 47504, 7220, 9706, 1155, 353, 8840, 836, 8, 341, 197, 322, 4278, 198, 64734, 1669, 609, 47504, 3798, 515, 197, 33299, 915, 25, 2290, 3950, 915, 51, 345, 197, 197, 18896, 26321, 46, 67367, 15, 25, 6200, 11439, 20015, 10093,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDMap_incrCommandHandler(t *testing.T) { cluster := testcluster.New(NewService) s := cluster.AddMember(nil).(*Service) defer cluster.Shutdown() var errGr errgroup.Group for i := 0; i < 100; i++ { errGr.Go(func() error { cmd := protocol.NewIncr("mydmap", "mykey", 1).Command(context.Background()) rc := s.client.Get(s.rt.This().String()) err := rc.Process(context.Background(), cmd) if err != nil { return err } _, err = cmd.Result() return err }) } require.NoError(t, errGr.Wait()) cmd := protocol.NewGet("mydmap", "mykey").Command(context.Background()) rc := s.client.Get(s.rt.This().String()) err := rc.Process(context.Background(), cmd) require.NoError(t, err) value, err := cmd.Bytes() require.NoError(t, err) v := new(int) err = resp.Scan(value, v) require.NoError(t, err) require.Equal(t, 100, *v) }
explode_data.jsonl/64504
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 347 }
[ 2830, 3393, 35, 2227, 1243, 5082, 4062, 3050, 1155, 353, 8840, 836, 8, 341, 197, 18855, 1669, 1273, 18855, 7121, 35063, 1860, 340, 1903, 1669, 10652, 1904, 9366, 27907, 568, 4071, 1860, 340, 16867, 10652, 10849, 18452, 2822, 2405, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPlaceMarginOrderLimit(t *testing.T) { TestSetRealOrderDefaults(t) request := okgroup.PlaceOrderRequest{ InstrumentID: spotCurrency, Type: order.Limit.Lower(), Side: order.Buy.Lower(), MarginTrading: "2", Price: "-100", Size: "100", } _, err := o.PlaceMarginOrder(&request) testStandardErrorHandling(t, err) }
explode_data.jsonl/30179
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 17371, 21681, 4431, 16527, 1155, 353, 8840, 836, 8, 341, 73866, 1649, 12768, 4431, 16273, 1155, 340, 23555, 1669, 5394, 4074, 86675, 4431, 1900, 515, 197, 197, 56324, 915, 25, 220, 7702, 26321, 345, 197, 27725, 25, 688, 1973...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIncident_CreateIncidentNoteWithResponse(t *testing.T) { setup() defer teardown() input := IncidentNote{ Content: "foo", } mux.HandleFunc("/incidents/1/notes", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") _, _ = w.Write([]byte(`{"note": {"id": "1","content": "foo"}}`)) }) client := defaultTestClient(server.URL, "foo") id := "1" res, err := client.CreateIncidentNoteWithResponse(id, input) want := &IncidentNote{ ID: "1", Content: "foo", } if err != nil { t.Fatal(err) } testEqual(t, want, res) }
explode_data.jsonl/76397
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 237 }
[ 2830, 3393, 39245, 1713, 34325, 39245, 1713, 9112, 2354, 2582, 1155, 353, 8840, 836, 8, 341, 84571, 741, 16867, 49304, 2822, 22427, 1669, 68735, 9112, 515, 197, 197, 2762, 25, 330, 7975, 756, 197, 630, 2109, 2200, 63623, 4283, 2840, 688...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestResolveMulti(t *testing.T) { rslv := MultiResolver( Servers{}, Servers{"A"}, Servers{"B", "C"}, ) res, err := rslv.Resolve(nil) sort.Strings(res) if err != nil { t.Error(err) } if !reflect.DeepEqual(res, []string{"A", "B", "C"}) { t.Error(res) } }
explode_data.jsonl/38862
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 133 }
[ 2830, 3393, 56808, 20358, 1155, 353, 8840, 836, 8, 341, 7000, 3226, 85, 1669, 17439, 18190, 1006, 197, 7568, 18729, 38837, 197, 7568, 18729, 4913, 32, 7115, 197, 7568, 18729, 4913, 33, 497, 330, 34, 7115, 197, 692, 10202, 11, 1848, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPackObjectReturnsObjectWithSingleBaseAtHighOffset(t *testing.T) { original := strings.Repeat("four", 64) compressed, _ := compress(original) p := &Packfile{ idx: IndexWith(map[string]uint32{ "cccccccccccccccccccccccccccccccccccccccc": 32, }), r: bytes.NewReader(append([]byte{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // (1001 0000) (msb=1, type=commit, size=0) 0x90, // (1000 0000) (msb=0, size=1 -> size=256) 0x10}, compressed..., )), } o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) assert.NoError(t, err) assert.Equal(t, TypeCommit, o.Type()) unpacked, err := o.Unpack() assert.Equal(t, []byte(original), unpacked) assert.NoError(t, err) }
explode_data.jsonl/30627
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 445 }
[ 2830, 3393, 30684, 1190, 16446, 1190, 2354, 10888, 3978, 1655, 11976, 6446, 1155, 353, 8840, 836, 8, 341, 197, 9889, 1669, 9069, 2817, 10979, 445, 34024, 497, 220, 21, 19, 340, 32810, 14318, 11, 716, 1669, 24611, 39809, 692, 3223, 1669,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEntityMatchers(t *testing.T) { configPath := filepath.Join(getConfigPath(), configTestEntityMatchersFile) backend, err := config.FromFile(configPath)() if err != nil { t.Fatal("Failed to get config backend") } identityConfig, err := ConfigFromBackend(backend...) assert.Nil(t, err, "Failed to get endpoint config from backend") assert.NotNil(t, identityConfig, "expected valid endpointconfig") configImpl := identityConfig.(*IdentityConfig) assert.Equal(t, 3, len(configImpl.caMatchers), "preloading matchers isn't working as expected") }
explode_data.jsonl/42247
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 178 }
[ 2830, 3393, 3030, 37862, 1155, 353, 8840, 836, 8, 1476, 25873, 1820, 1669, 26054, 22363, 5433, 2648, 1820, 1507, 2193, 2271, 3030, 37862, 1703, 340, 197, 20942, 11, 1848, 1669, 2193, 11439, 1703, 8754, 1820, 8, 741, 743, 1848, 961, 2092...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTrySend(t *testing.T) { p := newTestPeer() err := p.TrySend(&Hello{}) if err == nil { t.Fatal("Expected error") } ready := make(chan struct{}) go func() { close(ready) <-p.Recv() }() <-ready if err = p.TrySend(&Hello{}); err != nil { t.Fatal("Failed to send message") } p.Close() }
explode_data.jsonl/18475
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 139 }
[ 2830, 3393, 21453, 11505, 1155, 353, 8840, 836, 8, 341, 3223, 1669, 501, 2271, 30888, 741, 9859, 1669, 281, 19824, 11505, 2099, 9707, 37790, 743, 1848, 621, 2092, 341, 197, 3244, 26133, 445, 18896, 1465, 1138, 197, 630, 197, 2307, 1669,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCacheTag(t *testing.T) { assert.NoError(t, PrepareEngine()) type CacheDomain struct { Id int64 `xorm:"pk cache"` Name string } assert.NoError(t, testEngine.CreateTables(&CacheDomain{})) assert.True(t, testEngine.GetCacher(testEngine.TableName(&CacheDomain{})) != nil) }
explode_data.jsonl/19199
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 108 }
[ 2830, 3393, 8233, 5668, 1155, 353, 8840, 836, 8, 341, 6948, 35699, 1155, 11, 31166, 4571, 12367, 13158, 19479, 13636, 2036, 341, 197, 67211, 256, 526, 21, 19, 1565, 87, 493, 2974, 20819, 6500, 8805, 197, 21297, 914, 198, 197, 630, 694...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestExecFailureWithArguments(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { _, err := conn.Exec(context.Background(), "selct $1;", 1) if err == nil { t.Fatal("Expected SQL syntax error") } assert.False(t, pgconn.SafeToRetry(err)) _, err = conn.Exec(context.Background(), "select $1::varchar(1);", "1", "2") require.Error(t, err) }) }
explode_data.jsonl/40006
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 171 }
[ 2830, 3393, 10216, 17507, 2354, 19139, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 18185, 2354, 3036, 26040, 4703, 802, 16374, 20689, 1155, 11, 2915, 1155, 353, 8840, 836, 11, 4534, 353, 3517, 87, 50422, 8, 341, 197, 197, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetCommonImage(t *testing.T) { flavorIds := []string{"BBC-S3-02"} queryArgs := &GetFlavorImageArgs{ FlavorIds: flavorIds, } if res, err := BBC_CLIENT.GetCommonImage(queryArgs); err != nil { fmt.Println("Get specific flavor common image failed: ", err) } else { fmt.Println("Get specific flavor common image success, result: ", res) } }
explode_data.jsonl/4070
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 130 }
[ 2830, 3393, 1949, 10839, 1906, 1155, 353, 8840, 836, 8, 341, 1166, 75, 3292, 12701, 1669, 3056, 917, 4913, 66755, 6222, 18, 12, 15, 17, 16707, 27274, 4117, 1669, 609, 1949, 3882, 3292, 1906, 4117, 515, 197, 197, 3882, 3292, 12701, 25,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestParseMirBSDKornConfirm(t *testing.T) { if testing.Short() { t.Skip("calling mksh is slow.") } if !hasMksh57 { t.Skip("mksh 57 required to run") } i := 0 for _, c := range append(fileTests, fileTestsNoPrint...) { if c.MirBSDKorn == nil { continue } for j, in := range c.Strs { t.Run(fmt.Sprintf("%03d-%d", i, j), confirmParse(in, "mksh", false)) } i++ } }
explode_data.jsonl/55118
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 187 }
[ 2830, 3393, 14463, 58461, 30029, 42, 1512, 16728, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 73726, 23789, 927, 374, 6301, 13053, 197, 532, 743, 753, 4648, 44, 74, 927, 20, 22, 341, 197, 3244, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestStreaming(t *testing.T) { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) te := testExporter{make(chan *trace.SpanData)} trace.RegisterExporter(&te) defer trace.UnregisterExporter(&te) client, cleanup := testpb.NewTestClient(t) stream, err := client.Multiple(context.Background()) if err != nil { t.Fatalf("Call failed: %v", err) } err = stream.Send(&testpb.FooRequest{}) if err != nil { t.Fatalf("Couldn't send streaming request: %v", err) } stream.CloseSend() for { _, err := stream.Recv() if err == io.EOF { break } if err != nil { t.Errorf("stream.Recv() = %v; want no errors", err) } } cleanup() s1 := <-te.ch s2 := <-te.ch checkSpanData(t, s1, s2, "testpb.Foo.Multiple", true) select { case <-te.ch: t.Fatal("received extra exported spans") case <-time.After(time.Second / 10): } }
explode_data.jsonl/27158
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 350 }
[ 2830, 3393, 76509, 1155, 353, 8840, 836, 8, 341, 65058, 36051, 2648, 55458, 10753, 90, 3675, 66048, 25, 11655, 9636, 2284, 17571, 96503, 197, 665, 1669, 1273, 88025, 90, 6927, 35190, 353, 15067, 85309, 1043, 10569, 65058, 19983, 88025, 20...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestRequestDurationSeconds(t *testing.T) { rds := &RequestDurationSeconds{ Histogram: generic.NewHistogram("foo", 2), } rds = rds.Module("m").Service("s").Route("r") rds.Observe(5) assert.Equal(t, 5.0, rds.Histogram.(*generic.Histogram).Quantile(0.5)) assert.ElementsMatch(t, []string{"module", "m", "service", "s", "route", "r"}, rds.Histogram.(*generic.Histogram).LabelValues()) f := http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { time.Sleep(time.Millisecond) }) h := Metrics(rds)(f) h.ServeHTTP(nil, httptest.NewRequest(http.MethodGet, "/", nil)) assert.GreaterOrEqual(t, 1.0, rds.Histogram.(*generic.Histogram).Quantile(0.5)) }
explode_data.jsonl/60387
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 267 }
[ 2830, 3393, 1900, 12945, 15343, 1155, 353, 8840, 836, 8, 341, 7000, 5356, 1669, 609, 1900, 12945, 15343, 515, 197, 13292, 28499, 25, 13954, 7121, 77210, 445, 7975, 497, 220, 17, 1326, 197, 532, 7000, 5356, 284, 435, 5356, 26958, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1