text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestUpdateDeploymentConfigMissingID(t *testing.T) { storage := REST{} channel, err := storage.Update(kapi.NewDefaultContext(), &api.DeploymentConfig{}) if channel != nil { t.Errorf("Expected nil, got %v", channel) } if strings.Index(err.Error(), "id is unspecified:") == -1 { t.Errorf("Expected 'id is unspecified' error, got %v", err) } }
explode_data.jsonl/66990
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 4289, 75286, 2648, 25080, 915, 1155, 353, 8840, 836, 8, 341, 197, 16172, 1669, 25414, 31483, 71550, 11, 1848, 1669, 5819, 16689, 5969, 2068, 7121, 3675, 1972, 1507, 609, 2068, 34848, 39130, 2648, 37790, 743, 5496, 961, 2092, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestParseFailure(t *testing.T) { // Test that the first parse error is returned. const url = "%gh&%ij" _, err := ParseQuery(url) errStr := fmt.Sprint(err) if !strings.Contains(errStr, "%gh") { t.Errorf(`ParseQuery(%q) returned error %q, want something containing %q"`, url, errStr, "%gh") } }
explode_data.jsonl/71730
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 14463, 17507, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 429, 279, 1156, 4715, 1465, 374, 5927, 624, 4777, 2515, 284, 5962, 866, 5, 4, 3172, 698, 197, 6878, 1848, 1669, 14775, 2859, 6522, 340, 9859, 2580, 1669, 8879, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestVetAnalyzersSetIsCorrect(t *testing.T) { vetAns, err := genflags.VetAnalyzers() if err != nil { t.Fatal(err) } want := make(map[string]bool) for _, a := range vetAns { want[a] = true } if !reflect.DeepEqual(want, passAnalyzersToVet) { t.Errorf("stale vet analyzers: want %v; got %v", want, passAnalyzersToVet) t.Logf("(Run 'go generate cmd/go/internal/test' to refresh the set of analyzers.)") } }
explode_data.jsonl/10845
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 175 }
[ 2830, 3393, 53, 295, 73307, 59619, 1649, 3872, 33092, 1155, 353, 8840, 836, 8, 341, 5195, 295, 69599, 11, 1848, 1669, 4081, 11161, 5058, 295, 73307, 59619, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 630, 50780, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestMySQLClusterService_GetDBsByID(t *testing.T) { asst := assert.New(t) s := initNewMySQLService() err := s.GetDBsByID(1) asst.Nil(err, "test GetDBsByID() failed") id := s.Databases[constant.ZeroInt].Identity() asst.Equal(1, id, "test GetByID() failed") }
explode_data.jsonl/6152
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 119 }
[ 2830, 3393, 59224, 28678, 1860, 13614, 3506, 82, 60572, 1155, 353, 8840, 836, 8, 341, 60451, 267, 1669, 2060, 7121, 1155, 692, 1903, 1669, 2930, 3564, 59224, 1860, 741, 9859, 1669, 274, 2234, 3506, 82, 60572, 7, 16, 340, 60451, 267, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHttpParser_Request_ContentLength_0(t *testing.T) { http := httpModForTests(nil) http.parserConfig.sendHeaders = true http.parserConfig.sendAllHeaders = true data := "POST / HTTP/1.1\r\n" + "user-agent: curl/7.35.0\r\n" + "host: localhost:9000\r\n" + "accept: */*\r\n" + "authorization: Company 1\r\n" + "content-length: 0\r\n" + "connection: close\r\n" + "\r\n" _, ok, complete := testParse(http, data) assert.True(t, ok) assert.True(t, complete) }
explode_data.jsonl/16490
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 213 }
[ 2830, 3393, 2905, 6570, 44024, 78383, 4373, 62, 15, 1155, 353, 8840, 836, 8, 341, 28080, 1669, 1758, 4459, 2461, 18200, 27907, 340, 28080, 25617, 2648, 5219, 10574, 284, 830, 198, 28080, 25617, 2648, 5219, 2403, 10574, 284, 830, 271, 89...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseBlock(t *testing.T) { checkParseStatements( t, ` let box = Box() box.get_value() `, "(block (let nil box (call Box)) (expression-statement (call (field-access box get_value))))", ) }
explode_data.jsonl/33519
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 14463, 4713, 1155, 353, 8840, 836, 8, 341, 25157, 14463, 93122, 1006, 197, 3244, 345, 197, 197, 3989, 197, 10217, 3745, 284, 8261, 741, 197, 58545, 670, 3142, 741, 197, 197, 12892, 197, 197, 29209, 4574, 320, 1149, 2092, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestProviderBaseURLs(t *testing.T) { machineHostname, err := os.Hostname() if err != nil { machineHostname = "127.0.0.1" } p := config.MustNew(t, logrusx.New("", ""), configx.SkipValidation()) assert.Equal(t, "https://"+machineHostname+":4433/", p.SelfPublicURL(nil).String()) assert.Equal(t, "https://"+machineHostname+":4434/", p.SelfAdminURL().String()) p.MustSet(config.ViperKeyPublicPort, 4444) p.MustSet(config.ViperKeyAdminPort, 4445) assert.Equal(t, "https://"+machineHostname+":4444/", p.SelfPublicURL(nil).String()) assert.Equal(t, "https://"+machineHostname+":4445/", p.SelfAdminURL().String()) p.MustSet(config.ViperKeyPublicHost, "public.ory.sh") p.MustSet(config.ViperKeyAdminHost, "admin.ory.sh") assert.Equal(t, "https://public.ory.sh:4444/", p.SelfPublicURL(nil).String()) assert.Equal(t, "https://admin.ory.sh:4445/", p.SelfAdminURL().String()) // Set to dev mode p.MustSet("dev", true) assert.Equal(t, "http://public.ory.sh:4444/", p.SelfPublicURL(nil).String()) assert.Equal(t, "http://admin.ory.sh:4445/", p.SelfAdminURL().String()) // Check domain aliases p.MustSet(config.ViperKeyPublicDomainAliases, []config.DomainAlias{ { MatchDomain: "www.google.com", BasePath: "/.ory/", Scheme: "https", }, { MatchDomain: "www.amazon.com", BasePath: "/", Scheme: "http", }, { MatchDomain: "ory.sh:1234", BasePath: "/", Scheme: "https", }, }) assert.Equal(t, "http://public.ory.sh:4444/", p.SelfPublicURL(nil).String()) assert.Equal(t, "http://public.ory.sh:4444/", p.SelfPublicURL(&http.Request{ URL: new(url.URL), Host: "www.not-google.com", }).String()) assert.Equal(t, "https://www.GooGle.com:312/.ory/", p.SelfPublicURL(&http.Request{ URL: new(url.URL), Host: "www.GooGle.com:312", }).String()) assert.Equal(t, "http://www.amazon.com/", p.SelfPublicURL(&http.Request{ URL: new(url.URL), Host: "www.amazon.com", }).String()) // Check domain aliases with alias query param assert.Equal(t, "http://www.amazon.com/", p.SelfPublicURL(&http.Request{ URL: &url.URL{RawQuery: 
url.Values{"alias": {"www.amazon.com"}}.Encode()}, Host: "www.GooGle.com:312", }).String()) assert.Equal(t, "https://ory.sh:1234/", p.SelfPublicURL(&http.Request{ URL: &url.URL{RawQuery: url.Values{"alias": {"ory.sh:1234"}}.Encode()}, Host: "www.amazon.com", }).String()) assert.Equal(t, "http://www.amazon.com:8181/", p.SelfPublicURL(&http.Request{ URL: new(url.URL), Host: "www.amazon.com:8181", }).String()) assert.Equal(t, "http://www.amazon.com:8181/", p.SelfPublicURL(&http.Request{ URL: &url.URL{RawQuery: url.Values{"alias": {"www.amazon.com:8181"}}.Encode()}, Host: "www.GooGle.com:312", }).String()) }
explode_data.jsonl/52909
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1174 }
[ 2830, 3393, 5179, 3978, 3144, 82, 1155, 353, 8840, 836, 8, 341, 2109, 3814, 88839, 11, 1848, 1669, 2643, 29840, 606, 741, 743, 1848, 961, 2092, 341, 197, 2109, 3814, 88839, 284, 330, 16, 17, 22, 13, 15, 13, 15, 13, 16, 698, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestClient_ImportExportETHKey_NoChains(t *testing.T) { t.Parallel() t.Cleanup(func() { deleteKeyExportFile(t) }) ethClient := newEthMock(t) ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) ethClient.On("GetLINKBalance", mock.Anything, mock.Anything).Return(assets.NewLinkFromJuels(42), nil) app := startNewApplication(t, withMocks(ethClient), withConfigSet(func(c *configtest.TestGeneralConfig) { c.Overrides.EVMEnabled = null.BoolFrom(true) c.Overrides.GlobalEvmNonceAutoSync = null.BoolFrom(false) c.Overrides.GlobalBalanceMonitorEnabled = null.BoolFrom(false) }), ) client, r := app.NewClientAndRenderer() ethKeyStore := app.GetKeyStore().Eth() set := flag.NewFlagSet("test", 0) set.String("file", "internal/fixtures/apicredentials", "") set.Bool("bypass-version-check", true, "") c := cli.NewContext(nil, set, nil) err := client.RemoteLogin(c) require.NoError(t, err) err = client.ListETHKeys(c) require.NoError(t, err) keys := *r.Renders[0].(*cmd.EthKeyPresenters) require.Len(t, keys, 1) address := keys[0].Address r.Renders = nil // Export the key testdir := filepath.Join(os.TempDir(), t.Name()) err = os.MkdirAll(testdir, 0700|os.ModeDir) require.NoError(t, err) defer os.RemoveAll(testdir) keyfilepath := filepath.Join(testdir, "key") set = flag.NewFlagSet("test", 0) set.String("oldpassword", "../internal/fixtures/correct_password.txt", "") set.String("newpassword", "../internal/fixtures/incorrect_password.txt", "") set.String("output", keyfilepath, "") set.Parse([]string{address}) c = cli.NewContext(nil, set, nil) err = client.ExportETHKey(c) require.NoError(t, err) // Delete the key set = flag.NewFlagSet("test", 0) set.Bool("hard", true, "") set.Bool("yes", true, "") set.Parse([]string{address}) c = cli.NewContext(nil, set, nil) err = client.DeleteETHKey(c) require.NoError(t, err) _, err = ethKeyStore.Get(address) require.Error(t, err) cltest.AssertCount(t, app.GetSqlxDB(), "eth_key_states", 0) // Import the key set = 
flag.NewFlagSet("test", 0) set.String("oldpassword", "../internal/fixtures/incorrect_password.txt", "") set.Parse([]string{keyfilepath}) c = cli.NewContext(nil, set, nil) err = client.ImportETHKey(c) require.NoError(t, err) r.Renders = nil set = flag.NewFlagSet("test", 0) c = cli.NewContext(nil, set, nil) err = client.ListETHKeys(c) require.NoError(t, err) require.Len(t, *r.Renders[0].(*cmd.EthKeyPresenters), 1) _, err = ethKeyStore.Get(address) require.NoError(t, err) // Export test invalid id keyName := keyNameForTest(t) set = flag.NewFlagSet("test Eth export invalid id", 0) set.Parse([]string{"999"}) set.String("newpassword", "../internal/fixtures/apicredentials", "") set.Bool("bypass-version-check", true, "") set.String("output", keyName, "") c = cli.NewContext(nil, set, nil) err = client.ExportETHKey(c) require.Error(t, err, "Error exporting") require.Error(t, utils.JustError(os.Stat(keyName))) }
explode_data.jsonl/79047
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1177 }
[ 2830, 3393, 2959, 62, 11511, 16894, 7625, 1592, 36989, 1143, 1735, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 3244, 727, 60639, 18552, 368, 314, 3698, 1592, 16894, 1703, 1155, 8, 9568, 197, 769, 2959, 1669, 501, 65390, 11571...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNamespaceTopics(t *testing.T) { name := generateRandomName() namespace := fmt.Sprintf("public/%s", name) namespaceURL := fmt.Sprintf("admin/v2/namespaces/%s", namespace) err := httpPut(namespaceURL, anonymousNamespacePolicy()) if err != nil { t.Fatal() } defer func() { _ = httpDelete(fmt.Sprintf("admin/v2/namespaces/%s", namespace)) }() // create topics topic1 := fmt.Sprintf("%s/topic-1", namespace) if err := httpPut("admin/v2/persistent/"+topic1, nil); err != nil { t.Fatal(err) } topic2 := fmt.Sprintf("%s/topic-2", namespace) if err := httpPut("admin/v2/persistent/"+topic2, namespace); err != nil { t.Fatal(err) } defer func() { _ = httpDelete("admin/v2/persistent/"+topic1, "admin/v2/persistent/"+topic2) }() c, err := NewClient(ClientOptions{ URL: serviceURL, }) if err != nil { t.Errorf("failed to create client error: %+v", err) return } defer c.Close() ci := c.(*client) topics, err := ci.namespaceTopics(namespace) if err != nil { t.Fatal(err) } assert.Equal(t, 2, len(topics)) // add a non-persistent topic topicName := fmt.Sprintf("non-persistent://%s/testNonPersistentTopic", namespace) client, err := NewClient(ClientOptions{ URL: serviceURL, }) assert.Nil(t, err) defer client.Close() producer, err := client.CreateProducer(ProducerOptions{ Topic: topicName, }) assert.Nil(t, err) defer producer.Close() topics, err = ci.namespaceTopics(namespace) if err != nil { t.Fatal(err) } assert.Equal(t, 2, len(topics)) }
explode_data.jsonl/69315
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 606 }
[ 2830, 3393, 22699, 45003, 1155, 353, 8840, 836, 8, 341, 11609, 1669, 6923, 13999, 675, 741, 56623, 1669, 8879, 17305, 445, 888, 12627, 82, 497, 829, 340, 56623, 3144, 1669, 8879, 17305, 445, 2882, 5457, 17, 9612, 971, 27338, 12627, 82, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHttpParser_censorPasswordPOST(t *testing.T) { if testing.Verbose() { logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"http", "httpdetailed"}) } http := HttpModForTests() http.Hide_keywords = []string{"password"} http.Send_headers = true http.Send_all_headers = true data1 := []byte( "POST /users/login HTTP/1.1\r\n" + "HOST: www.example.com\r\n" + "Content-Type: application/x-www-form-urlencoded\r\n" + "Content-Length: 28\r\n" + "\r\n" + "username=ME&password=secret\r\n") stream := &HttpStream{data: data1, message: new(HttpMessage)} ok, complete := http.messageParser(stream) if !ok { t.Errorf("Parsing returned error") } if !complete { t.Errorf("Expecting a complete message") } msg := stream.data[stream.message.start:stream.message.end] path, params, err := http.extractParameters(stream.message, msg) if err != nil { t.Errorf("Fail to parse parameters") } if path != "/users/login" { t.Errorf("Wrong path: %s", path) } if strings.Contains(params, "secret") { t.Errorf("Failed to censor the password: %s", msg) } }
explode_data.jsonl/6845
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 437 }
[ 2830, 3393, 2905, 6570, 666, 3805, 4876, 2946, 1155, 353, 8840, 836, 8, 1476, 743, 7497, 42505, 8297, 368, 341, 197, 6725, 79, 5247, 3803, 12531, 79, 36202, 11139, 11, 7342, 895, 11, 830, 11, 3056, 917, 4913, 1254, 497, 330, 1254, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestProcessorMetricsData(t *testing.T) { doneFn, err := obsreporttest.SetupRecordedMetricsTest() require.NoError(t, err) defer doneFn() const acceptedPoints = 29 const refusedPoints = 11 const droppedPoints = 17 obsrep := NewProcessor(ProcessorSettings{Level: configtelemetry.LevelNormal, ProcessorID: processor}) obsrep.MetricsAccepted(context.Background(), acceptedPoints) obsrep.MetricsRefused(context.Background(), refusedPoints) obsrep.MetricsDropped(context.Background(), droppedPoints) obsreporttest.CheckProcessorMetrics(t, processor, acceptedPoints, refusedPoints, droppedPoints) }
explode_data.jsonl/9553
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 179 }
[ 2830, 3393, 22946, 27328, 1043, 1155, 353, 8840, 836, 8, 341, 40495, 24911, 11, 1848, 1669, 7448, 11736, 1944, 39820, 6471, 291, 27328, 2271, 741, 17957, 35699, 1155, 11, 1848, 340, 16867, 2814, 24911, 2822, 4777, 11666, 11411, 284, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGoodName(t *testing.T) { g := gomega.NewGomegaWithT(t) isvc := makeTestInferenceService() isvc.Name = "abc-123" g.Expect(isvc.validate(c)).Should(gomega.Succeed()) }
explode_data.jsonl/1500
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 79 }
[ 2830, 3393, 15216, 675, 1155, 353, 8840, 836, 8, 341, 3174, 1669, 342, 32696, 7121, 38, 32696, 2354, 51, 1155, 340, 19907, 7362, 1669, 1281, 2271, 641, 2202, 1860, 741, 19907, 7362, 2967, 284, 330, 13683, 12, 16, 17, 18, 698, 3174, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestQueryAsync(t *testing.T) { ch := make(chan ret, 1) api := &testGraphQLAPI{} client := NewClient(api, WithSubscriberID(testDeviceID)) cancel, err := client.PostAsync(graphql.PostRequest{Query: "query "}, func(r *graphql.Response, err error) { ch <- ret{r, err} }) if err != nil { t.Fatalf("Post error: %v", err) } if cancel == nil { t.Fatal("PostAsync returns nil") } if _, ok := api.GetPostedHeader()["x-amz-subscriber-id"]; ok { t.Fatalf("GetPostedHeader error: %+v", api) } ret := <-ch raw, ok := ret.response.Data.(json.RawMessage) if !ok { t.Fatalf("Data error: %+v", ret.response.Data) } if !bytes.Equal(raw, testData) { t.Fatalf("Data error: %+v", ret.response.Data) } if ret.response.Errors != testResponse.Errors { t.Fatalf("Errors error: %+v", *ret.response.Errors) } if ret.response.Extensions != testResponse.Extensions { t.Fatalf("Extensions error: %+v", *ret.response.Extensions) } }
explode_data.jsonl/26619
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 378 }
[ 2830, 3393, 2859, 6525, 1155, 353, 8840, 836, 8, 341, 23049, 1669, 1281, 35190, 2112, 11, 220, 16, 692, 54299, 1669, 609, 1944, 88637, 7082, 16094, 25291, 1669, 1532, 2959, 24827, 11, 3085, 40236, 915, 8623, 6985, 915, 1171, 84441, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateDatabase(t *testing.T) { catalogStore, err := store.Open("catalog_create_db", store.DefaultOptions()) require.NoError(t, err) defer os.RemoveAll("catalog_create_db") dataStore, err := store.Open("sqldata_create_db", store.DefaultOptions()) require.NoError(t, err) defer os.RemoveAll("sqldata_create_db") engine, err := NewEngine(catalogStore, dataStore, DefaultOptions().WithPrefix(sqlPrefix)) require.NoError(t, err) err = engine.EnsureCatalogReady(nil) require.NoError(t, err) err = engine.EnsureCatalogReady(nil) require.NoError(t, err) err = engine.ReloadCatalog(nil) require.NoError(t, err) _, err = engine.ExecStmt("CREATE DATABASE db1", nil, true) require.NoError(t, err) _, err = engine.ExecStmt("CREATE DATABASE db1", nil, true) require.Equal(t, ErrDatabaseAlreadyExists, err) _, err = engine.ExecStmt("CREATE DATABASE db2", nil, true) require.NoError(t, err) err = engine.CloseSnapshot() require.NoError(t, err) err = engine.Close() require.NoError(t, err) }
explode_data.jsonl/64052
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 380 }
[ 2830, 3393, 4021, 5988, 1155, 353, 8840, 836, 8, 341, 1444, 7750, 6093, 11, 1848, 1669, 3553, 12953, 445, 26539, 8657, 8685, 497, 3553, 13275, 3798, 2398, 17957, 35699, 1155, 11, 1848, 340, 16867, 2643, 84427, 445, 26539, 8657, 8685, 51...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRouterMultiRoute(t *testing.T) { e := New() r := e.router // Routes r.Add(http.MethodGet, "/users", func(c Context) error { c.Set("path", "/users") return nil }) r.Add(http.MethodGet, "/users/:id", func(c Context) error { return nil }) c := e.NewContext(nil, nil).(*context) // Route > /users r.Find(http.MethodGet, "/users", c) c.handler(c) assert.Equal(t, "/users", c.Get("path")) // Route > /users/:id r.Find(http.MethodGet, "/users/1", c) assert.Equal(t, "1", c.Param("id")) // Route > /user c = e.NewContext(nil, nil).(*context) r.Find(http.MethodGet, "/user", c) he := c.handler(c).(*HTTPError) assert.Equal(t, http.StatusNotFound, he.Code) }
explode_data.jsonl/47124
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 291 }
[ 2830, 3393, 9523, 20358, 4899, 1155, 353, 8840, 836, 8, 341, 7727, 1669, 1532, 741, 7000, 1669, 384, 22125, 271, 197, 322, 22356, 198, 7000, 1904, 19886, 20798, 1949, 11, 3521, 4218, 497, 2915, 1337, 9608, 8, 1465, 341, 197, 1444, 420...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestListVolumePublicationsError(t *testing.T) { mockCtrl := gomock.NewController(t) // Create a mocked persistent store client mockStoreClient := mockpersistentstore.NewMockStoreClient(mockCtrl) // Set the store client behavior we don't care about for this testcase mockStoreClient.EXPECT().GetVolumeTransactions(gomock.Any()).Return([]*storage.VolumeTransaction{}, nil).AnyTimes() // Create a fake VolumePublication fakePub := &utils.VolumePublication{ Name: "foo/bar", NodeName: "bar", VolumeName: "foo", ReadOnly: true, AccessMode: 1, } // Create an instance of the orchestrator for this test orchestrator := getOrchestrator(t) // Add the mocked objects to the orchestrator orchestrator.storeClient = mockStoreClient // Populate volume publications orchestrator.addVolumePublicationToCache(fakePub) // Simulate a bootstrap error orchestrator.bootstrapError = fmt.Errorf("some error") actualPubs, err := orchestrator.ListVolumePublications(context.Background()) assert.NotNil(t, err, fmt.Sprintf("unexpected success listing volume publications")) assert.Empty(t, actualPubs, "non-empty publication list returned") }
explode_data.jsonl/62746
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 362 }
[ 2830, 3393, 852, 18902, 12676, 804, 1454, 1155, 353, 8840, 836, 8, 341, 77333, 15001, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 197, 322, 4230, 264, 46149, 24999, 3553, 2943, 198, 77333, 6093, 2959, 1669, 7860, 69389, 4314, 7121, 115...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSynproxyFileHeaderMismatch(t *testing.T) { tmpfile := makeFakeSynproxyFile([]byte(synproxyFileHeaderMismatch)) defer os.Remove(tmpfile) k := Synproxy{ statFile: tmpfile, } acc := testutil.Accumulator{} err := k.Gather(&acc) assert.Error(t, err) assert.Contains(t, err.Error(), "invalid number of columns in data") }
explode_data.jsonl/17567
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 37134, 22803, 1703, 4047, 82572, 1155, 353, 8840, 836, 8, 341, 20082, 1192, 1669, 1281, 52317, 37134, 22803, 1703, 10556, 3782, 1141, 1872, 22803, 1703, 4047, 82572, 1171, 16867, 2643, 13270, 10368, 1192, 692, 16463, 1669, 23153...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAbsCollection_ToInts(t *testing.T) { intColl := NewIntCollection([]int{1, 2, 2, 3}) arr, err := intColl.ToInts() if err != nil { t.Fatal(err) } if len(arr) != 4 { t.Fatal(errors.New("ToInts error")) } }
explode_data.jsonl/66464
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 101 }
[ 2830, 3393, 27778, 6482, 38346, 1072, 82, 1155, 353, 8840, 836, 8, 341, 2084, 15265, 1669, 1532, 1072, 6482, 10556, 396, 90, 16, 11, 220, 17, 11, 220, 17, 11, 220, 18, 3518, 36511, 11, 1848, 1669, 526, 15265, 15071, 82, 741, 743, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNodes_Clone(t *testing.T) { node := &ClusterNode{ ID: "", Name: "Something", PublicIP: "", PrivateIP: "", } ct := newClusterNodes() ct.PrivateNodes = append(ct.PrivateNodes, node) clonedCt, ok := ct.Clone().(*ClusterNodes) if !ok { t.Fail() } assert.Equal(t, ct, clonedCt) clonedCt.PrivateNodes[0].Name = "Else" areEqual := reflect.DeepEqual(ct, clonedCt) if areEqual { t.Error("It's a shallow clone !") t.Fail() } }
explode_data.jsonl/76491
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 220 }
[ 2830, 3393, 12288, 85110, 603, 1155, 353, 8840, 836, 8, 341, 20831, 1669, 609, 28678, 1955, 515, 197, 29580, 25, 286, 8324, 197, 21297, 25, 414, 330, 23087, 756, 197, 73146, 3298, 25, 220, 8324, 197, 197, 16787, 3298, 25, 8324, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestObjectRef(t *testing.T) { testCases := map[string]struct { obj metav1.Object gvk schema.GroupVersionKind }{ "Service": { obj: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Namespace: "my-ns", Name: "my-name", }, }, gvk: schema.GroupVersionKind{ Group: "", Version: "v1", Kind: "Service", }, }, "Broker": { obj: &v1alpha1.Broker{ ObjectMeta: metav1.ObjectMeta{ Namespace: "broker-ns", Name: "my-broker", }, }, gvk: schema.GroupVersionKind{ Group: "eventing.knative.dev", Version: "v1alpha1", Kind: "Broker", }, }, } for n, tc := range testCases { t.Run(n, func(t *testing.T) { or := ObjectRef(tc.obj, tc.gvk) expectedApiVersion := fmt.Sprintf("%s/%s", tc.gvk.Group, tc.gvk.Version) // Special case for v1. if tc.gvk.Group == "" { expectedApiVersion = tc.gvk.Version } if api, _ := tc.gvk.ToAPIVersionAndKind(); api != expectedApiVersion { t.Errorf("Expected APIVersion %q, actually %q", expectedApiVersion, api) } if kind := or.Kind; kind != tc.gvk.Kind { t.Errorf("Expected kind %q, actually %q", tc.gvk.Kind, kind) } if ns := or.Namespace; ns != tc.obj.GetNamespace() { t.Errorf("Expected namespace %q, actually %q", tc.obj.GetNamespace(), ns) } if n := or.Name; n != tc.obj.GetName() { t.Errorf("Expected name %q, actually %q", tc.obj.GetName(), n) } }) } }
explode_data.jsonl/53357
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 711 }
[ 2830, 3393, 1190, 3945, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 22671, 77520, 16, 8348, 198, 197, 3174, 48363, 10802, 5407, 5637, 10629, 198, 197, 59403, 197, 197, 1, 1860, 788, 341, 298, 226...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestServiceConversionWithEmptyServiceAccountsAnnotation(t *testing.T) { serviceName := "service1" namespace := "default" ip := "10.0.0.1" localSvc := v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: namespace, Annotations: map[string]string{}, }, Spec: v1.ServiceSpec{ ClusterIP: ip, Ports: []v1.ServicePort{ { Name: "http", Port: 8080, Protocol: v1.ProtocolTCP, }, { Name: "https", Protocol: v1.ProtocolTCP, Port: 443, }, }, }, } service := convertService(localSvc, domainSuffix) if service == nil { t.Errorf("could not convert service") } sa := service.ServiceAccounts if len(sa) != 0 { t.Errorf("number of service accounts is incorrect: %d, expected 0", len(sa)) } }
explode_data.jsonl/26712
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 371 }
[ 2830, 3393, 1860, 48237, 2354, 3522, 1860, 41369, 19711, 1155, 353, 8840, 836, 8, 341, 52934, 675, 1669, 330, 7936, 16, 698, 56623, 1669, 330, 2258, 1837, 46531, 1669, 330, 16, 15, 13, 15, 13, 15, 13, 16, 1837, 8854, 92766, 1669, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStateOutputsBlockPartsStats(t *testing.T) { // create dummy peer cs, _ := randState(1) peer := p2pmock.NewPeer(nil) // 1) new block part parts := types.NewPartSetFromData(tmrand.Bytes(100), 10) msg := &BlockPartMessage{ Height: 1, Round: 0, Part: parts.GetPart(0), } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) cs.handleMsg(msgInfo{msg, peer.ID()}) statsMessage := <-cs.statsMsgQueue require.Equal(t, msg, statsMessage.Msg, "") require.Equal(t, peer.ID(), statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{msg, "peer2"}) // sending the part with the same height, but different round msg.Round = 1 cs.handleMsg(msgInfo{msg, peer.ID()}) // sending the part from the smaller height msg.Height = 0 cs.handleMsg(msgInfo{msg, peer.ID()}) // sending the part from the bigger height msg.Height = 3 cs.handleMsg(msgInfo{msg, peer.ID()}) select { case <-cs.statsMsgQueue: t.Errorf("should not output stats message after receiving the known block part!") case <-time.After(50 * time.Millisecond): } }
explode_data.jsonl/81665
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 397 }
[ 2830, 3393, 1397, 61438, 4713, 28921, 16635, 1155, 353, 8840, 836, 8, 341, 197, 322, 1855, 17292, 14397, 198, 71899, 11, 716, 1669, 10382, 1397, 7, 16, 340, 197, 16537, 1669, 281, 17, 5187, 1176, 7121, 30888, 27907, 692, 197, 322, 220...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFmtDotElapsed_RuneCountProperty(t *testing.T) { f := func(d time.Duration) bool { pkg := &Package{ Passed: []TestCase{{Elapsed: d}}, } actual := fmtDotElapsed(pkg) width := utf8.RuneCountInString(actual) if width == 7 { return true } t.Logf("actual %v (width %d)", actual, width) return false } seed := time.Now().Unix() t.Log("seed", seed) assert.Assert(t, quick.Check(f, &quick.Config{ MaxCountScale: 2000, Rand: rand.New(rand.NewSource(seed)), })) }
explode_data.jsonl/9535
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 219 }
[ 2830, 3393, 93322, 34207, 48935, 2568, 2886, 2507, 3052, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 2915, 1500, 882, 33795, 8, 1807, 341, 197, 3223, 7351, 1669, 609, 13100, 515, 298, 10025, 59004, 25, 3056, 16458, 2979, 48935, 25, 294, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestClient_Default(t *testing.T) { client := NewDefault("org", "proj") if got, want := client.BaseURL.String(), "https://dev.azure.com/"; got != want { t.Errorf("Want Client URL %q, got %q", want, got) } }
explode_data.jsonl/82299
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 82 }
[ 2830, 3393, 2959, 60336, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 1532, 3675, 445, 1775, 497, 330, 30386, 1138, 743, 2684, 11, 1366, 1669, 2943, 13018, 3144, 6431, 1507, 330, 2428, 1110, 3583, 70240, 905, 14, 5123, 2684, 961, 1366, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestPostSyncUserWithLDAPAPIEndpoint_WhenUserNotFound(t *testing.T) { sqlstoremock := mockstore.SQLStoreMock{ExpectedError: models.ErrUserNotFound} sc := postSyncUserWithLDAPContext(t, "/api/admin/ldap/sync/34", func(t *testing.T, sc *scenarioContext) { getLDAPConfig = func(*setting.Cfg) (*ldap.Config, error) { return &ldap.Config{}, nil } newLDAP = func(_ []*ldap.ServerConfig) multildap.IMultiLDAP { return &LDAPMock{} } }, &sqlstoremock) assert.Equal(t, http.StatusNotFound, sc.resp.Code) expected := ` { "message": "user not found" } ` assert.JSONEq(t, expected, sc.resp.Body.String()) }
explode_data.jsonl/34373
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 254 }
[ 2830, 3393, 4133, 12154, 1474, 2354, 93497, 7082, 27380, 62, 4498, 1474, 10372, 1155, 353, 8840, 836, 8, 341, 30633, 4314, 16712, 1669, 7860, 4314, 25095, 6093, 11571, 90, 18896, 1454, 25, 4119, 27862, 1474, 10372, 532, 29928, 1669, 1736,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_signersFromSSHAgentForKeys(t *testing.T) { type testCase struct { name string publicKey string privateKey string passphrase string signerCount int errMsg string } cases := []testCase{ { name: "rsa private key wrong passphrase", publicKey: rsaPublicKey, privateKey: rsaPrivateKey, passphrase: "wrong passphrase", signerCount: 0, errMsg: "error decrypting private key", }, { name: "rsa private key correct passphrase", publicKey: rsaPublicKey, privateKey: rsaPrivateKey, signerCount: 1, passphrase: "1234", }, { name: "ed25519 private key wrong passphrase", publicKey: ed25519PublicKey, privateKey: ed25519PriavteKey, passphrase: "wrong passphrase", signerCount: 0, errMsg: "error decrypting private key", }, { name: "ed25519 private key correct passphrase", publicKey: ed25519PublicKey, privateKey: ed25519PriavteKey, signerCount: 1, passphrase: "1234", }, { name: "no private key", signerCount: 0, errMsg: "SSH Agent does not contain any identities", }, } testSignersFromSSHAgent := func(t *testing.T, tc testCase) { c1, c2 := net.Pipe() defer c1.Close() defer c2.Close() client := agent.NewClient(c1) go func() { _ = agent.ServeAgent(agent.NewKeyring(), c2) }() var ( privateKeys []string privateKey = "" ) if tc.privateKey != "" && tc.publicKey != "" { dir := t.TempDir() privateKey = filepath.Join(dir, "private_key") if err := ioutil.WriteFile(privateKey, []byte(tc.privateKey), 0600); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(dir, "private_key.pub"), []byte(tc.publicKey), 0600); err != nil { t.Fatal(err) } privateKeys = []string{privateKey} } signers, err := signersFromSSHAgentForKeys(client, privateKeys, func(file string) ([]byte, error) { if want, got := privateKey, file; want != got { t.Fatalf("file mismatched, want=%s got=%s:\n%s", want, got, cmp.Diff(want, got)) } return []byte(tc.passphrase), nil }) if want, got := tc.signerCount, len(signers); want != got { t.Fatalf("number of signers mismatch: want=%d got=%d", want, got) } if err == nil 
&& tc.errMsg != "" { t.Fatal("error shouldn't be nil") } if err != nil && tc.errMsg == "" { t.Fatalf("error should be nil but it's %s", err.Error()) } if err != nil && !strings.Contains(err.Error(), tc.errMsg) { t.Fatalf("unexpected error message, want=%q, got=%q", tc.errMsg, err.Error()) } } for _, cc := range cases { c := cc t.Run(c.name, func(t *testing.T) { t.Parallel() testSignersFromSSHAgent(t, c) }) } }
explode_data.jsonl/63806
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1200 }
[ 2830, 3393, 11172, 388, 3830, 62419, 16810, 2461, 8850, 1155, 353, 8840, 836, 8, 341, 13158, 54452, 2036, 341, 197, 11609, 286, 914, 198, 197, 1219, 1592, 256, 914, 198, 197, 2455, 1592, 220, 914, 198, 197, 41431, 27710, 220, 914, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSearchCommitLogs(t *testing.T) { var repoPath string var r *git.Repository cwd, _ := os.Getwd() currentEnv := os.Getenv("GOTESTENV") fmt.Println("Environment : " + currentEnv) mockRepoPath := path.Join(cwd, "../..") + "/starfleet" if currentEnv == "ci" { repoPath = mockRepoPath r, _ = git.OpenRepository(repoPath) } else { repoPath = path.Join(cwd, "../..") r, _ = git.OpenRepository(repoPath) } sampleCommits := git2.CommitLogStruct{ Repo: r, ReferenceCommit: "", }.CommitLogs() hash := *sampleCommits.Commits[0].Hash type args struct { repo *git.Repository searchType string searchKey string } tests := []struct { name string args args }{ {name: "Git commit log search test case", args: struct { repo *git.Repository searchType string searchKey string }{repo: r, searchType: "hash", searchKey: hash}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var testObj git2.SearchCommitInterface testObj = git2.SearchCommitStruct{ Repo: tt.args.repo, SearchType: tt.args.searchType, SearchKey: tt.args.searchKey, } got := testObj.SearchCommitLogs() assert.NotZero(t, len(got)) }) } }
explode_data.jsonl/61515
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 538 }
[ 2830, 3393, 5890, 33441, 51053, 1155, 353, 8840, 836, 8, 341, 2405, 15867, 1820, 914, 198, 2405, 435, 353, 12882, 25170, 198, 1444, 6377, 11, 716, 1669, 2643, 2234, 6377, 741, 20121, 14359, 1669, 2643, 64883, 445, 38, 1793, 5177, 30360,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUnblockOwnerReference(t *testing.T) { trueVar := true falseVar := false original := v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: "100", OwnerReferences: []metav1.OwnerReference{ {UID: "1", BlockOwnerDeletion: &trueVar}, {UID: "2", BlockOwnerDeletion: &falseVar}, {UID: "3"}, }, }, } originalData := serilizeOrDie(t, original) expected := v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: "100", OwnerReferences: []metav1.OwnerReference{ {UID: "1", BlockOwnerDeletion: &falseVar}, {UID: "2", BlockOwnerDeletion: &falseVar}, {UID: "3"}, }, }, } accessor, err := meta.Accessor(&original) if err != nil { t.Fatal(err) } n := node{ owners: accessor.GetOwnerReferences(), } patch, err := n.patchToUnblockOwnerReferences() if err != nil { t.Fatal(err) } patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{}) if err != nil { t.Fatal(err) } var got v1.Pod if err := json.Unmarshal(patched, &got); err != nil { t.Fatal(err) } if !reflect.DeepEqual(expected, got) { t.Errorf("expected: %#v,\ngot: %#v", expected, got) t.Errorf("expected: %#v,\ngot: %#v", expected.OwnerReferences, got.OwnerReferences) for _, ref := range got.OwnerReferences { t.Errorf("ref.UID=%s, ref.BlockOwnerDeletion=%v", ref.UID, *ref.BlockOwnerDeletion) } } }
explode_data.jsonl/1194
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 579 }
[ 2830, 3393, 1806, 4574, 13801, 8856, 1155, 353, 8840, 836, 8, 341, 42808, 3962, 1669, 830, 198, 36012, 3962, 1669, 895, 198, 197, 9889, 1669, 348, 16, 88823, 515, 197, 23816, 12175, 25, 77520, 16, 80222, 515, 298, 197, 6463, 25, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestAdminStats_String(t *testing.T) { v := AdminStats{ Issues: &IssueStats{}, Hooks: &HookStats{}, Milestones: &MilestoneStats{}, Orgs: &OrgStats{}, Comments: &CommentStats{}, Pages: &PageStats{}, Users: &UserStats{}, Gists: &GistStats{}, Pulls: &PullStats{}, Repos: &RepoStats{}, } want := `github.AdminStats{Issues:github.IssueStats{}, Hooks:github.HookStats{}, Milestones:github.MilestoneStats{}, Orgs:github.OrgStats{}, Comments:github.CommentStats{}, Pages:github.PageStats{}, Users:github.UserStats{}, Gists:github.GistStats{}, Pulls:github.PullStats{}, Repos:github.RepoStats{}}` if got := v.String(); got != want { t.Errorf("AdminStats.String = %v, want %v", got, want) } }
explode_data.jsonl/33216
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 321 }
[ 2830, 3393, 7210, 16635, 31777, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 7582, 16635, 515, 197, 24486, 778, 1137, 25, 257, 609, 42006, 16635, 38837, 197, 13292, 14685, 25, 414, 609, 31679, 16635, 38837, 197, 9209, 457, 32510, 25, 609, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDurationForPod(t *testing.T) { now := time.Now() tests := []struct { name string pod *corev1.Pod want wfv1.ResourcesDuration }{ {"Empty", &corev1.Pod{}, wfv1.ResourcesDuration{}}, {"ContainerWithCPURequest", &corev1.Pod{ Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "main", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2000m"), }, }}}}, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{ { Name: "main", State: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ StartedAt: metav1.Time{Time: now.Add(-1 * time.Minute)}, FinishedAt: metav1.Time{Time: now}, }, }, }, }, }, }, wfv1.ResourcesDuration{ corev1.ResourceCPU: wfv1.NewResourceDuration(2 * time.Minute), corev1.ResourceMemory: wfv1.NewResourceDuration(1 * time.Minute), }}, {"ContainerWithCPURequest", &corev1.Pod{ Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "main", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2000m"), }, Limits: corev1.ResourceList{ corev1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"), }, }}}}, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{ { Name: "main", State: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ StartedAt: metav1.Time{Time: now.Add(-3 * time.Minute)}, FinishedAt: metav1.Time{Time: now}, }, }, }, }, }, }, wfv1.ResourcesDuration{ corev1.ResourceCPU: wfv1.NewResourceDuration(6 * time.Minute), corev1.ResourceMemory: wfv1.NewResourceDuration(3 * time.Minute), corev1.ResourceName("nvidia.com/gpu"): wfv1.NewResourceDuration(3 * time.Minute), }}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := DurationForPod(tt.pod) assert.Equal(t, tt.want, got) }) } }
explode_data.jsonl/79098
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 982 }
[ 2830, 3393, 12945, 2461, 23527, 1155, 353, 8840, 836, 8, 341, 80922, 1669, 882, 13244, 2822, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 3223, 347, 220, 353, 98645, 16, 88823, 198, 197, 50780, 289, 27890, 16, 21703, 12945, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGCControllerProcessQueueItem(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) tests := []struct { name string backup *api.Backup deleteBackupRequests []*api.DeleteBackupRequest expectDeletion bool createDeleteBackupRequestError bool expectError bool }{ { name: "can't find backup - no error", }, { name: "unexpired backup is not deleted", backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(1 * time.Minute)). Backup, expectDeletion: false, }, { name: "expired backup with no pending deletion requests is deleted", backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). Backup, expectDeletion: true, }, { name: "expired backup with a pending deletion request is not deleted", backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). Backup, deleteBackupRequests: []*api.DeleteBackupRequest{ { ObjectMeta: metav1.ObjectMeta{ Namespace: api.DefaultNamespace, Name: "foo", Labels: map[string]string{ api.BackupNameLabel: "backup-1", api.BackupUIDLabel: "", }, }, Status: api.DeleteBackupRequestStatus{ Phase: api.DeleteBackupRequestPhaseInProgress, }, }, }, expectDeletion: false, }, { name: "expired backup with only processed deletion requests is deleted", backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). Backup, deleteBackupRequests: []*api.DeleteBackupRequest{ { ObjectMeta: metav1.ObjectMeta{ Namespace: api.DefaultNamespace, Name: "foo", Labels: map[string]string{ api.BackupNameLabel: "backup-1", api.BackupUIDLabel: "", }, }, Status: api.DeleteBackupRequestStatus{ Phase: api.DeleteBackupRequestPhaseProcessed, }, }, }, expectDeletion: true, }, { name: "create DeleteBackupRequest error returns an error", backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). 
Backup, expectDeletion: true, createDeleteBackupRequestError: true, expectError: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) ) controller := NewGCController( velerotest.NewLogger(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().DeleteBackupRequests(), client.VeleroV1(), ).(*gcController) controller.clock = fakeClock var key string if test.backup != nil { key = kube.NamespaceAndName(test.backup) sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup) } for _, dbr := range test.deleteBackupRequests { sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(dbr) } if test.createDeleteBackupRequestError { client.PrependReactor("create", "deletebackuprequests", func(action core.Action) (bool, runtime.Object, error) { return true, nil, errors.New("foo") }) } err := controller.processQueueItem(key) gotErr := err != nil assert.Equal(t, test.expectError, gotErr) if test.expectDeletion { require.Len(t, client.Actions(), 1) createAction, ok := client.Actions()[0].(core.CreateAction) require.True(t, ok) assert.Equal(t, "deletebackuprequests", createAction.GetResource().Resource) } else { assert.Len(t, client.Actions(), 0) } }) } }
explode_data.jsonl/64499
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1715 }
[ 2830, 3393, 22863, 2051, 7423, 7554, 1234, 1155, 353, 8840, 836, 8, 341, 1166, 726, 26104, 1669, 8866, 7121, 52317, 26104, 9730, 13244, 12367, 78216, 1669, 3056, 1235, 341, 197, 11609, 5968, 914, 198, 197, 197, 31371, 5108, 353, 2068, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLocalDockerComposeWithVolume(t *testing.T) { path := "./testresources/docker-compose-volume.yml" identifier := strings.ToLower(uuid.New().String()) compose := NewLocalDockerCompose([]string{path}, identifier, WithLogger(TestLogger(t))) destroyFn := func() { err := compose.Down() checkIfError(t, err) assertVolumeDoesNotExist(t, fmt.Sprintf("%s_mydata", identifier)) } defer destroyFn() err := compose. WithCommand([]string{"up", "-d"}). Invoke() checkIfError(t, err) }
explode_data.jsonl/43637
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 186 }
[ 2830, 3393, 7319, 35, 13659, 70492, 2354, 18902, 1155, 353, 8840, 836, 8, 341, 26781, 1669, 5924, 1944, 12745, 61764, 65070, 66768, 33936, 1837, 197, 15909, 1669, 9069, 29983, 41458, 7121, 1005, 703, 12367, 32810, 2900, 1669, 1532, 7319, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUpdate_validateError(t *testing.T) { var ( ctx = context.TODO() repository = reltest.New() scores = &scorestest.Service{} service = New(repository, scores) todo = Todo{ID: 1, Title: "Sleep"} changes = rel.NewChangeset(&todo) ) todo.Title = "" assert.Equal(t, ErrTodoTitleBlank, service.Update(ctx, &todo, changes)) repository.AssertExpectations(t) scores.AssertExpectations(t) }
explode_data.jsonl/7577
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 192 }
[ 2830, 3393, 4289, 42681, 1454, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 20985, 286, 284, 2266, 90988, 741, 197, 17200, 3099, 284, 1351, 1944, 7121, 741, 197, 1903, 7701, 257, 284, 609, 12338, 267, 477, 13860, 16094, 197, 52934, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPgRepository_Upsert(t *testing.T) { t.Run("Success", func(t *testing.T) { // GIVEN tenantMappingModel := newModelBusinessTenantMapping(testID, testName) tenantMappingEntity := newEntityBusinessTenantMapping(testID, testName) mockConverter := &automock.Converter{} defer mockConverter.AssertExpectations(t) mockConverter.On("ToEntity", tenantMappingModel).Return(tenantMappingEntity).Once() db, dbMock := testdb.MockDatabase(t) defer dbMock.AssertExpectations(t) dbMock.ExpectExec(regexp.QuoteMeta(`INSERT INTO public.business_tenant_mappings ( id, external_name, external_tenant, parent, type, provider_name, status ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) ON CONFLICT ( external_tenant ) DO UPDATE SET external_name=EXCLUDED.external_name`)). WithArgs(fixTenantMappingCreateArgs(*tenantMappingEntity)...). WillReturnResult(sqlmock.NewResult(1, 1)) ctx := persistence.SaveToContext(context.TODO(), db) tenantMappingrepo := tenant.NewRepository(mockConverter) // WHEN err := tenantMappingrepo.Upsert(ctx, *tenantMappingModel) // THEN require.NoError(t, err) }) t.Run("Error when upserting", func(t *testing.T) { // GIVEN tenantModel := newModelBusinessTenantMapping(testID, testName) tenantEntity := newEntityBusinessTenantMapping(testID, testName) mockConverter := &automock.Converter{} defer mockConverter.AssertExpectations(t) mockConverter.On("ToEntity", tenantModel).Return(tenantEntity).Once() db, dbMock := testdb.MockDatabase(t) defer dbMock.AssertExpectations(t) dbMock.ExpectExec(regexp.QuoteMeta(`INSERT INTO public.business_tenant_mappings ( id, external_name, external_tenant, parent, type, provider_name, status ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) ON CONFLICT ( external_tenant ) DO UPDATE SET external_name=EXCLUDED.external_name`)). WithArgs(fixTenantMappingCreateArgs(*tenantEntity)...). 
WillReturnError(testError) ctx := persistence.SaveToContext(context.TODO(), db) tenantMappingRepo := tenant.NewRepository(mockConverter) // WHEN err := tenantMappingRepo.Upsert(ctx, *tenantModel) // THEN require.Error(t, err) assert.EqualError(t, err, "Internal Server Error: Unexpected error while executing SQL query") }) }
explode_data.jsonl/23358
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 824 }
[ 2830, 3393, 82540, 4624, 6665, 1690, 529, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 7188, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 197, 322, 89836, 198, 197, 197, 43919, 6807, 1712, 1669, 501, 1712, 22727, 71252, 6807, 8623...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCRUD(t *testing.T) { profile := NewProfile() profile.Age = 30 profile.Money = 1234.12 id, err := dORM.Insert(profile) throwFail(t, err) throwFail(t, AssertIs(id, 1)) user := NewUser() user.UserName = "slene" user.Email = "vslene@gmail.com" user.Password = "pass" user.Status = 3 user.IsStaff = true user.IsActive = true id, err = dORM.Insert(user) throwFail(t, err) throwFail(t, AssertIs(id, 1)) u := &User{ID: user.ID} err = dORM.Read(u) throwFail(t, err) throwFail(t, AssertIs(u.UserName, "slene")) throwFail(t, AssertIs(u.Email, "vslene@gmail.com")) throwFail(t, AssertIs(u.Password, "pass")) throwFail(t, AssertIs(u.Status, 3)) throwFail(t, AssertIs(u.IsStaff, true)) throwFail(t, AssertIs(u.IsActive, true)) assert.True(t, u.Created.In(DefaultTimeLoc).Sub(user.Created.In(DefaultTimeLoc)) <= time.Second) assert.True(t, u.Updated.In(DefaultTimeLoc).Sub(user.Updated.In(DefaultTimeLoc)) <= time.Second) user.UserName = "astaxie" user.Profile = profile num, err := dORM.Update(user) throwFail(t, err) throwFail(t, AssertIs(num, 1)) u = &User{ID: user.ID} err = dORM.Read(u) throwFailNow(t, err) throwFail(t, AssertIs(u.UserName, "astaxie")) throwFail(t, AssertIs(u.Profile.ID, profile.ID)) u = &User{UserName: "astaxie", Password: "pass"} err = dORM.Read(u, "UserName") throwFailNow(t, err) throwFailNow(t, AssertIs(id, 1)) u.UserName = "QQ" u.Password = "111" num, err = dORM.Update(u, "UserName") throwFail(t, err) throwFail(t, AssertIs(num, 1)) u = &User{ID: user.ID} err = dORM.Read(u) throwFailNow(t, err) throwFail(t, AssertIs(u.UserName, "QQ")) throwFail(t, AssertIs(u.Password, "pass")) num, err = dORM.Delete(profile) throwFail(t, err) throwFail(t, AssertIs(num, 1)) u = &User{ID: user.ID} err = dORM.Read(u) throwFail(t, err) throwFail(t, AssertIs(true, u.Profile == nil)) num, err = dORM.Delete(user) throwFail(t, err) throwFail(t, AssertIs(num, 1)) u = &User{ID: 100} err = dORM.Read(u) throwFail(t, AssertIs(err, ErrNoRows)) ub := UserBig{} ub.Name = "name" id, err = 
dORM.Insert(&ub) throwFail(t, err) throwFail(t, AssertIs(id, 1)) ub = UserBig{ID: 1} err = dORM.Read(&ub) throwFail(t, err) throwFail(t, AssertIs(ub.Name, "name")) num, err = dORM.Delete(&ub, "name") throwFail(t, err) throwFail(t, AssertIs(num, 1)) }
explode_data.jsonl/18124
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1045 }
[ 2830, 3393, 8973, 4656, 1155, 353, 8840, 836, 8, 341, 197, 5365, 1669, 1532, 8526, 741, 197, 5365, 92675, 284, 220, 18, 15, 198, 197, 5365, 1321, 2534, 284, 220, 16, 17, 18, 19, 13, 16, 17, 198, 15710, 11, 1848, 1669, 294, 4365, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContainerNames(t *testing.T) { ctx := context.Background() c, rollback := makeConnectionWithContainer(t) defer rollback() containers, err := c.ContainerNames(ctx, nil) if err != nil { t.Fatal(err) } ok := false for _, container := range containers { if container == CONTAINER { ok = true break } } if !ok { t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) } }
explode_data.jsonl/12668
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 158 }
[ 2830, 3393, 4502, 7980, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 1444, 11, 60414, 1669, 1281, 4526, 2354, 4502, 1155, 340, 16867, 60414, 741, 197, 39399, 11, 1848, 1669, 272, 33672, 7980, 7502, 11, 2092, 340, 743, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestMetadataV10Polkadot_Decode(t *testing.T) { metadata := NewMetadataV10() err := DecodeFromBytes(MustHexDecodeString(ExamplaryMetadataV10PolkadotString), metadata) assert.NoError(t, err) assert.Equal(t, *ExamplaryMetadataV10Polkadot, *metadata) }
explode_data.jsonl/59409
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 102 }
[ 2830, 3393, 14610, 53, 16, 15, 14658, 93275, 354, 78668, 534, 1155, 353, 8840, 836, 8, 341, 2109, 7603, 1669, 1532, 14610, 53, 16, 15, 2822, 9859, 1669, 50194, 3830, 7078, 3189, 590, 20335, 32564, 703, 7, 66694, 500, 658, 14610, 53, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestManager_expandString(t *testing.T) { ctx := context.Background() csp, err := NewManager(nil) require.NoError(t, err) csp.configSources = map[string]configsource.ConfigSource{ "tstcfgsrc": &testConfigSource{ ValueMap: map[string]valueEntry{ "str_key": {Value: "test_value"}, "int_key": {Value: 1}, }, }, } require.NoError(t, os.Setenv("envvar", "envvar_value")) defer func() { assert.NoError(t, os.Unsetenv("envvar")) }() require.NoError(t, os.Setenv("envvar_str_key", "str_key")) defer func() { assert.NoError(t, os.Unsetenv("envvar_str_key")) }() tests := []struct { name string input string want interface{} wantErr error }{ { name: "literal_string", input: "literal_string", want: "literal_string", }, { name: "escaped_$", input: "$$tstcfgsrc:int_key$$envvar", want: "$tstcfgsrc:int_key$envvar", }, { name: "cfgsrc_int", input: "$tstcfgsrc:int_key", want: 1, }, { name: "concatenate_cfgsrc_string", input: "prefix-$tstcfgsrc:str_key", want: "prefix-test_value", }, { name: "concatenate_cfgsrc_non_string", input: "prefix-$tstcfgsrc:int_key", want: "prefix-1", }, { name: "envvar", input: "$envvar", want: "envvar_value", }, { name: "prefixed_envvar", input: "prefix-$envvar", want: "prefix-envvar_value", }, { name: "envvar_treated_as_cfgsrc", input: "$envvar:suffix", wantErr: &errUnknownConfigSource{}, }, { name: "cfgsrc_using_envvar", input: "$tstcfgsrc:$envvar_str_key", want: "test_value", }, { name: "envvar_cfgsrc_using_envvar", input: "$envvar/$tstcfgsrc:$envvar_str_key", want: "envvar_value/test_value", }, { name: "delimited_cfgsrc", input: "${tstcfgsrc:int_key}", want: 1, }, { name: "unknown_delimited_cfgsrc", input: "${cfgsrc:int_key}", wantErr: &errUnknownConfigSource{}, }, { name: "delimited_cfgsrc_with_spaces", input: "${ tstcfgsrc: int_key }", want: 1, }, { name: "interpolated_and_delimited_cfgsrc", input: "0/${ tstcfgsrc: $envvar_str_key }/2/${tstcfgsrc:int_key}", want: "0/test_value/2/1", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { 
got, err := csp.expandString(ctx, tt.input) require.IsType(t, tt.wantErr, err) require.Equal(t, tt.want, got) }) } }
explode_data.jsonl/34673
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1185 }
[ 2830, 3393, 2043, 67875, 703, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 1444, 2154, 11, 1848, 1669, 1532, 2043, 27907, 340, 17957, 35699, 1155, 11, 1848, 340, 1444, 2154, 5423, 32200, 284, 2415, 14032, 60, 1676, 2427,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQuit(t *testing.T) { defer leaktest.AfterTest(t)() if testing.Short() { t.Skip("short flag") } c := newCLITest(cliTestParams{t: t}) defer c.cleanup() c.Run("quit") // Wait until this async command cleanups the server. <-c.Stopper().IsStopped() }
explode_data.jsonl/33194
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 110 }
[ 2830, 3393, 42856, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 8676, 5181, 1138, 197, 630, 1444, 1669, 501, 3140, 952, 477, 70249, 2271, 4870, 90, 83...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMessage(t *testing.T) { o := Message("test") assert.NotNil(t, o, "should not return a nil option") assert.Implements(t, (*Option)(nil), o, "it should implement the Option interface") assert.Equal(t, "sentry.interfaces.Message", o.Class(), "it should use the right option class") t.Run("MarshalJSON()", func(t *testing.T) { assert.Equal(t, map[string]interface{}{"message":"test"}, testOptionsSerialize(t, o), "it should serialize to an object") }) t.Run("parameters", func(t *testing.T) { o := Message("this is a %s", "test") assert.NotNil(t, o, "should not return a nil option") mi, ok := o.(*messageOption) assert.True(t, ok, "it should actually be a *messageOption") assert.Equal(t, "this is a %s", mi.Message, "it should use the right message") assert.Equal(t, []interface{}{"test"}, mi.Params, "it should have the correct parameters") assert.Equal(t, "this is a test", mi.Formatted, "it should format the message when requested") }) }
explode_data.jsonl/564
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 341 }
[ 2830, 3393, 2052, 1155, 353, 8840, 836, 8, 341, 22229, 1669, 4856, 445, 1944, 1138, 6948, 93882, 1155, 11, 297, 11, 330, 5445, 537, 470, 264, 2092, 2999, 1138, 6948, 26914, 4674, 1155, 11, 4609, 5341, 2376, 8385, 701, 297, 11, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConfigDescriptorValidate(t *testing.T) { badLabel := strings.Repeat("a", dns1123LabelMaxLength+1) goodLabel := strings.Repeat("a", dns1123LabelMaxLength-1) cases := []struct { name string descriptor ConfigDescriptor wantErr bool }{{ name: "Valid ConfigDescriptor (IstioConfig)", descriptor: IstioConfigTypes, wantErr: false, }, { name: "Invalid DNS11234Label in ConfigDescriptor", descriptor: ConfigDescriptor{ProtoSchema{ Type: badLabel, MessageName: "istio.networking.v1alpha3.Gateway", }}, wantErr: true, }, { name: "Bad MessageName in ProtoMessage", descriptor: ConfigDescriptor{ProtoSchema{ Type: goodLabel, MessageName: "nonexistent", }}, wantErr: true, }, { name: "Missing key function", descriptor: ConfigDescriptor{ProtoSchema{ Type: "service-entry", MessageName: "istio.networking.v1alpha3.ServiceEtrny", }}, wantErr: true, }, { name: "Duplicate type and message", descriptor: ConfigDescriptor{DestinationRule, DestinationRule}, wantErr: true, }} for _, c := range cases { if err := c.descriptor.Validate(); (err != nil) != c.wantErr { t.Errorf("%v failed: got %v but wantErr=%v", c.name, err, c.wantErr) } } }
explode_data.jsonl/56886
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 540 }
[ 2830, 3393, 2648, 11709, 17926, 1155, 353, 8840, 836, 8, 341, 2233, 329, 2476, 1669, 9069, 2817, 10979, 445, 64, 497, 44077, 16, 16, 17, 18, 2476, 35601, 10, 16, 340, 3174, 1386, 2476, 1669, 9069, 2817, 10979, 445, 64, 497, 44077, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUserNamespaceForPod(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet // Runtime supports user namespaces uidMappings := []*kubecontainer.UserNSMapping{ &kubecontainer.UserNSMapping{ ContainerID: 0, HostID: 1000, Size: 65536, }, } userNSConfig := kubecontainer.UserNamespaceConfigInfo{ UidMappings: uidMappings, GidMappings: uidMappings, } // TODO: how to avoid creating a real class and mocking it instead? testKubelet.fakeRuntime.RuntimeConfigInfo = &kubecontainer.RuntimeConfigInfo{UserNamespaceConfig: userNSConfig} for desc, test := range map[string]struct { input *v1.Pod expected runtimeapi.NamespaceMode expectedError bool }{ "nil pod -> default v1 namespaces": { nil, runtimeapi.NamespaceMode_POD, false, }, "v1.Pod default namespaces": { &v1.Pod{}, runtimeapi.NamespaceMode_POD, false, }, "User ns node mode": { &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ kinvolkUsernsAnn: "node", }, }, }, runtimeapi.NamespaceMode_NODE, false, }, "Host Namespaces": { &v1.Pod{ Spec: v1.PodSpec{ HostIPC: true, HostNetwork: true, HostPID: true, }, }, runtimeapi.NamespaceMode_NODE, false, }, "Host Namespaces and user ns pod mode": { &v1.Pod{ Spec: v1.PodSpec{ HostIPC: true, HostNetwork: true, HostPID: true, }, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ kinvolkUsernsAnn: "pod", }, }, }, runtimeapi.NamespaceMode_NODE, true, }, "Privileged container": { &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ v1.Container{ SecurityContext: &v1.SecurityContext { Privileged: &[]bool{true}[0], }, }, }, }, }, runtimeapi.NamespaceMode_NODE, false, }, "Privileged container user ns pod mode": { &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ v1.Container{ SecurityContext: &v1.SecurityContext { Privileged: &[]bool{true}[0], }, }, }, }, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ kinvolkUsernsAnn: "pod", }, }, }, 
runtimeapi.NamespaceMode_NODE, true, }, "Non namespaced capability": { &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ v1.Container{ SecurityContext: &v1.SecurityContext { Capabilities: &v1.Capabilities { Add: []v1.Capability { "MKNOD", "SYS_TIME", "SYS_MODULE", }, }, }, }, }, }, }, runtimeapi.NamespaceMode_NODE, false, }, "Non namespaced capability with userns pod": { &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ v1.Container{ SecurityContext: &v1.SecurityContext { Capabilities: &v1.Capabilities { Add: []v1.Capability { "MKNOD", "SYS_TIME", "SYS_MODULE", }, }, }, }, }, }, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ kinvolkUsernsAnn: "pod", }, }, }, runtimeapi.NamespaceMode_NODE, true, }, "Host path volume": { &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume { v1.Volume { VolumeSource: v1.VolumeSource{ HostPath: &v1.HostPathVolumeSource{ Path: "/tmp/anything", }, }, }, }, }, }, runtimeapi.NamespaceMode_NODE, false, }, "Host path volume with user ns pod": { &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume { v1.Volume { VolumeSource: v1.VolumeSource{ HostPath: &v1.HostPathVolumeSource{ Path: "/tmp/anything", }, }, }, }, }, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ kinvolkUsernsAnn: "pod", }, }, }, runtimeapi.NamespaceMode_NODE, true, }, "Bad userns annotation": { &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ kinvolkUsernsAnn: "itsbad", }, }, }, runtimeapi.NamespaceMode_NODE, true, }, } { t.Logf("TestCase: %s", desc) actual, err := kubelet.UserNamespaceForPod(test.input) if test.expectedError { assert.Error(t, err) } else { assert.NoError(t, err) assert.Equal(t, test.expected, actual) } } }
explode_data.jsonl/49895
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2328 }
[ 2830, 3393, 1474, 22699, 2461, 23527, 1155, 353, 8840, 836, 8, 341, 18185, 42, 3760, 1149, 1669, 501, 2271, 42, 3760, 1149, 1155, 11, 895, 1391, 6461, 30485, 89306, 5462, 639, 340, 16867, 1273, 42, 3760, 1149, 727, 60639, 741, 16463, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestQuickQueueWithComparator(t *testing.T) { q := NewQuickQueue(WithComparator(&student{})) q.Add(&student{name: "benjamin", age: 34}) q.Add(&student{name: "alice", age: 21}) q.Add(&student{name: "john", age: 42}) q.Add(&student{name: "roy", age: 28}) q.Add(&student{name: "moss", age: 25}) assert.Equal(t, 5, q.Len()) assert.True(t, q.Contains(&student{name: "alice", age: 21})) // Peek v, ok := q.Peek().(*student) require.True(t, ok) require.True(t, v.name == "benjamin" && v.age == 34) v, ok = q.Poll().(*student) require.True(t, ok) require.True(t, v.name == "benjamin" && v.age == 34) v, ok = q.Poll().(*student) require.True(t, ok) require.True(t, v.name == "alice" && v.age == 21) v, ok = q.Poll().(*student) require.True(t, ok) require.True(t, v.name == "john" && v.age == 42) v, ok = q.Poll().(*student) require.True(t, ok) require.True(t, v.name == "roy" && v.age == 28) v, ok = q.Poll().(*student) require.True(t, ok) require.True(t, v.name == "moss" && v.age == 25) // The queue should be empty now require.Zero(t, q.Len()) require.Nil(t, q.Peek()) require.Nil(t, q.Poll()) }
explode_data.jsonl/42918
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 490 }
[ 2830, 3393, 24318, 7554, 2354, 38658, 1155, 353, 8840, 836, 8, 341, 18534, 1669, 1532, 24318, 7554, 7, 2354, 38658, 2099, 12038, 6257, 4390, 18534, 1904, 2099, 12038, 47006, 25, 330, 7964, 25246, 497, 4231, 25, 220, 18, 19, 3518, 18534,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestErrorWhenSeqNotClosed(t *testing.T) { input := "(+ 1 2 3 4" lex := lexer.NewLexer(input) tokens, _ := lex.GetTokens() parser := NewParser(tokens) _, err := parser.GetExpressions() assertString(t, "parse error. missing ')' to close sequence", err.Error()) }
explode_data.jsonl/43722
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 101 }
[ 2830, 3393, 1454, 4498, 20183, 2623, 26884, 1155, 353, 8840, 836, 8, 341, 22427, 1669, 11993, 10, 220, 16, 220, 17, 220, 18, 220, 19, 698, 197, 2571, 1669, 53259, 7121, 92847, 5384, 340, 3244, 9713, 11, 716, 1669, 22429, 2234, 29300, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPlugin_GetInventory_withNoOstOnOneHost(t *testing.T) { brickAllocations := []datamodel.Brick{ {BrickHostName: "dac1", Device: "nvme1n1"}, {BrickHostName: "dac2", Device: "nvme2n1"}, {BrickHostName: "dac2", Device: "nvme3n1"}, } fsUuid := "abcdefgh" result := getInventory(Lustre, fsUuid, brickAllocations) expected := `dacs: children: abcdefgh: hosts: dac1: mgs: sdb mdts: {nvme1n1: 0} osts: {nvme1n1: 0} dac2: mdts: {nvme2n1: 1, nvme3n1: 2} osts: {nvme2n1: 1, nvme3n1: 2} vars: fs_name: abcdefgh lnet_suffix: "" mdt_size_mb: "20480" mgsnode: dac1 ` assert.Equal(t, expected, result) }
explode_data.jsonl/15870
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 407 }
[ 2830, 3393, 11546, 13614, 22319, 6615, 2753, 46, 267, 1925, 3966, 9296, 1155, 353, 8840, 836, 8, 341, 197, 69673, 25154, 804, 1669, 3056, 5911, 40259, 1785, 12649, 515, 197, 197, 90, 6828, 865, 85305, 25, 330, 82549, 16, 497, 13903, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetMilestoneByRepoID(t *testing.T) { assert.NoError(t, PrepareTestDatabase()) milestone, err := GetMilestoneByRepoID(1, 1) assert.NoError(t, err) assert.EqualValues(t, 1, milestone.ID) assert.EqualValues(t, 1, milestone.RepoID) _, err = GetMilestoneByRepoID(NonexistentID, NonexistentID) assert.True(t, IsErrMilestoneNotExist(err)) }
explode_data.jsonl/21821
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 139 }
[ 2830, 3393, 1949, 44, 93028, 1359, 25243, 915, 1155, 353, 8840, 836, 8, 341, 6948, 35699, 1155, 11, 31166, 2271, 5988, 12367, 2109, 93028, 11, 1848, 1669, 2126, 44, 93028, 1359, 25243, 915, 7, 16, 11, 220, 16, 340, 6948, 35699, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIDJSONUnmarshaling(t *testing.T) { data := []byte(`{"ID":"9m4e2mr0ui3e8a215n4g","Str":"test"}`) v := jsonType{} err := json.Unmarshal(data, &v) assert.NoError(t, err) assert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, *v.ID) }
explode_data.jsonl/58924
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 154 }
[ 2830, 3393, 915, 5370, 1806, 36239, 6132, 1155, 353, 8840, 836, 8, 341, 8924, 1669, 3056, 3782, 5809, 4913, 915, 3252, 24, 76, 19, 68, 17, 20946, 15, 1963, 18, 68, 23, 64, 17, 16, 20, 77, 19, 70, 2198, 2580, 3252, 1944, 1, 27085...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestVerifyChallengeTxThreshold_validServerAndMultipleClientKeyMeetingThreshold(t *testing.T) { serverKP := newKeypair0() clientKP1 := newKeypair1() clientKP2 := newKeypair2() txSource := NewSimpleAccount(serverKP.Address(), -1) op := ManageData{ SourceAccount: clientKP1.Address(), Name: "testanchor.stellar.org auth", Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), } webAuthDomainOp := ManageData{ SourceAccount: serverKP.Address(), Name: "web_auth_domain", Value: []byte("testwebauth.stellar.org"), } tx64, err := newSignedTransaction( TransactionParams{ SourceAccount: &txSource, IncrementSequenceNum: true, Operations: []Operation{&op, &webAuthDomainOp}, BaseFee: MinBaseFee, Timebounds: NewTimeout(1000), }, network.TestNetworkPassphrase, serverKP, clientKP1, clientKP2, ) assert.NoError(t, err) threshold := Threshold(3) signerSummary := map[string]int32{ clientKP1.Address(): 1, clientKP2.Address(): 2, } wantSigners := []string{ clientKP1.Address(), clientKP2.Address(), } signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) assert.ElementsMatch(t, wantSigners, signersFound) assert.NoError(t, err) }
explode_data.jsonl/20728
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 581 }
[ 2830, 3393, 32627, 62078, 31584, 37841, 8337, 5475, 3036, 32089, 2959, 1592, 64576, 37841, 1155, 353, 8840, 836, 8, 341, 41057, 65036, 1669, 501, 6608, 1082, 1310, 15, 741, 25291, 65036, 16, 1669, 501, 6608, 1082, 1310, 16, 741, 25291, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestKeeperDB_EligibleUpkeeps_BlockCountPerTurn(t *testing.T) { t.Parallel() store, orm, cleanup := setupKeeperDB(t) defer cleanup() db := store.DB ethKeyStore := cltest.NewKeyStore(t, store.DB).Eth() blockheight := int64(63) gracePeriod := int64(10) registry, _ := cltest.MustInsertKeeperRegistry(t, store, ethKeyStore) upkeeps := [5]keeper.UpkeepRegistration{ newUpkeep(registry, 0), newUpkeep(registry, 1), newUpkeep(registry, 2), newUpkeep(registry, 3), newUpkeep(registry, 4), } upkeeps[0].LastRunBlockHeight = 0 // Never run upkeeps[1].LastRunBlockHeight = 41 // Run last turn, outside grade period upkeeps[2].LastRunBlockHeight = 46 // Run last turn, outside grade period upkeeps[3].LastRunBlockHeight = 59 // Run last turn, inside grace period (EXCLUDE) upkeeps[4].LastRunBlockHeight = 61 // Run this turn, inside grace period (EXCLUDE) for _, upkeep := range upkeeps { err := orm.UpsertUpkeep(context.Background(), &upkeep) require.NoError(t, err) } cltest.AssertCount(t, db, &keeper.UpkeepRegistration{}, 5) eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(context.Background(), registry.ContractAddress, blockheight, gracePeriod) assert.NoError(t, err) require.Len(t, eligibleUpkeeps, 3) assert.Equal(t, int64(0), eligibleUpkeeps[0].UpkeepID) assert.Equal(t, int64(1), eligibleUpkeeps[1].UpkeepID) assert.Equal(t, int64(2), eligibleUpkeeps[2].UpkeepID) // preloads registry data assert.Equal(t, registry.ID, eligibleUpkeeps[0].RegistryID) assert.Equal(t, registry.ID, eligibleUpkeeps[1].RegistryID) assert.Equal(t, registry.ID, eligibleUpkeeps[2].RegistryID) assert.Equal(t, registry.CheckGas, eligibleUpkeeps[0].Registry.CheckGas) assert.Equal(t, registry.CheckGas, eligibleUpkeeps[1].Registry.CheckGas) assert.Equal(t, registry.CheckGas, eligibleUpkeeps[2].Registry.CheckGas) assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[0].Registry.ContractAddress) assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[1].Registry.ContractAddress) assert.Equal(t, 
registry.ContractAddress, eligibleUpkeeps[2].Registry.ContractAddress) }
explode_data.jsonl/27009
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 778 }
[ 2830, 3393, 77233, 3506, 2089, 7708, 1238, 2324, 440, 7124, 51779, 2507, 3889, 19389, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 57279, 11, 67602, 11, 21290, 1669, 6505, 77233, 3506, 1155, 340, 16867, 21290, 741, 20939, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestFindCommitsFields(t *testing.T) { windows1251Message, err := ioutil.ReadFile("testdata/commit-c809470461118b7bcab850f6e9a7ca97ac42f8ea-message.txt") require.NoError(t, err) server, serverSocketPath := startTestServices(t) defer server.Stop() client, conn := newCommitServiceClient(t, serverSocketPath) defer conn.Close() testRepo, _, cleanupFn := testhelper.NewTestRepo(t) defer cleanupFn() testCases := []struct { id string commit *gitalypb.GitCommit }{ { id: "b83d6e391c22777fca1ed3012fce84f633d7fed0", commit: &gitalypb.GitCommit{ Id: "b83d6e391c22777fca1ed3012fce84f633d7fed0", Subject: []byte("Merge branch 'branch-merged' into 'master'"), Body: []byte("Merge branch 'branch-merged' into 'master'\r\n\r\nadds bar folder and branch-test text file to check Repository merged_to_root_ref method\r\n\r\n\r\n\r\nSee merge request !12"), Author: &gitalypb.CommitAuthor{ Name: []byte("Job van der Voort"), Email: []byte("job@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1474987066}, }, Committer: &gitalypb.CommitAuthor{ Name: []byte("Job van der Voort"), Email: []byte("job@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1474987066}, }, ParentIds: []string{ "1b12f15a11fc6e62177bef08f47bc7b5ce50b141", "498214de67004b1da3d820901307bed2a68a8ef6", }, BodySize: 162, }, }, { id: "c809470461118b7bcab850f6e9a7ca97ac42f8ea", commit: &gitalypb.GitCommit{ Id: "c809470461118b7bcab850f6e9a7ca97ac42f8ea", Subject: windows1251Message[:len(windows1251Message)-1], Body: windows1251Message, Author: &gitalypb.CommitAuthor{ Name: []byte("Jacob Vosmaer"), Email: []byte("jacob@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1512132977}, }, Committer: &gitalypb.CommitAuthor{ Name: []byte("Jacob Vosmaer"), Email: []byte("jacob@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1512132977}, }, ParentIds: []string{"e63f41fe459e62e1228fcef60d7189127aeba95a"}, BodySize: 49, }, }, { id: "0999bb770f8dc92ab5581cc0b474b3e31a96bf5c", commit: &gitalypb.GitCommit{ Id: 
"0999bb770f8dc92ab5581cc0b474b3e31a96bf5c", Subject: []byte("Hello\xf0world"), Body: []byte("Hello\xf0world\n"), Author: &gitalypb.CommitAuthor{ Name: []byte("Jacob Vosmaer"), Email: []byte("jacob@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1517328273}, }, Committer: &gitalypb.CommitAuthor{ Name: []byte("Jacob Vosmaer"), Email: []byte("jacob@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1517328273}, }, ParentIds: []string{"60ecb67744cb56576c30214ff52294f8ce2def98"}, BodySize: 12, }, }, { id: "77e835ef0856f33c4f0982f84d10bdb0567fe440", commit: &gitalypb.GitCommit{ Id: "77e835ef0856f33c4f0982f84d10bdb0567fe440", Subject: []byte("Add file larger than 1 mb"), Body: []byte("Add file larger than 1 mb\n\nIn order to test Max File Size push rule we need a file larger than 1 MB\n"), Author: &gitalypb.CommitAuthor{ Name: []byte("Ruben Davila"), Email: []byte("rdavila84@gmail.com"), Date: &timestamp.Timestamp{Seconds: 1523247267}, }, Committer: &gitalypb.CommitAuthor{ Name: []byte("Jacob Vosmaer"), Email: []byte("jacob@gitlab.com"), Date: &timestamp.Timestamp{Seconds: 1527855450}, }, ParentIds: []string{"60ecb67744cb56576c30214ff52294f8ce2def98"}, BodySize: 100, }, }, } for _, tc := range testCases { t.Run(tc.id, func(t *testing.T) { request := &gitalypb.FindCommitsRequest{ Repository: testRepo, Revision: []byte(tc.id), Limit: 1, } ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := client.FindCommits(ctx, request) require.NoError(t, err) resp, err := stream.Recv() require.NoError(t, err) require.Equal(t, 1, len(resp.Commits), "expected exactly one commit in the first message") firstCommit := resp.Commits[0] require.Equal(t, tc.commit, firstCommit, "mismatched commits") _, err = stream.Recv() require.Equal(t, io.EOF, err, "there should be no further messages in the stream") }) } }
explode_data.jsonl/26117
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2043 }
[ 2830, 3393, 9885, 17977, 1199, 8941, 1155, 353, 8840, 836, 8, 341, 6692, 1491, 16, 17, 20, 16, 2052, 11, 1848, 1669, 43144, 78976, 445, 92425, 14, 17413, 1786, 23, 15, 24, 19, 22, 15, 19, 21, 16, 16, 16, 23, 65, 22, 8904, 370, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStopperRunWorker(t *testing.T) { s := stop.NewStopper() s.RunWorker(func() { select { case <-s.ShouldStop(): return } }) closer := make(chan struct{}) go func() { s.Stop() close(closer) }() select { case <-closer: // Success. case <-time.After(100 * time.Millisecond): t.Fatal("stopper should be ready to stop") } }
explode_data.jsonl/2422
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 156 }
[ 2830, 3393, 10674, 712, 6727, 21936, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 2936, 7121, 10674, 712, 741, 1903, 16708, 21936, 18552, 368, 341, 197, 38010, 341, 197, 2722, 9119, 82, 26693, 10674, 3932, 298, 853, 198, 197, 197, 532, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestHandler(t *testing.T) { db, mock, err := sqlmock.New() require.NoError(t, err) mock.ExpectQuery(regexp.QuoteMeta( "SELECT COUNT(*) FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = ? LIMIT 1", )). WithArgs("tenant_foo"). WillReturnRows( mock.NewRows([]string{"COUNT"}). AddRow(1), ). RowsWillBeClosed() handler := graphql.NewHandler( graphql.HandlerConfig{ DB: db, Dialect: strutil.Stringer(dialect.SQLite), Tenancy: viewer.NewFixedTenancy( viewertest.NewTestClient(t), ), }, ) c := client.New(handler, client.Path("/query")) var rsp struct{ Tenant struct{ ID, Name string } } err = c.Post(`query { tenant(name: "foo") { id name } }`, &rsp) require.NoError(t, err) require.NotEmpty(t, rsp.Tenant.ID) require.Equal(t, "foo", rsp.Tenant.Name) }
explode_data.jsonl/58902
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 351 }
[ 2830, 3393, 3050, 1155, 353, 8840, 836, 8, 341, 20939, 11, 7860, 11, 1848, 1669, 5704, 16712, 7121, 741, 17957, 35699, 1155, 11, 1848, 340, 77333, 81893, 2859, 18390, 4580, 13, 19466, 12175, 1006, 197, 197, 1, 4858, 23989, 28671, 4295, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewHealResult(t *testing.T) { testCases := []struct { healedDisks int offlineDisks int state healState }{ // 1. No disks healed, no disks offline. {0, 0, healNone}, // 2. No disks healed, non-zero disks offline. {0, 1, healNone}, // 3. Non-zero disks healed, no disks offline. {1, 0, healOK}, // 4. Non-zero disks healed, non-zero disks offline. {1, 1, healPartial}, } for i, test := range testCases { actual := newHealResult(test.healedDisks, test.offlineDisks) if actual.State != test.state { t.Errorf("Test %d: Expected %v but received %v", i+1, test.state, actual.State) } } }
explode_data.jsonl/7935
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 264 }
[ 2830, 3393, 3564, 1519, 278, 2077, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 197, 383, 5838, 4839, 2787, 220, 526, 198, 197, 197, 63529, 4839, 2787, 526, 198, 197, 24291, 286, 26563, 1397, 198, 197, 59403...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUnmarshal_WithCustomPrimitiveType(t *testing.T) { type ( String string Int int Bool bool ) type X struct { S String I Int B Bool } input := ` s = "string" i = 1 b = true ` testUnmarshal(t, []testcase{ {input, nil, &X{"string", 1, true}}, }) }
explode_data.jsonl/52969
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 131 }
[ 2830, 3393, 1806, 27121, 62, 2354, 10268, 33313, 929, 1155, 353, 8840, 836, 8, 341, 13158, 2399, 197, 4980, 914, 198, 197, 57152, 262, 526, 198, 197, 197, 11233, 256, 1807, 198, 197, 340, 13158, 1599, 2036, 341, 197, 7568, 923, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestXbitsString(t *testing.T) { for _, tt := range []struct { name string input *Xbit want string }{ { name: "basic set", input: &Xbit{ Action: "set", Name: "foo", Track: "ip_src", }, want: `xbits:set,foo,track ip_src;`, }, { name: "with expire set", input: &Xbit{ Action: "set", Name: "foo", Track: "ip_src", Expire: "5", }, want: `xbits:set,foo,track ip_src,expire 5;`, }, } { got := tt.input.String() if got != tt.want { t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) } } }
explode_data.jsonl/59690
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 307 }
[ 2830, 3393, 55, 11516, 703, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 3056, 1235, 341, 197, 11609, 220, 914, 198, 197, 22427, 353, 55, 4489, 198, 197, 50780, 220, 914, 198, 197, 59403, 197, 197, 515, 298, 11609, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestServiceDataSource_Service(t *testing.T) { dc := testDaskCluster() t.Run("scheduler", func(t *testing.T) { ds := serviceDS{dc: dc, comp: ComponentScheduler} actual := ds.Service() expected := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test-dask-scheduler", Namespace: "ns", Labels: map[string]string{ "app.kubernetes.io/component": "scheduler", "app.kubernetes.io/instance": "test", "app.kubernetes.io/managed-by": "distributed-compute-operator", "app.kubernetes.io/name": "dask", "app.kubernetes.io/version": "test-tag", }, }, Spec: corev1.ServiceSpec{ ClusterIP: corev1.ClusterIPNone, Selector: map[string]string{ "app.kubernetes.io/component": "scheduler", "app.kubernetes.io/instance": "test", "app.kubernetes.io/name": "dask", }, Ports: []corev1.ServicePort{ { Name: "tcp-serve", Port: 8786, TargetPort: intstr.FromString("serve"), }, { Name: "tcp-dashboard", Port: 8787, TargetPort: intstr.FromString("dashboard"), }, }, }, } assert.Equal(t, expected, actual) }) t.Run("worker", func(t *testing.T) { ds := serviceDS{dc: dc, comp: ComponentWorker} actual := ds.Service() expected := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test-dask-worker", Namespace: "ns", Labels: map[string]string{ "app.kubernetes.io/component": "worker", "app.kubernetes.io/instance": "test", "app.kubernetes.io/managed-by": "distributed-compute-operator", "app.kubernetes.io/name": "dask", "app.kubernetes.io/version": "test-tag", }, }, Spec: corev1.ServiceSpec{ ClusterIP: corev1.ClusterIPNone, Selector: map[string]string{ "app.kubernetes.io/component": "worker", "app.kubernetes.io/instance": "test", "app.kubernetes.io/name": "dask", }, Ports: []corev1.ServicePort{ { Name: "tcp-worker", Port: 3000, TargetPort: intstr.FromString("worker"), }, { Name: "tcp-nanny", Port: 3001, TargetPort: intstr.FromString("nanny"), }, { Name: "tcp-dashboard", Port: 8787, TargetPort: intstr.FromString("dashboard"), }, }, }, } assert.Equal(t, expected, actual) }) }
explode_data.jsonl/76043
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1246 }
[ 2830, 3393, 1860, 17173, 52548, 1155, 353, 8840, 836, 8, 341, 87249, 1669, 1273, 35, 1073, 28678, 2822, 3244, 16708, 445, 63122, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 83336, 1669, 2473, 5936, 90, 7628, 25, 19402, 11, 1367, 25,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRequestPreservesBaseTrailingSlash(t *testing.T) { r := &Request{baseURL: &url.URL{}, pathPrefix: "/path/"} if s := r.URL().String(); s != "/path/" { t.Errorf("trailing slash should be preserved: %s", s) } }
explode_data.jsonl/13248
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 84 }
[ 2830, 3393, 1900, 14367, 13280, 3978, 1282, 14277, 88004, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 609, 1900, 90, 3152, 3144, 25, 609, 1085, 20893, 22655, 1815, 14335, 25, 3521, 2343, 11225, 532, 743, 274, 1669, 435, 20893, 1005, 703, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNoOverwriteInputFileError(t *testing.T) { default_suite.expectBundled(t, bundled{ files: map[string]string{ "/entry.js": ` console.log(123) `, }, entryPaths: []string{"/entry.js"}, options: config.Options{ Mode: config.ModeBundle, AbsOutputDir: "/", }, expectedCompileLog: `error: Refusing to overwrite input file "entry.js" (use "AllowOverwrite: true" to allow this) `, }) }
explode_data.jsonl/38544
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 175 }
[ 2830, 3393, 2753, 1918, 4934, 2505, 1703, 1454, 1155, 353, 8840, 836, 8, 341, 11940, 57239, 25952, 33, 1241, 832, 1155, 11, 51450, 515, 197, 74075, 25, 2415, 14032, 30953, 515, 298, 197, 3115, 4085, 2857, 788, 22074, 571, 12160, 1665, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDiskErrorLines(t *testing.T) { splitLines := func(text string) []string { return strings.Split(text, "\n") } for _, tc := range []struct { name string lines []string want []string }{ { name: "ATA errors", lines: splitLines(`[ 49.135097] ata1.00: exception Emask 0x0 SAct 0x10000000 SErr 0x0 action 0x6 frozen [ 49.135112] ata1.00: failed command: READ FPDMA QUEUED [ 49.135126] ata1.00: cmd 60/40:e0:f0:f1:b5/00:00:00:00:00/40 tag 28 ncq dma 32768 in [ 49.135126] res 40/00:00:00:00:00/00:00:00:00:00/00 Emask 0x4 (timeout) [ 49.135133] ata1.00: status: { DRDY } [ 49.135142] ata1: hard resetting link [ 49.445043] ata1: SATA link up 6.0 Gbps (SStatus 133 SControl 300) [ 49.448140] ata1.00: configured for UDMA/133 [ 49.448173] ata1.00: device reported invalid CHS sector 0 [ 49.448196] ata1: EH complete`), want: splitLines(`[ 49.135097] ata1.00: exception Emask 0x0 SAct 0x10000000 SErr 0x0 action 0x6 frozen [ 49.135142] ata1: hard resetting link`), }, { name: "SCSI errors", lines: splitLines(`[ 241.378165] sd 0:0:0:0: [sda] 30031872 512-byte logical blocks: (15.4 GB/14.3 GiB) [ 241.378905] sd 0:0:0:0: [sda] Write Protect is off [ 241.378910] sd 0:0:0:0: [sda] Mode Sense: 43 00 00 00 [ 241.379429] sd 0:0:0:0: [sda] Write cache: disabled, read cache: enabled, doesn't support DPO or FUA [ 241.414705] sd 0:0:0:0: [sda] Attached SCSI removable disk [ 241.614066] sd 0:0:0:0: [sda] tag#0 FAILED Result: hostbyte=DID_ERROR driverbyte=DRIVER_OK [ 241.614076] sd 0:0:0:0: [sda] tag#0 CDB: Read(10) 28 00 00 05 d0 80 00 00 08 00 [ 241.614080] print_req_error: I/O error, dev sda, sector 381056 [ 241.654058] sd 0:0:0:0: [sda] tag#0 FAILED Result: hostbyte=DID_ERROR driverbyte=DRIVER_OK [ 241.654068] sd 0:0:0:0: [sda] tag#0 CDB: Read(10) 28 00 00 01 50 48 00 00 30 00`), want: splitLines(`[ 241.614066] sd 0:0:0:0: [sda] tag#0 FAILED Result: hostbyte=DID_ERROR driverbyte=DRIVER_OK [ 241.654058] sd 0:0:0:0: [sda] tag#0 FAILED Result: hostbyte=DID_ERROR driverbyte=DRIVER_OK`), }, // 
Block device errors should be ignored because some of them are // produced for loopback devices, in which case errors are harmless. { name: "Block device errors (blk_update_request)", lines: splitLines(`[ 16.076930] blk_update_request: I/O error, dev loop9, sector 0 [ 16.076941] blk_update_request: I/O error, dev loop9, sector 0`), want: nil, }, { name: "Block device errors (print_req_error)", lines: splitLines(`[ 112.866869] print_req_error: I/O error, dev loop9, sector 0 [ 112.866888] print_req_error: I/O error, dev loop9, sector 0 [ 112.866893] Buffer I/O error on dev loop9, logical block 0, async page read`), want: nil, }, } { got := diskErrorLines(tc.lines) if diff := cmp.Diff(got, tc.want); diff != "" { t.Errorf("%s: mismatch (-got +want):\n%s", tc.name, diff) } } }
explode_data.jsonl/5838
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1298 }
[ 2830, 3393, 47583, 1454, 16794, 1155, 353, 8840, 836, 8, 341, 1903, 2292, 16794, 1669, 2915, 7235, 914, 8, 3056, 917, 341, 197, 853, 9069, 19823, 7235, 11, 2917, 77, 1138, 197, 630, 2023, 8358, 17130, 1669, 2088, 3056, 1235, 341, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBuildCreateRequest(t *testing.T) { participant1 := &og.Participant{Type: og.User, Username: "p1"} participant2 := &og.Participant{Type: og.Team, Name: "t2"} participants := make([]og.Participant, 2) participants[0] = *participant1 participants[1] = *participant2 restriction1 := og.Restriction{StartDay: og.Saturday, StartHour: 5, StartMin: 3, EndDay: og.Friday, EndMin: 5, EndHour: 2} restriction2 := og.Restriction{StartDay: og.Monday, StartHour: 12, StartMin: 33, EndDay: og.Friday, EndMin: 6, EndHour: 20} restrictions := make([]og.Restriction, 2) restrictions[0] = restriction1 restrictions[1] = restriction2 startDate := time.Now() timeRestriction := og.TimeRestriction{Type: og.WeekdayAndTimeOfDay, RestrictionList: restrictions} ownerTeam := &og.OwnerTeam{Name: "aTeam", Id: "id"} rotation1 := &og.Rotation{Name: "rot1", StartDate: &startDate, EndDate: nil, Type: og.Weekly, Length: 5, Participants: participants, TimeRestriction: &timeRestriction} rotation2 := &og.Rotation{Name: "rot2", StartDate: &startDate, EndDate: nil, Type: og.Weekly, Length: 5, Participants: participants, TimeRestriction: &timeRestriction} rotations := []og.Rotation{ *rotation1, *rotation2, } expectedCreateRequest := &CreateRequest{Name: "sch1", Description: "desc", Timezone: "aZone", Enabled: true, OwnerTeam: ownerTeam, Rotations: rotations} tr := og.TimeRestriction{Type: og.WeekdayAndTimeOfDay} tr.WithRestrictions(restriction1, restriction2) createRequest := &CreateRequest{Name: "sch1", Description: "desc", Timezone: "aZone", Enabled: true, OwnerTeam: ownerTeam} createRequest.WithRotation(rotation1.WithParticipants(*participant1, *participant2)). WithRotation(rotation2.WithParticipants(*participant1, *participant2). WithTimeRestriction(tr)) assert.Equal(t, expectedCreateRequest, createRequest) err := createRequest.Validate() assert.Nil(t, err) }
explode_data.jsonl/64414
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 656 }
[ 2830, 3393, 11066, 4021, 1900, 1155, 353, 8840, 836, 8, 341, 72872, 21757, 16, 1669, 609, 538, 52250, 21757, 90, 929, 25, 7350, 7344, 11, 29545, 25, 330, 79, 16, 16707, 72872, 21757, 17, 1669, 609, 538, 52250, 21757, 90, 929, 25, 73...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetCredsFromConfig(t *testing.T) { tests := []struct { name string authConfig AuthConfig }{ { "success_case_without_role", AuthConfig{Region: "region", Service: "service"}, }, { "success_case_with_role", AuthConfig{Region: "region", Service: "service", RoleArn: "arn:aws:iam::123456789012:role/IAMRole"}, }, } // run tests for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { creds, err := getCredsFromConfig(tt.authConfig) require.NoError(t, err, "Failed getCredsFromConfig") require.NotNil(t, creds) }) } }
explode_data.jsonl/19761
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 251 }
[ 2830, 3393, 1949, 34, 53369, 3830, 2648, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 981, 914, 198, 197, 78011, 2648, 7366, 2648, 198, 197, 59403, 197, 197, 515, 298, 197, 1, 5630, 19096, 39904, 19792, 756,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHttpGetNon200Response(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() ctx := context.Background() response := GetJobsResponse{ Jobs: []FlinkJob{ { JobID: "j1", }, }, } responder, _ := httpmock.NewJsonResponder(500, response) httpmock.RegisterResponder("GET", fakeJobsURL, responder) client := getTestJobManagerClient() _, err := client.GetJobs(ctx, testURL) assert.NotNil(t, err) assert.EqualError(t, err, "GetJobs call failed with status 500 and message ''") }
explode_data.jsonl/32374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 202 }
[ 2830, 3393, 29774, 8121, 17, 15, 15, 2582, 1155, 353, 8840, 836, 8, 341, 28080, 16712, 14140, 731, 741, 16867, 1758, 16712, 8934, 16856, 3036, 14828, 741, 20985, 1669, 2266, 19047, 741, 21735, 1669, 2126, 40667, 2582, 515, 197, 17100, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDiveKeys(t *testing.T) { type Test struct { Map map[string]string `s:"dive,keys,default,endkeys,default"` } set := New() set.SetTagName("s") set.Register("default", func(ctx context.Context, t *Transformer, value reflect.Value, param string) error { value.Set(reflect.ValueOf("after")) return nil }) set.Register("err", func(ctx context.Context, t *Transformer, value reflect.Value, param string) error { return errors.New("err") }) test := Test{ Map: map[string]string{ "b4": "b4", }, } err := set.Struct(context.Background(), &test) Equal(t, err, nil) val := test.Map["after"] Equal(t, val, "after") m := map[string]string{ "b4": "b4", } err = set.Field(context.Background(), &m, "dive,keys,default,endkeys,default") Equal(t, err, nil) val = m["after"] Equal(t, val, "after") err = set.Field(context.Background(), &m, "keys,endkeys,default") Equal(t, err, ErrInvalidKeysTag) err = set.Field(context.Background(), &m, "dive,endkeys,default") Equal(t, err, ErrUndefinedKeysTag) err = set.Field(context.Background(), &m, "dive,keys,undefinedtag") Equal(t, err, ErrUndefinedTag{tag: "undefinedtag"}) err = set.Field(context.Background(), &m, "dive,keys,err,endkeys") NotEqual(t, err, nil) m = map[string]string{ "b4": "b4", } err = set.Field(context.Background(), &m, "dive,keys,default,endkeys,err") NotEqual(t, err, nil) }
explode_data.jsonl/43623
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 552 }
[ 2830, 3393, 35, 533, 8850, 1155, 353, 8840, 836, 8, 1476, 13158, 3393, 2036, 341, 197, 26873, 2415, 14032, 30953, 1565, 82, 2974, 67, 533, 11, 10563, 87696, 42717, 10563, 87696, 8805, 197, 630, 8196, 1669, 1532, 741, 8196, 4202, 22616, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateSignature(t *testing.T) { // Test case: Happy case with programatically generated values // 1. create ECDSA cert and key template := x509.Certificate{ Subject: pkix.Name{ CommonName: "example-a.com", }, SerialNumber: big.NewInt(1337), } certBytes, key, err := createECDSACertAndKeyFromTemplate(template) require.NoError(t, err) // 2. Sign a message with the ECDSA key random := rand.Reader hashed, err := computeSHA2Hash([]byte("localhost:9080/network1/mychannel:interop:Read:anonce"), key.PublicKey.Params().BitSize) require.NoError(t, err) signature, err := ecdsa.SignASN1(random, key, hashed) require.NoError(t, err) // 3. Generate PEM cert from DER format // https://gist.github.com/samuel/8b500ddd3f6118d052b5e6bc16bc4c09 out := &bytes.Buffer{} pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: certBytes}) x509Cert, err := parseCert(string(out.Bytes())) require.NoError(t, err) err = validateSignature("localhost:9080/network1/mychannel:interop:Read:anonce", x509Cert, string(signature)) require.NoError(t, err) // Test case: Trying to validate hashed message with unhashed signature msg := "localhost:9080/network1/mychannel:interop:Read:anonce" r := rand.Reader invalidSignature, err := ecdsa.SignASN1(r, key, []byte(msg)) require.NoError(t, err) err = validateSignature("localhost:9080/network1/mychannel:interop:Read:anonce", x509Cert, string(invalidSignature)) require.EqualError(t, err, "Signature Verification failed. ECDSA VERIFY") }
explode_data.jsonl/48210
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 555 }
[ 2830, 3393, 17926, 25088, 1155, 353, 8840, 836, 8, 341, 197, 322, 3393, 1142, 25, 23355, 1142, 448, 2025, 7022, 7907, 2750, 198, 197, 322, 220, 16, 13, 1855, 20633, 72638, 2777, 323, 1376, 198, 22832, 1669, 856, 20, 15, 24, 727, 209...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWaitForV1alpha1Failure(t *testing.T) { w := &waitFailSourcer{ newSourcer(), } err := Pivot(w, &target{}, "") if err == nil { t.Fatal("expected an error but got nil") } }
explode_data.jsonl/72724
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 81 }
[ 2830, 3393, 14190, 2461, 53, 16, 7141, 16, 17507, 1155, 353, 8840, 836, 8, 1476, 6692, 1669, 609, 11489, 19524, 50, 413, 3828, 515, 197, 8638, 50, 413, 3828, 3148, 197, 532, 9859, 1669, 97893, 3622, 11, 609, 5657, 22655, 14676, 743, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestRoleNotControlledByUs(t *testing.T) { f := newFixture(t) startTime := metav1.Now() completionTime := metav1.Now() var replicas int32 = 64 mpiJob := newMPIJob("test", &replicas, 1, gpuResourceName, &startTime, &completionTime) f.setUpMPIJob(mpiJob) configMap := newConfigMap(mpiJob, replicas, isGPULauncher(mpiJob)) updateDiscoverHostsInConfigMap(configMap, mpiJob, nil, isGPULauncher(mpiJob)) f.setUpConfigMap(configMap) f.setUpServiceAccount(newLauncherServiceAccount(mpiJob)) role := newLauncherRole(mpiJob, replicas) role.OwnerReferences = nil f.setUpRole(role) f.runExpectError(getKey(mpiJob, t)) }
explode_data.jsonl/29956
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 243 }
[ 2830, 3393, 9030, 2623, 3273, 832, 1359, 3558, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 501, 18930, 1155, 340, 21375, 1462, 1669, 77520, 16, 13244, 741, 32810, 14386, 1462, 1669, 77520, 16, 13244, 2822, 2405, 80801, 526, 18, 17, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEc2Instance_E2E(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() ec2Instance, mockEC2ApiHelper := getMockInstance(ctrl) mockEC2ApiHelper.EXPECT().GetInstanceDetails(&instanceID).Return(nwInterfaces, nil) mockEC2ApiHelper.EXPECT().GetSubnet(&subnetID).Return(subnet, nil) // Assert no error on loading the instance details err := ec2Instance.LoadDetails(mockEC2ApiHelper) assert.NoError(t, err) // Check index is not used, assign index and verify index is used now assert.False(t, ec2Instance.deviceIndexes[1]) index, err := ec2Instance.GetHighestUnusedDeviceIndex() assert.NoError(t, err) assert.Equal(t, int64(1), index) assert.True(t, ec2Instance.deviceIndexes[1]) // Check index is used and then free that index assert.True(t, ec2Instance.deviceIndexes[1]) ec2Instance.FreeDeviceIndex(deviceIndex0) assert.False(t, ec2Instance.deviceIndexes[deviceIndex0]) }
explode_data.jsonl/762
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 321 }
[ 2830, 3393, 50730, 17, 2523, 2089, 17, 36, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 197, 757, 17, 2523, 11, 7860, 7498, 17, 6563, 5511, 1669, 633, 11571, 2523, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDoubleClose(t *testing.T) { a, _ := memPipe() err := a.Close() if err != nil { t.Errorf("Close: %v", err) } err = a.Close() if err != io.EOF { t.Errorf("expect EOF on double close.") } }
explode_data.jsonl/62917
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 118 }
[ 2830, 3393, 7378, 7925, 1155, 353, 8840, 836, 8, 341, 262, 264, 11, 716, 1669, 1833, 34077, 741, 262, 1848, 1669, 264, 10421, 741, 262, 421, 1848, 961, 2092, 341, 286, 259, 13080, 445, 7925, 25, 1018, 85, 497, 1848, 340, 262, 456, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFieldErrors_ViaIndex(t *testing.T) { expected := cli.FieldErrors{ &field.Error{Field: "[2]"}, &field.Error{Field: "[2].field"}, &field.Error{Field: "[2][0]"}, } actual := cli.FieldErrors{ &field.Error{Field: "[]"}, &field.Error{Field: "field"}, &field.Error{Field: "[0]"}, }.ViaIndex(2) if diff := cmp.Diff(expected, actual); diff != "" { t.Errorf("(-expected, +actual): %s", diff) } }
explode_data.jsonl/13212
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 176 }
[ 2830, 3393, 1877, 13877, 2334, 685, 1552, 1155, 353, 8840, 836, 8, 341, 42400, 1669, 21348, 17087, 13877, 515, 197, 197, 5, 2566, 6141, 90, 1877, 25, 10545, 17, 60, 7115, 197, 197, 5, 2566, 6141, 90, 1877, 25, 10545, 17, 936, 2566, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUnitHandleDeletePractitioner(t *testing.T) { err := os.Chdir("..") if err != nil { log.ErrorR(nil, fmt.Errorf("error accessing root directory")) } Convey("Must need a transactionID in the URL", t, func() { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() res := serveDeletePractitionerRequest(mock_dao.NewMockService(mockCtrl), false, true) So(res.Code, ShouldEqual, http.StatusBadRequest) }) Convey("Must need a practitionerID in the URL", t, func() { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() res := serveDeletePractitionerRequest(mock_dao.NewMockService(mockCtrl), true, false) So(res.Code, ShouldEqual, http.StatusBadRequest) }) Convey("Error checking if transaction is closed against transaction api", t, func() { httpmock.Activate() mockCtrl := gomock.NewController(t) defer httpmock.DeactivateAndReset() defer mockCtrl.Finish() // Expect the transaction api to be called and return an error httpmock.RegisterResponder(http.MethodGet, "https://api.companieshouse.gov.uk/transactions/12345678", httpmock.NewStringResponder(http.StatusInternalServerError, "")) res := serveDeletePractitionerRequest(mock_dao.NewMockService(mockCtrl), true, true) So(res.Code, ShouldEqual, http.StatusInternalServerError) }) Convey("Transaction is already closed and cannot be updated", t, func() { httpmock.Activate() mockCtrl := gomock.NewController(t) defer httpmock.DeactivateAndReset() defer mockCtrl.Finish() // Expect the transaction api to be called and return an already closed transaction httpmock.RegisterResponder(http.MethodGet, "https://api.companieshouse.gov.uk/transactions/12345678", httpmock.NewStringResponder(http.StatusOK, transactionProfileResponseClosed)) res := serveDeletePractitionerRequest(mock_dao.NewMockService(mockCtrl), true, true) So(res.Code, ShouldEqual, http.StatusForbidden) }) Convey("Generic error when deleting practitioner resource from mongo", t, func() { httpmock.Activate() mockCtrl := gomock.NewController(t) defer 
mockCtrl.Finish() defer httpmock.DeactivateAndReset() // Expect the transaction api to be called and return an open transaction httpmock.RegisterResponder(http.MethodGet, "https://api.companieshouse.gov.uk/transactions/12345678", httpmock.NewStringResponder(http.StatusOK, transactionProfileResponse)) mockService := mock_dao.NewMockService(mockCtrl) // Expect DeletePractitioner to be called once and return an error mockService.EXPECT().DeletePractitioner(practitionerID, transactionID).Return(fmt.Errorf("there was a problem handling your request for transaction %s", transactionID), http.StatusBadRequest).Times(1) res := serveDeletePractitionerRequest(mockService, true, true) So(res.Code, ShouldEqual, http.StatusBadRequest) }) Convey("Error when retrieving practitioner resources from mongo - insolvency case or practitioner not found", t, func() { httpmock.Activate() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() defer httpmock.DeactivateAndReset() // Expect the transaction api to be called and return an open transaction httpmock.RegisterResponder(http.MethodGet, "https://api.companieshouse.gov.uk/transactions/12345678", httpmock.NewStringResponder(http.StatusOK, transactionProfileResponse)) mockService := mock_dao.NewMockService(mockCtrl) // Expect DeletePractitioner to be called once and return nil, 404 mockService.EXPECT().DeletePractitioner(practitionerID, transactionID).Return(nil, http.StatusNotFound).Times(1) res := serveDeletePractitionerRequest(mockService, true, true) So(res.Code, ShouldEqual, http.StatusNotFound) }) Convey("Successfully retrieve practitioners for insolvency case", t, func() { httpmock.Activate() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() defer httpmock.DeactivateAndReset() // Expect the transaction api to be called and return an open transaction httpmock.RegisterResponder(http.MethodGet, "https://api.companieshouse.gov.uk/transactions/12345678", httpmock.NewStringResponder(http.StatusOK, 
transactionProfileResponse)) mockService := mock_dao.NewMockService(mockCtrl) // Expect DeletePractitioner to be called once and return http status NoContent, nil mockService.EXPECT().DeletePractitioner(practitionerID, transactionID).Return(nil, http.StatusNoContent).Times(1) res := serveDeletePractitionerRequest(mockService, true, true) So(res.Code, ShouldEqual, http.StatusNoContent) }) }
explode_data.jsonl/25958
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1451 }
[ 2830, 3393, 4562, 6999, 6435, 3533, 531, 71246, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 2643, 6353, 3741, 95032, 1138, 743, 1848, 961, 2092, 341, 197, 6725, 6141, 49, 27907, 11, 8879, 13080, 445, 841, 31788, 3704, 6220, 5455, 197, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewIngressSource(t *testing.T) { for _, ti := range []struct { title string annotationFilter string fqdnTemplate string combineFQDNAndAnnotation bool expectError bool }{ { title: "invalid template", expectError: true, fqdnTemplate: "{{.Name", }, { title: "valid empty template", expectError: false, }, { title: "valid template", expectError: false, fqdnTemplate: "{{.Name}}-{{.Namespace}}.ext-dns.test.com", }, { title: "valid template", expectError: false, fqdnTemplate: "{{.Name}}-{{.Namespace}}.ext-dns.test.com, {{.Name}}-{{.Namespace}}.ext-dna.test.com", }, { title: "valid template", expectError: false, fqdnTemplate: "{{.Name}}-{{.Namespace}}.ext-dns.test.com, {{.Name}}-{{.Namespace}}.ext-dna.test.com", combineFQDNAndAnnotation: true, }, { title: "non-empty annotation filter label", expectError: false, annotationFilter: "kubernetes.io/ingress.class=nginx", }, } { t.Run(ti.title, func(t *testing.T) { _, err := NewIngressSource( fake.NewSimpleClientset(), "", ti.annotationFilter, ti.fqdnTemplate, ti.combineFQDNAndAnnotation, false, ) if ti.expectError { assert.Error(t, err) } else { assert.NoError(t, err) } }) } }
explode_data.jsonl/29930
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 733 }
[ 2830, 3393, 3564, 641, 2483, 3608, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 8988, 1669, 2088, 3056, 1235, 341, 197, 24751, 503, 914, 198, 197, 197, 24674, 5632, 260, 914, 198, 197, 1166, 80, 17395, 7275, 1797, 914, 198, 197, 197, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLocalDirectory(t *testing.T) { if runtime.GOOS == "darwin" { switch runtime.GOARCH { case "arm", "arm64": t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH) } } cwd, err := os.Getwd() if err != nil { t.Fatal(err) } p, err := ImportDir(cwd, 0) if err != nil { t.Fatal(err) } if p.ImportPath != "go/build" { t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "go/build") } }
explode_data.jsonl/516
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 200 }
[ 2830, 3393, 7319, 9310, 1155, 353, 8840, 836, 8, 341, 743, 15592, 97574, 3126, 621, 330, 98765, 1, 341, 197, 8961, 15592, 97574, 10790, 341, 197, 2722, 330, 2178, 497, 330, 2178, 21, 19, 4660, 298, 3244, 57776, 69, 445, 4886, 5654, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestGetUsersNotInChannel(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() teamId := th.BasicTeam.Id channelId := th.BasicChannel.Id user := th.CreateUser() th.LinkUserToTeam(user, th.BasicTeam) rusers, _, err := th.Client.GetUsersNotInChannel(teamId, channelId, 0, 60, "") require.NoError(t, err) for _, u := range rusers { CheckUserSanitization(t, u) } rusers, _, err = th.Client.GetUsersNotInChannel(teamId, channelId, 0, 1, "") require.NoError(t, err) require.Len(t, rusers, 1, "should be 1 per page") rusers, _, err = th.Client.GetUsersNotInChannel(teamId, channelId, 10000, 100, "") require.NoError(t, err) require.Empty(t, rusers, "should be no users") th.Client.Logout() _, resp, err := th.Client.GetUsersNotInChannel(teamId, channelId, 0, 60, "") require.Error(t, err) CheckUnauthorizedStatus(t, resp) th.Client.Login(user.Email, user.Password) _, resp, err = th.Client.GetUsersNotInChannel(teamId, channelId, 0, 60, "") require.Error(t, err) CheckForbiddenStatus(t, resp) _, _, err = th.SystemAdminClient.GetUsersNotInChannel(teamId, channelId, 0, 60, "") require.NoError(t, err) }
explode_data.jsonl/47522
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 447 }
[ 2830, 3393, 1949, 7137, 2623, 641, 9629, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 741, 197, 9196, 764, 1669, 270, 48868, 14597, 6444, 198, 71550, 764, 1669, 270, 48868, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestEntryAddUntimedFutureMetadata(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() nowNanos := time.Now().UnixNano() inputMetadatas := metadata.StagedMetadatas{ metadata.StagedMetadata{ CutoverNanos: nowNanos + 100, Tombstoned: false, Metadata: metadata.Metadata{Pipelines: testPipelines}, }, } e, _, now := testEntry(ctrl, testEntryOptions{}) *now = time.Unix(0, nowNanos) require.Equal(t, errNoApplicableMetadata, e.AddUntimed(testCounter, inputMetadatas)) }
explode_data.jsonl/24211
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 208 }
[ 2830, 3393, 5874, 2212, 20250, 75485, 24206, 14610, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 80922, 45, 43605, 1669, 882, 13244, 1005, 55832, 83819, 741, 22427, 3467...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClientReplaceProjectParametersInherited(t *testing.T) { client, err := NewRealTestClient(t) require.NoError(t, err, "Expected no error") err = client.DeleteProject("Empty3") require.NoError(t, err, "Expected no error") err = client.CreateProject(&types.Project{ Name: "Empty3", }) require.NoError(t, err, "Expected no error") err = client.ReplaceAllProjectParameters("Empty3", &types.Parameters{ "env.MUH": types.Parameter{ Value: "Hush", Spec: &types.ParameterSpec{ Label: "Muh value", Description: "The Muh value that does all the Muhing", Type: types.PasswordType{}, }, }, "config.inherited": types.Parameter{ Value: "Parent", Spec: &types.ParameterSpec{ Label: "AWW", Type: types.CheckboxType{"Hello", "Copperhead"}, }, }, "config.inherited2": types.Parameter{ Value: "Parent", }, "env.AWW": types.Parameter{ Value: "Parent", Spec: &types.ParameterSpec{ Label: "AWW", Type: types.TextType{"any"}, }, }, "env.DAMM": types.Parameter{ Value: "Parent", }, }) err = client.DeleteProject("Empty3_TestClientReplaceProjectParametersInherited") require.NoError(t, err, "Expected no error") err = client.CreateProject(&types.Project{ ID: "Empty3_TestClientReplaceProjectParametersInherited", ParentProjectID: "Empty3", Name: "TestClientReplaceProjectParametersInherited", }) require.NoError(t, err, "Expected no error") err = client.ReplaceAllProjectParameters("Empty3_TestClientReplaceProjectParametersInherited", &types.Parameters{ "config.remove": types.Parameter{ Value: "Hello", }, "config.replace": types.Parameter{ Value: "Dink", Spec: &types.ParameterSpec{ Label: "Buhhhhh", Type: types.TextType{"any"}, }, }, "config.inherited": types.Parameter{ Value: "Dink", Spec: &types.ParameterSpec{ Label: "Buhhhhh", Type: types.TextType{"any"}, }, }, "config.inherited2": types.Parameter{ Value: "Dink", }, "env.AWW": types.Parameter{ Value: "BuildConf", }, "env.DAMM": types.Parameter{ Value: "BuildConf", }, }) require.NoError(t, err, "Expected no error") parameters := types.Parameters{ 
"env.HELLO": types.Parameter{"Good job", nil}, "config.replace": types.Parameter{ Value: "Mink", Spec: &types.ParameterSpec{ Label: "Minker", Type: types.CheckboxType{ Checked: "Flunk", }, }, }, "config.inherited": types.Parameter{ Value: "Dink", Spec: &types.ParameterSpec{ Label: "Buhhhhh", Type: types.TextType{"any"}, }, }, "config.inherited2": types.Parameter{ Value: "Dink", Spec: &types.ParameterSpec{ Label: "Buhhhhh", Type: types.TextType{"any"}, }, }, "aws.hush": types.Parameter{ Value: "Bad Job", Spec: &types.ParameterSpec{ Type: types.PasswordType{}, }, }, "env.AWW": types.Parameter{ Value: "", Spec: &types.ParameterSpec{ Type: types.CheckboxType{ Checked: "BuildCD", }, }, }, "env.MUH": types.Parameter{ Value: "Hello", Spec: &types.ParameterSpec{ Label: "Plunk", Type: types.PasswordType{}, }, }, } err = client.ReplaceAllProjectParameters("Empty3_TestClientReplaceProjectParametersInherited", &parameters) require.NoError(t, err, "Expected no error") require.NotNil(t, parameters, "Update to return parameters") expected := types.Parameters{ "env.HELLO": types.Parameter{"Good job", nil}, "config.replace": types.Parameter{ Value: "Mink", Spec: &types.ParameterSpec{ Label: "Minker", Type: types.CheckboxType{ Checked: "Flunk", }, }, }, "config.inherited": types.Parameter{ Value: "Dink", Spec: &types.ParameterSpec{ Label: "AWW", Type: types.CheckboxType{"Hello", "Copperhead"}, }, }, "config.inherited2": types.Parameter{ Value: "Dink", Spec: &types.ParameterSpec{ Label: "Buhhhhh", Type: types.TextType{"any"}, }, }, "aws.hush": types.Parameter{ Value: "", Spec: &types.ParameterSpec{ Type: types.PasswordType{}, }, }, "env.AWW": types.Parameter{ Value: "", Spec: &types.ParameterSpec{ Label: "AWW", Type: types.TextType{"any"}, }, }, "env.MUH": types.Parameter{ Value: "", Spec: &types.ParameterSpec{ Label: "Muh value", Description: "The Muh value that does all the Muhing", Type: types.PasswordType{}, }, }, "env.DAMM": types.Parameter{ Value: "Parent", }, } assert.Equal(t, 
expected, parameters) config, err := client.GetProject("Empty3_TestClientReplaceProjectParametersInherited") require.NoError(t, err, "Expected no error") require.NotNil(t, config, "Get to return config") assert.Equal(t, expected, config.Parameters) }
explode_data.jsonl/5823
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2015 }
[ 2830, 3393, 2959, 23107, 7849, 9706, 641, 48394, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 1532, 12768, 2271, 2959, 1155, 340, 17957, 35699, 1155, 11, 1848, 11, 330, 18896, 902, 1465, 1138, 9859, 284, 2943, 18872, 7849, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetModuleMajorVersion(t *testing.T) { type testcase struct { version string expectedMajorVersion string } tests := []testcase{ { "v0.0.100", "", }, { "v1.0.0-alpha.sdqq.dirty", "", }, { "v2.0.0", "v2", }, { "v3.147.2-alpha.sdqq.dirty", "v3", }, } for _, test := range tests { majorVersion := GetModuleMajorVersion(test.version) assert.Equal(t, test.expectedMajorVersion, majorVersion) } }
explode_data.jsonl/25437
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 231 }
[ 2830, 3393, 1949, 3332, 34475, 5637, 1155, 353, 8840, 836, 8, 341, 13158, 70080, 2036, 341, 197, 74954, 1060, 914, 198, 197, 42400, 34475, 5637, 914, 198, 197, 630, 78216, 1669, 3056, 1944, 5638, 515, 197, 197, 515, 298, 197, 1, 85, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLocalResource(t *testing.T) { dir := t.TempDir() mockStore, err := storage.NewLocalStorage(dir) require.NoError(t, err) err = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/common/GetStorageSize", "return(2048)") require.NoError(t, err) defer func() { _ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/common/GetStorageSize") }() cfg := config.NewConfig() cfg.Mydumper.SourceDir = dir cfg.TikvImporter.SortedKVDir = dir cfg.TikvImporter.Backend = "local" rc := &Controller{ cfg: cfg, store: mockStore, ioWorkers: worker.NewPool(context.Background(), 1, "io"), } // 1. source-size is smaller than disk-size, won't trigger error information rc.checkTemplate = NewSimpleTemplate() err = rc.localResource(1000) require.NoError(t, err) tmpl := rc.checkTemplate.(*SimpleTemplate) require.Equal(t, 1, tmpl.warnFailedCount) require.Equal(t, 0, tmpl.criticalFailedCount) require.Equal(t, "local disk resources are rich, estimate sorted data size 1000B, local available is 2KiB", tmpl.normalMsgs[1]) // 2. source-size is bigger than disk-size, with default disk-quota will trigger a critical error rc.checkTemplate = NewSimpleTemplate() err = rc.localResource(4096) require.NoError(t, err) tmpl = rc.checkTemplate.(*SimpleTemplate) require.Equal(t, 1, tmpl.warnFailedCount) require.Equal(t, 1, tmpl.criticalFailedCount) require.Equal(t, "local disk space may not enough to finish import, estimate sorted data size is 4KiB, but local available is 2KiB, please set `tikv-importer.disk-quota` to a smaller value than 2KiB or change `mydumper.sorted-kv-dir` to another disk with enough space to finish imports", tmpl.criticalMsgs[0]) // 3. 
source-size is bigger than disk-size, with a vaild disk-quota will trigger a warning rc.checkTemplate = NewSimpleTemplate() rc.cfg.TikvImporter.DiskQuota = config.ByteSize(1024) err = rc.localResource(4096) require.NoError(t, err) tmpl = rc.checkTemplate.(*SimpleTemplate) require.Equal(t, 1, tmpl.warnFailedCount) require.Equal(t, 0, tmpl.criticalFailedCount) require.Equal(t, "local disk space may not enough to finish import, estimate sorted data size is 4KiB, but local available is 2KiB,we will use disk-quota (size: 1KiB) to finish imports, which may slow down import", tmpl.normalMsgs[1]) }
explode_data.jsonl/7889
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 829 }
[ 2830, 3393, 7319, 4783, 1155, 353, 8840, 836, 8, 341, 48532, 1669, 259, 65009, 6184, 2822, 77333, 6093, 11, 1848, 1669, 5819, 7121, 90464, 14161, 340, 17957, 35699, 1155, 11, 1848, 692, 9859, 284, 3690, 2768, 32287, 445, 5204, 905, 4322...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDeriveNotNullConds(t *testing.T) { var ( input []string output []struct { Plan string Left string Right string } ) planSuiteUnexportedData.GetTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() for i, ca := range input { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagDecorrelate, p.(LogicalPlan)) require.NoError(t, err, comment) testdata.OnRecord(func() { output[i].Plan = ToString(p) }) require.Equal(t, output[i].Plan, ToString(p), comment) join := p.(LogicalPlan).Children()[0].(*LogicalJoin) left := join.Children()[0].(*DataSource) right := join.Children()[1].(*DataSource) leftConds := fmt.Sprintf("%s", left.pushedDownConds) rightConds := fmt.Sprintf("%s", right.pushedDownConds) testdata.OnRecord(func() { output[i].Left, output[i].Right = leftConds, rightConds }) require.Equal(t, output[i].Left, leftConds, comment) require.Equal(t, output[i].Right, rightConds, comment) } }
explode_data.jsonl/50206
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 512 }
[ 2830, 3393, 22171, 533, 11005, 1109, 5356, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 22427, 220, 3056, 917, 198, 197, 21170, 3056, 1235, 341, 298, 197, 20485, 220, 914, 198, 298, 197, 5415, 220, 914, 198, 298, 197, 5979, 914, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPingButton(t *testing.T) { var expectedPing = "PONG" fmt.Println("[WHAT TO DO] Click on \"Confirm\"") t.Log("We need to test the PING.") { t.Logf("\tChecking PING for response \"%s\"", expectedPing) { str, msgType := common.Call(client, client.Ping(expectedPing, false, false, true)) if msgType != 2 { t.Errorf("\t\tExpected msgType=2, received %d", msgType) } if str != expectedPing { t.Errorf("\t\tExpected str=\"%s\", received\"%s\"", expectedPing, str) } if msgType == 2 && str == expectedPing { t.Log("\t\tEverything went fine, \\ʕ◔ϖ◔ʔ/ YAY!") } } } }
explode_data.jsonl/46195
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 271 }
[ 2830, 3393, 69883, 1567, 1155, 353, 8840, 836, 8, 1476, 2405, 3601, 69883, 284, 330, 47, 7539, 1837, 11009, 12419, 10937, 59860, 5146, 9319, 60, 9189, 389, 7245, 16728, 2105, 1138, 3244, 5247, 445, 1654, 1184, 311, 1273, 279, 393, 1718,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestParseBashConfirm(t *testing.T) { if testing.Short() { t.Skip("calling bash is slow.") } if !hasBash44 { t.Skip("bash 4.4 required to run") } i := 0 for _, c := range append(fileTests, fileTestsNoPrint...) { if c.Bash == nil { continue } for j, in := range c.Strs { t.Run(fmt.Sprintf("%03d-%d", i, j), confirmParse(in, "bash", false)) } i++ } }
explode_data.jsonl/31430
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 182 }
[ 2830, 3393, 14463, 33, 988, 16728, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 73726, 27023, 374, 6301, 13053, 197, 532, 743, 753, 4648, 33, 988, 19, 19, 341, 197, 3244, 57776, 445, 46216, 220, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestComputeChaincodeEndpoint(t *testing.T) { /**场景1:未设置chaincodeaddress和chaincodelistenaddress**/ viper.Set(chaincodeAddrKey, nil) viper.Set(chaincodeListenAddrKey, nil) //场景1.1:对等地址为0.0.0.0 //ComputechaincodeEndpoint将返回错误 peerAddress0 := "0.0.0.0" ccEndpoint, err := computeChaincodeEndpoint(peerAddress0) assert.Error(t, err) assert.Equal(t, "", ccEndpoint) //场景1.2:对等地址不是0.0.0.0 //ChaincodeEndpoint将是对等地址:7052 peerAddress := "127.0.0.1" ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.NoError(t, err) assert.Equal(t, peerAddress+":7052", ccEndpoint) /**场景2:仅设置chaincodelistenaddress**/ //场景2.1:chaincodeListenAddress为0.0.0.0 chaincodeListenPort := "8052" settingChaincodeListenAddress0 := "0.0.0.0:" + chaincodeListenPort viper.Set(chaincodeListenAddrKey, settingChaincodeListenAddress0) viper.Set(chaincodeAddrKey, nil) //场景2.1.1:对等地址为0.0.0.0 //ComputechaincodeEndpoint将返回错误 ccEndpoint, err = computeChaincodeEndpoint(peerAddress0) assert.Error(t, err) assert.Equal(t, "", ccEndpoint) //场景2.1.2:对等地址不是0.0.0.0 //chaincodeEndpoint将是peerAddress:chaincodeListenPort ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.NoError(t, err) assert.Equal(t, peerAddress+":"+chaincodeListenPort, ccEndpoint) //场景2.2:chaincodeListenAddress不是0.0.0.0 //chaincodeEndpoint将是chaincodeListenAddress settingChaincodeListenAddress := "127.0.0.1:" + chaincodeListenPort viper.Set(chaincodeListenAddrKey, settingChaincodeListenAddress) viper.Set(chaincodeAddrKey, nil) ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.NoError(t, err) assert.Equal(t, settingChaincodeListenAddress, ccEndpoint) //场景2.3:chaincodeListenAddress无效 //ComputechaincodeEndpoint将返回错误 settingChaincodeListenAddressInvalid := "abc" viper.Set(chaincodeListenAddrKey, settingChaincodeListenAddressInvalid) viper.Set(chaincodeAddrKey, nil) ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.Error(t, err) assert.Equal(t, "", ccEndpoint) /**场景3:仅设置链码地址**/ //场景3.1:链码地址为0.0.0.0 
//ComputechaincodeEndpoint将返回错误 chaincodeAddressPort := "9052" settingChaincodeAddress0 := "0.0.0.0:" + chaincodeAddressPort viper.Set(chaincodeListenAddrKey, nil) viper.Set(chaincodeAddrKey, settingChaincodeAddress0) ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.Error(t, err) assert.Equal(t, "", ccEndpoint) //场景3.2:chaincodeaddress不是0.0.0.0 //chaincodeEndpoint将是chaincodeAddress settingChaincodeAddress := "127.0.0.2:" + chaincodeAddressPort viper.Set(chaincodeListenAddrKey, nil) viper.Set(chaincodeAddrKey, settingChaincodeAddress) ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.NoError(t, err) assert.Equal(t, settingChaincodeAddress, ccEndpoint) //场景3.3:chaincodeaddress无效 //ComputechaincodeEndpoint将返回错误 settingChaincodeAddressInvalid := "bcd" viper.Set(chaincodeListenAddrKey, nil) viper.Set(chaincodeAddrKey, settingChaincodeAddressInvalid) ccEndpoint, err = computeChaincodeEndpoint(peerAddress) assert.Error(t, err) assert.Equal(t, "", ccEndpoint) /**场景4:同时设置chaincodeaddress和chaincodelistenaddress**/ //此方案与方案3相同:仅设置chaincodeaddress。 }
explode_data.jsonl/67971
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1409 }
[ 2830, 3393, 46254, 18837, 1851, 27380, 1155, 353, 8840, 836, 8, 341, 4165, 102122, 16, 5122, 38342, 43918, 8819, 1851, 4995, 33108, 8819, 66, 720, 37122, 4995, 31729, 5195, 12858, 4202, 62591, 1851, 13986, 1592, 11, 2092, 340, 5195, 12858...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRestartRemoteSpend(t *testing.T) { ctx := createSweeperTestContext(t) // Sweep input. input1 := spendableInputs[0] if _, err := ctx.sweeper.SweepInput(input1, defaultFeePref); err != nil { t.Fatal(err) } // Sweep another input. input2 := spendableInputs[1] if _, err := ctx.sweeper.SweepInput(input2, defaultFeePref); err != nil { t.Fatal(err) } ctx.tick() sweepTx := ctx.receiveTx() // Restart sweeper. ctx.restartSweeper() // Expect last tx to be republished. ctx.receiveTx() // Replace the sweep tx with a remote tx spending input 1. ctx.backend.deleteUnconfirmed(sweepTx.TxHash()) remoteTx := &wire.MsgTx{ TxIn: []*wire.TxIn{ { PreviousOutPoint: *(input2.OutPoint()), }, }, } if err := ctx.backend.publishTransaction(remoteTx); err != nil { t.Fatal(err) } // Mine remote spending tx. ctx.backend.mine() // Simulate other subsystem (eg contract resolver) re-offering input 0. spendChan, err := ctx.sweeper.SweepInput(input1, defaultFeePref) if err != nil { t.Fatal(err) } // Expect sweeper to construct a new tx, because input 1 was spend // remotely. ctx.tick() ctx.receiveTx() ctx.backend.mine() ctx.expectResult(spendChan, nil) ctx.finish(1) }
explode_data.jsonl/34228
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 483 }
[ 2830, 3393, 59354, 24703, 50, 3740, 1155, 353, 8840, 836, 8, 1476, 20985, 1669, 1855, 50, 896, 10436, 2271, 1972, 1155, 692, 197, 322, 85022, 1946, 624, 22427, 16, 1669, 8329, 480, 31946, 58, 15, 921, 743, 8358, 1848, 1669, 5635, 514,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func Test(t *testing.T) { t.Log("Current test is [a]") examples := [][]string{ { `"52"`, `"5"`, }, { `"4206"`, `""`, }, { `"35427"`, `"35427"`, }, } targetCaseNum := 0 // -1 if err := testutil.RunLeetCodeFuncWithExamples(t, largestOddNumber, examples, targetCaseNum); err != nil { t.Fatal(err) } }
explode_data.jsonl/78224
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 180 }
[ 2830, 3393, 1155, 353, 8840, 836, 8, 341, 3244, 5247, 445, 5405, 1273, 374, 508, 64, 47915, 8122, 4023, 1669, 52931, 917, 515, 197, 197, 515, 298, 197, 63, 1, 20, 17, 1, 7808, 715, 298, 197, 63, 1, 20, 1, 12892, 197, 197, 1583, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetListener(t *testing.T) { th.SetupHTTP() defer th.TeardownHTTP() HandleListenerGetSuccessfully(t) client := fake.ServiceClient() actual, err := listeners.Get(client, "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract() if err != nil { t.Fatalf("Unexpected Get error: %v", err) } th.CheckDeepEquals(t, ListenerDb, *actual) }
explode_data.jsonl/27552
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 138 }
[ 2830, 3393, 1949, 2743, 1155, 353, 8840, 836, 8, 341, 70479, 39820, 9230, 741, 16867, 270, 94849, 37496, 9230, 741, 197, 6999, 2743, 1949, 35959, 1155, 692, 25291, 1669, 12418, 13860, 2959, 741, 88814, 11, 1848, 1669, 23562, 2234, 12805, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTCPListenerName(t *testing.T) { testenv.MustHaveExternalNetwork(t) for _, tt := range tcpListenerNameTests { ln, err := ListenTCP(tt.net, tt.laddr) if err != nil { t.Fatal(err) } defer ln.Close() la := ln.Addr() if a, ok := la.(*TCPAddr); !ok || a.Port == 0 { t.Fatalf("got %v; expected a proper address with non-zero port number", la) } } }
explode_data.jsonl/18972
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 49896, 2743, 675, 1155, 353, 8840, 836, 8, 341, 18185, 3160, 50463, 12116, 25913, 12320, 1155, 692, 2023, 8358, 17853, 1669, 2088, 28051, 2743, 675, 18200, 341, 197, 197, 2261, 11, 1848, 1669, 32149, 49896, 47152, 5071, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestState_UserModes(t *testing.T) { t.Parallel() st := setupNewState() st.addUser(users[0]) if got, ok := st.UserModes(users[0], channels[0]); ok { t.Errorf("Expected: %v to be nil.", got) } st.addChannel(channels[0]) if got, ok := st.UserModes(users[0], channels[0]); ok { t.Errorf("Expected: %v to be nil.", got) } st.addToChannel(users[0], channels[0]) if _, ok := st.UserModes(users[0], channels[0]); !ok { t.Error("Unexpected nil.") } }
explode_data.jsonl/32079
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 199 }
[ 2830, 3393, 1397, 31339, 70035, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 18388, 1669, 6505, 3564, 1397, 741, 18388, 1364, 1474, 35438, 58, 15, 2546, 743, 2684, 11, 5394, 1669, 357, 7344, 70035, 35438, 58, 15, 1125, 11744, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRDSCaching(t *testing.T) { errCh := make(chan error, 1) ops := []testOp{ // Add an RDS watch for a resource name (goodRouteName1), which returns one // matching resource in the response. { target: goodRouteName1, responseToSend: &fakexds.Response{Resp: goodRDSResponse1}, wantRDSCache: map[string]string{goodRouteName1: goodClusterName1}, wantWatchCallback: true, }, // Push an RDS response with a new resource. This resource is considered // good because its domain field matches our LDS watch target, but the // routeConfigName does not match our RDS watch (so the watch callback will // not be invoked). But this should still be cached. { responseToSend: &fakexds.Response{Resp: goodRDSResponse2}, wantRDSCache: map[string]string{ goodRouteName1: goodClusterName1, goodRouteName2: goodClusterName2, }, }, // Push an uninteresting RDS response. This should cause handleRDSResponse // to return an error. But the watch callback should not be invoked, and // the cache should not be updated. { responseToSend: &fakexds.Response{Resp: uninterestingRDSResponse}, wantOpErr: true, wantRDSCache: map[string]string{ goodRouteName1: goodClusterName1, goodRouteName2: goodClusterName2, }, }, // Switch the watch target to goodRouteName2, which was already cached. No // response is received from the server (as expected), but we want the // callback to be invoked with the new clusterName. { target: goodRouteName2, wantRDSCache: map[string]string{ goodRouteName1: goodClusterName1, goodRouteName2: goodClusterName2, }, wantWatchCallback: true, }, } go testRDSCaching(t, ops, errCh) timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Timeout when expecting RDS update") case err := <-errCh: timer.Stop() if err != nil { t.Fatal(err) } } }
explode_data.jsonl/51252
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 701 }
[ 2830, 3393, 36690, 3540, 11829, 1155, 353, 8840, 836, 8, 341, 9859, 1143, 1669, 1281, 35190, 1465, 11, 220, 16, 340, 197, 3721, 1669, 3056, 1944, 7125, 515, 197, 197, 322, 2691, 458, 431, 5936, 3736, 369, 264, 5101, 829, 320, 18536, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestTransportBodyReadErrorType(t *testing.T) { doPanic := make(chan bool, 1) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { w.(http.Flusher).Flush() // force headers out <-doPanic panic("boom") }, optOnlyServer, optQuiet, ) defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} res, err := c.Get(st.ts.URL) if err != nil { t.Fatal(err) } defer res.Body.Close() doPanic <- true buf := make([]byte, 100) n, err := res.Body.Read(buf) want := StreamError{StreamID: 0x1, Code: 0x2} if !reflect.DeepEqual(want, err) { t.Errorf("Read = %v, %#v; want error %#v", n, err, want) } }
explode_data.jsonl/16109
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 315 }
[ 2830, 3393, 27560, 5444, 4418, 1454, 929, 1155, 353, 8840, 836, 8, 341, 19935, 47, 31270, 1669, 1281, 35190, 1807, 11, 220, 16, 340, 18388, 1669, 501, 5475, 58699, 1155, 345, 197, 29244, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConsistencyDelayMetaFilter_Filter_0(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() u := &ulidBuilder{} now := time.Now() input := map[ulid.ULID]*metadata.Meta{ // Fresh blocks. u.ULID(now): {Thanos: metadata.Thanos{Source: metadata.SidecarSource}}, u.ULID(now.Add(-1 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.SidecarSource}}, u.ULID(now.Add(-1 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.ReceiveSource}}, u.ULID(now.Add(-1 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.RulerSource}}, // For now non-delay delete sources, should be ignored by consistency delay. u.ULID(now.Add(-1 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.BucketRepairSource}}, u.ULID(now.Add(-1 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.CompactorSource}}, u.ULID(now.Add(-1 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.CompactorRepairSource}}, // 29m. u.ULID(now.Add(-29 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.SidecarSource}}, u.ULID(now.Add(-29 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.ReceiveSource}}, u.ULID(now.Add(-29 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.RulerSource}}, // For now non-delay delete sources, should be ignored by consistency delay. u.ULID(now.Add(-29 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.BucketRepairSource}}, u.ULID(now.Add(-29 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.CompactorSource}}, u.ULID(now.Add(-29 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.CompactorRepairSource}}, // 30m. 
u.ULID(now.Add(-30 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.SidecarSource}}, u.ULID(now.Add(-30 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.ReceiveSource}}, u.ULID(now.Add(-30 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.RulerSource}}, u.ULID(now.Add(-30 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.BucketRepairSource}}, u.ULID(now.Add(-30 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.CompactorSource}}, u.ULID(now.Add(-30 * time.Minute)): {Thanos: metadata.Thanos{Source: metadata.CompactorRepairSource}}, // 30m+. u.ULID(now.Add(-20 * time.Hour)): {Thanos: metadata.Thanos{Source: metadata.SidecarSource}}, u.ULID(now.Add(-20 * time.Hour)): {Thanos: metadata.Thanos{Source: metadata.ReceiveSource}}, u.ULID(now.Add(-20 * time.Hour)): {Thanos: metadata.Thanos{Source: metadata.RulerSource}}, u.ULID(now.Add(-20 * time.Hour)): {Thanos: metadata.Thanos{Source: metadata.BucketRepairSource}}, u.ULID(now.Add(-20 * time.Hour)): {Thanos: metadata.Thanos{Source: metadata.CompactorSource}}, u.ULID(now.Add(-20 * time.Hour)): {Thanos: metadata.Thanos{Source: metadata.CompactorRepairSource}}, } t.Run("consistency 0 (turned off)", func(t *testing.T) { m := newTestFetcherMetrics() expected := map[ulid.ULID]*metadata.Meta{} // Copy all. for _, id := range u.created { expected[id] = input[id] } reg := prometheus.NewRegistry() f := NewConsistencyDelayMetaFilter(nil, 0*time.Second, reg) testutil.Equals(t, map[string]float64{"consistency_delay_seconds": 0.0}, extprom.CurrentGaugeValuesFor(t, reg, "consistency_delay_seconds")) testutil.Ok(t, f.Filter(ctx, input, m.synced)) testutil.Equals(t, 0.0, promtest.ToFloat64(m.synced.WithLabelValues(tooFreshMeta))) testutil.Equals(t, expected, input) }) t.Run("consistency 30m.", func(t *testing.T) { m := newTestFetcherMetrics() expected := map[ulid.ULID]*metadata.Meta{} // Only certain sources and those with 30m or more age go through. 
for i, id := range u.created { // Younger than 30m. if i < 13 { if input[id].Thanos.Source != metadata.BucketRepairSource && input[id].Thanos.Source != metadata.CompactorSource && input[id].Thanos.Source != metadata.CompactorRepairSource { continue } } expected[id] = input[id] } reg := prometheus.NewRegistry() f := NewConsistencyDelayMetaFilter(nil, 30*time.Minute, reg) testutil.Equals(t, map[string]float64{"consistency_delay_seconds": (30 * time.Minute).Seconds()}, extprom.CurrentGaugeValuesFor(t, reg, "consistency_delay_seconds")) testutil.Ok(t, f.Filter(ctx, input, m.synced)) testutil.Equals(t, float64(len(u.created)-len(expected)), promtest.ToFloat64(m.synced.WithLabelValues(tooFreshMeta))) testutil.Equals(t, expected, input) }) }
explode_data.jsonl/67641
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1771 }
[ 2830, 3393, 15220, 47094, 20039, 12175, 5632, 68935, 62, 15, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 7636, 5378, 19047, 1507, 220, 16, 17, 15, 77053, 32435, 340, 16867, 9121, 2822, 10676, 1669, 609, 360, 307, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestValidateDNSPolicy(t *testing.T) { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)() successCases := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault, core.DNSPolicy(core.DNSClusterFirst), core.DNSNone} for _, policy := range successCases { if errs := validateDNSPolicy(&policy, field.NewPath("field")); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } errorCases := []core.DNSPolicy{core.DNSPolicy("invalid")} for _, policy := range errorCases { if errs := validateDNSPolicy(&policy, field.NewPath("field")); len(errs) == 0 { t.Errorf("expected failure for %v", policy) } } }
explode_data.jsonl/1025
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 253 }
[ 2830, 3393, 17926, 61088, 13825, 1155, 353, 8840, 836, 8, 341, 16867, 4094, 12753, 8840, 4202, 13859, 42318, 16014, 2271, 1155, 11, 4094, 12753, 13275, 13859, 42318, 11, 4419, 27649, 23527, 61088, 11, 830, 8, 2822, 30553, 37302, 1669, 305...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestOTLPTraceExporter_Default_ReturnError(t *testing.T) { td := consumerdata.OTLPTraceData{} want := errors.New("my_error") te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, want)) require.Nil(t, err) require.NotNil(t, te) err = te.ConsumeOTLPTrace(context.Background(), td) require.Equalf(t, want, err, "ConsumeTraceData returns: Want %v Got %v", want, err) }
explode_data.jsonl/1581
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 155 }
[ 2830, 3393, 1793, 12567, 6550, 88025, 60336, 53316, 1454, 1155, 353, 8840, 836, 8, 341, 76373, 1669, 11502, 691, 13, 1793, 12567, 6550, 1043, 16094, 50780, 1669, 5975, 7121, 445, 2408, 4096, 1138, 197, 665, 11, 1848, 1669, 1532, 1793, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUser_GetUserTeamIDs(t *testing.T) { assert.NoError(t, PrepareTestDatabase()) org := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User) testSuccess := func(userID int64, expected []int64) { teamIDs, err := org.GetUserTeamIDs(userID) assert.NoError(t, err) assert.Equal(t, expected, teamIDs) } testSuccess(2, []int64{1, 2}) testSuccess(4, []int64{2}) testSuccess(NonexistentID, []int64{}) }
explode_data.jsonl/71072
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 166 }
[ 2830, 3393, 1474, 13614, 1474, 14597, 30466, 1155, 353, 8840, 836, 8, 341, 6948, 35699, 1155, 11, 31166, 2271, 5988, 2398, 87625, 1669, 5319, 15575, 3036, 5879, 10437, 1155, 11, 609, 1474, 90, 915, 25, 220, 18, 16630, 4071, 1474, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestParser_ParseDeck is a table-driven test for anki.Parser.ParseDeck.
// Each case feeds a raw fixture string through a fresh parser and compares
// the resulting Deck (cards and their field contents) and any error string
// against the expectation via reflect.DeepEqual.
// NOTE(review): the fixture raw string appears whitespace-collapsed in this
// dump; the expected Content values ("…\n") suggest the original fixture was
// multi-line — confirm against upstream before editing the literal.
func TestParser_ParseDeck(t *testing.T) { var tests = []struct { s string d *anki.Deck err string }{ { s: ` %% This is a field 001 %% This is a field 002 --- %% This is a field 003 %% This is a field 004 ---`, d: &anki.Deck{ Cards: []anki.Card{ anki.Card{ Fields: []anki.Field{ anki.Field{ Content: "This is a field 001\n", }, anki.Field{ Content: "This is a field 002\n", }, }, }, anki.Card{ Fields: []anki.Field{ anki.Field{ Content: "This is a field 003\n", }, anki.Field{ Content: "This is a field 004\n", }, }, }, }, }, }, } for i, tt := range tests { d, err := anki.NewParser(strings.NewReader(tt.s)).ParseDeck() if !reflect.DeepEqual(tt.err, errstring(err)) { t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err) } else if tt.err == "" && !reflect.DeepEqual(tt.d, d) { t.Errorf("%d. %q\n\ndeck mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.d, d) } } }
explode_data.jsonl/40546
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 604 }
[ 2830, 3393, 6570, 77337, 39368, 1155, 353, 8840, 836, 8, 341, 2405, 7032, 284, 3056, 1235, 341, 197, 1903, 256, 914, 198, 197, 2698, 256, 353, 1180, 72, 8934, 377, 198, 197, 9859, 914, 198, 197, 59403, 197, 197, 515, 298, 1903, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestSetVolumeID(t *testing.T) { b := &blockStore{} pv := &unstructured.Unstructured{ Object: map[string]interface{}{}, } // missing spec.gcePersistentDisk -> error updatedPV, err := b.SetVolumeID(pv, "abc123") require.Error(t, err) // happy path gce := map[string]interface{}{} pv.Object["spec"] = map[string]interface{}{ "gcePersistentDisk": gce, } updatedPV, err = b.SetVolumeID(pv, "123abc") require.NoError(t, err) actual, err := collections.GetString(updatedPV.UnstructuredContent(), "spec.gcePersistentDisk.pdName") require.NoError(t, err) assert.Equal(t, "123abc", actual) }
explode_data.jsonl/71438
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 239 }
[ 2830, 3393, 1649, 18902, 915, 1155, 353, 8840, 836, 8, 341, 2233, 1669, 609, 4574, 6093, 31483, 3223, 85, 1669, 609, 359, 51143, 10616, 51143, 515, 197, 23816, 25, 2415, 14032, 31344, 6257, 38837, 197, 630, 197, 322, 7402, 1398, 1302, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestShouldMigrateEmptyApplicationConfigStateFromVersion0To1(t *testing.T) { rawData := make(map[string]interface{}) meta := "dummy" ctx := context.Background() result, err := NewApplicationConfigResourceHandle().StateUpgraders()[0].Upgrade(ctx, rawData, meta) require.Nil(t, err) require.Nil(t, result[ApplicationConfigFieldFullLabel]) }
explode_data.jsonl/64936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 111 }
[ 2830, 3393, 14996, 44, 34479, 3522, 4988, 2648, 1397, 3830, 5637, 15, 1249, 16, 1155, 353, 8840, 836, 8, 341, 76559, 1043, 1669, 1281, 9147, 14032, 31344, 37790, 84004, 1669, 330, 31390, 698, 20985, 1669, 2266, 19047, 2822, 9559, 11, 18...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestExternalModuleExclusionPackage checks that marking "aws-sdk" as an
// external node module excludes both the package root and its subpaths
// (e.g. "aws-sdk/clients/dynamodb") from the bundle.
// NOTE(review): the embedded JS fixture raw string appears whitespace-
// collapsed in this dump — confirm the original line breaks upstream
// before modifying the literal.
func TestExternalModuleExclusionPackage(t *testing.T) { default_suite.expectBundled(t, bundled{ files: map[string]string{ "/index.js": ` import { S3 } from 'aws-sdk'; import { DocumentClient } from 'aws-sdk/clients/dynamodb'; export const s3 = new S3(); export const dynamodb = new DocumentClient(); `, }, entryPaths: []string{"/index.js"}, options: config.Options{ Mode: config.ModeBundle, AbsOutputFile: "/out.js", ExternalModules: config.ExternalModules{ NodeModules: map[string]bool{ "aws-sdk": true, }, }, }, }) }
explode_data.jsonl/38522
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 255 }
[ 2830, 3393, 25913, 3332, 840, 8957, 13100, 1155, 353, 8840, 836, 8, 341, 11940, 57239, 25952, 33, 1241, 832, 1155, 11, 51450, 515, 197, 74075, 25, 2415, 14032, 30953, 515, 298, 197, 3115, 1252, 2857, 788, 22074, 571, 21918, 314, 328, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCache(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() mockUser := &core.User{ Login: "octocat", } mockOrgService := mock.NewMockOrganizationService(controller) mockOrgService.EXPECT().Membership(gomock.Any(), gomock.Any(), "github").Return(true, true, nil).Times(1) service := NewCache(mockOrgService, 10, time.Minute).(*cacher) admin, member, err := service.Membership(noContext, mockUser, "github") if err != nil { t.Error(err) } if got, want := service.cache.Len(), 1; got != want { t.Errorf("Expect cache size %d, got %d", want, got) } if admin == false { t.Errorf("Expect admin true, got false") } if member == false { t.Errorf("Expect member true, got false") } admin, member, err = service.Membership(noContext, mockUser, "github") if err != nil { t.Error(err) } if got, want := service.cache.Len(), 1; got != want { t.Errorf("Expect cache size still %d, got %d", want, got) } if admin == false { t.Errorf("Expect cached admin true, got false") } if member == false { t.Errorf("Expect cached member true, got false") } }
explode_data.jsonl/42271
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 417 }
[ 2830, 3393, 8233, 1155, 353, 8840, 836, 8, 341, 61615, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 6461, 991, 18176, 2822, 77333, 1474, 1669, 609, 2153, 7344, 515, 197, 197, 6231, 25, 330, 41692, 509, 266, 756, 197, 630, 77333...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestRequestEmptyArguments(t *testing.T) { assert := assert.New(t) params := []string{"Khadgar"} options := map[string]string{} r := resource{Region: "eu", Endpoint: "wow/auction/data/", Params: params, Options: options} request, _ := r.buildRequest() expectedURL := "https://eu.api.battle.net/wow/auction/data/Khadgar" assert.Equal(expectedURL, request.URL.String()) }
explode_data.jsonl/20032
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 134 }
[ 2830, 3393, 1900, 3522, 19139, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 25856, 1669, 3056, 917, 4913, 42, 31245, 12164, 16707, 35500, 1669, 2415, 14032, 30953, 31483, 7000, 1669, 5101, 90, 14091, 25, 330, 20128, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQueueEnqueue(t *testing.T) { t.Run("empty queue", func(t *testing.T) { q := emptyQueue() q.Enqueue(1) if q.list.Front == nil { t.Error("front is nil") } if q.list.Front.Value != 1 { t.Errorf("got %v, want %v for front value", q.list.Front.Value, 1) } if q.list.Back == nil { t.Error("back is nil") } if q.list.Front != q.list.Back { t.Error(("front and back are not the same")) } }) t.Run("non-empty queue", func(t *testing.T) { q := nonEmptyQueue() q.Enqueue(3) if q.list.Front.Value != 1 { t.Errorf("got %v, want %v for front value", q.list.Front.Value, 1) } if q.list.Back == nil { t.Error("back is nil") } if q.list.Back.Value != 3 { t.Errorf("got %v, want %v for back value", q.list.Back.Value, 3) } }) }
explode_data.jsonl/52730
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 361 }
[ 2830, 3393, 7554, 1702, 4584, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 3194, 7177, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 18534, 1669, 4287, 7554, 2822, 197, 18534, 65625, 7, 16, 692, 197, 743, 2804, 6420, 991, 9411, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestReconcileDirectVolumeMigrationProgress_validateSpec(t *testing.T) { type args struct { srcClient compat.Client pvProgress *migapi.DirectVolumeMigrationProgress } tests := []struct { name string args args wantCondition *migapi.Condition dontWantCondition *migapi.Condition wantErr bool }{ { name: "when podRef is set and podselector is not set, should not have blocker condition", args: args{ srcClient: getFakeCompatClient(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "ns-1"}, Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}), pvProgress: &migapi.DirectVolumeMigrationProgress{ ObjectMeta: metav1.ObjectMeta{Name: "dvmp", Namespace: "openshift-migration"}, Spec: migapi.DirectVolumeMigrationProgressSpec{ PodRef: &corev1.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, }, }, }, wantErr: false, wantCondition: nil, dontWantCondition: &migapi.Condition{Type: InvalidPod, Category: migapi.Critical}, }, { name: "when podRef is not set and podselector is set but podNamespace is not set, should have blocker condition", args: args{ srcClient: getFakeCompatClient(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "ns-1"}, Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}), pvProgress: &migapi.DirectVolumeMigrationProgress{ ObjectMeta: metav1.ObjectMeta{Name: "dvmp", Namespace: "openshift-migration"}, Spec: migapi.DirectVolumeMigrationProgressSpec{ PodSelector: map[string]string{migapi.RsyncPodIdentityLabel: "pod-1"}, }, }, }, wantErr: false, wantCondition: &migapi.Condition{Type: InvalidSpec, Category: migapi.Critical}, dontWantCondition: nil, }, { name: "when podRef is not set and podselector and podNamespace are set, should not have blocker condition", args: args{ srcClient: getFakeCompatClient(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "ns-1"}, Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}), pvProgress: &migapi.DirectVolumeMigrationProgress{ ObjectMeta: metav1.ObjectMeta{Name: "dvmp", 
Namespace: "openshift-migration"}, Spec: migapi.DirectVolumeMigrationProgressSpec{ PodSelector: map[string]string{migapi.RsyncPodIdentityLabel: "pod-1"}, }, }, }, wantErr: false, wantCondition: nil, dontWantCondition: &migapi.Condition{Type: InvalidPod, Category: migapi.Critical}, }, { name: "when required specs are missing, should have blocker condition", args: args{ srcClient: getFakeCompatClient(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "ns-1"}, Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}), pvProgress: &migapi.DirectVolumeMigrationProgress{ ObjectMeta: metav1.ObjectMeta{Name: "dvmp", Namespace: "openshift-migration"}, Spec: migapi.DirectVolumeMigrationProgressSpec{ PodRef: &corev1.ObjectReference{Namespace: "ns"}, PodSelector: map[string]string{"invalidLabel": "val"}, }, }, }, wantErr: false, wantCondition: &migapi.Condition{Type: InvalidSpec, Category: migapi.Critical}, dontWantCondition: nil, }, { name: "when podselector is set but doesn't have pod identity label in it, should have blocker condition", args: args{ srcClient: getFakeCompatClient(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "ns-1"}, Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}), pvProgress: &migapi.DirectVolumeMigrationProgress{ ObjectMeta: metav1.ObjectMeta{Name: "dvmp", Namespace: "openshift-migration"}, Spec: migapi.DirectVolumeMigrationProgressSpec{ PodSelector: map[string]string{}, PodNamespace: "ns", }, }, }, wantErr: false, wantCondition: &migapi.Condition{Type: InvalidPodSelector, Category: migapi.Critical}, dontWantCondition: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &ReconcileDirectVolumeMigrationProgress{} if err := r.validateSpec(tt.args.srcClient, tt.args.pvProgress); (err != nil) != tt.wantErr { t.Errorf("ReconcileDirectVolumeMigrationProgress.validateSpec() error = %v, wantErr %v", err, tt.wantErr) } if tt.wantCondition != nil && 
!tt.args.pvProgress.Status.HasCondition(tt.wantCondition.Type) { t.Errorf("ReconcileDirectVolumeMigrationProgress.validateSpec() expected condition of type %s not found", tt.wantCondition.Type) } if tt.dontWantCondition != nil && tt.args.pvProgress.Status.HasCondition(tt.dontWantCondition.Type) { t.Errorf("ReconcileDirectVolumeMigrationProgress.validateSpec() unexpected condition of type %s found", tt.wantCondition.Type) } }) } }
explode_data.jsonl/24073
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1946 }
[ 2830, 3393, 693, 40446, 457, 16027, 18902, 20168, 9496, 42681, 8327, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 41144, 2959, 220, 12139, 11716, 198, 197, 3223, 85, 9496, 353, 76, 343, 2068, 89592, 18902, 20168, 9496, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6