text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestGetOrdererEndpointFail(t *testing.T) { initMSP() signer, err := common.GetDefaultSigner() assert.NoError(t, err) mockchain := "mockchain" factory.InitFactories(nil) mockResponse := &pb.ProposalResponse{ Response: &pb.Response{Status: 404, Payload: []byte{}}, Endorsement: &pb.Endorsement{}, } mockEndorserClient := common.GetMockEndorserClient(mockResponse, nil) _, err = common.GetOrdererEndpointOfChain(mockchain, signer, mockEndorserClient) assert.Error(t, err, "GetOrdererEndpointOfChain from invalid response") }
explode_data.jsonl/22367
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 203 }
[ 2830, 3393, 1949, 4431, 261, 27380, 19524, 1155, 353, 8840, 836, 8, 341, 28248, 44, 4592, 2822, 69054, 261, 11, 1848, 1669, 4185, 2234, 3675, 7264, 261, 741, 6948, 35699, 1155, 11, 1848, 692, 77333, 8819, 1669, 330, 16712, 8819, 698, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOption_WithTokenFactory(t *testing.T) { client, err := NewClient("test", "http://httpbin.org/") if err != nil { t.Fatal(err) } client.CommonOpts = append(client.CommonOpts, WithTokenFactory(func() string { return "testauth" })) resp := &respCarrier{} err = client.Get(resp, "/headers") if err != nil { t.Fatal(err) } t.Log(resp.String()) n := strings.Index(resp.String(), "testauth") if n == -1 { t.Fatal("Auth is not set in request") } }
explode_data.jsonl/68691
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 193 }
[ 2830, 3393, 5341, 62, 2354, 3323, 4153, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 1532, 2959, 445, 1944, 497, 330, 1254, 1110, 1254, 6863, 2659, 53006, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 25291...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWhereObjxMap(t *testing.T) { v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} selected := v.WhereObjxMap(func(i int, val Map) bool { return i%2 == 0 }).MustObjxMapSlice() assert.Equal(t, 3, len(selected)) }
explode_data.jsonl/23396
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 129 }
[ 2830, 3393, 9064, 5261, 87, 2227, 1155, 353, 8840, 836, 8, 1476, 5195, 1669, 609, 1130, 90, 691, 25, 39444, 2227, 6098, 7, 2227, 2376, 3564, 7, 16, 5731, 320, 2227, 2376, 3564, 7, 16, 5731, 320, 2227, 2376, 3564, 7, 16, 5731, 320,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestResourceSortByExpirationDate(t *testing.T) { certs := []*iam.ServerCertificateMetadata{ { ServerCertificateName: aws.String("oldest"), Expiration: aws.Time(time.Now()), }, { ServerCertificateName: aws.String("latest"), Expiration: aws.Time(time.Now().Add(3 * time.Hour)), }, { ServerCertificateName: aws.String("in between"), Expiration: aws.Time(time.Now().Add(2 * time.Hour)), }, } sort.Sort(certificateByExpiration(certs)) if *certs[0].ServerCertificateName != "latest" { t.Fatalf("Expected first item to be %q, but was %q", "latest", *certs[0].ServerCertificateName) } }
explode_data.jsonl/6771
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 280 }
[ 2830, 3393, 4783, 10231, 1359, 66301, 1916, 1155, 353, 8840, 836, 8, 341, 1444, 15546, 1669, 29838, 4932, 22997, 33202, 14610, 515, 197, 197, 515, 298, 92075, 33202, 675, 25, 31521, 6431, 445, 337, 4979, 4461, 298, 197, 66301, 25, 310, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() config := configSetup(t) // create a list of vote extensions, one for each validator. voteExtensions := [][]byte{ []byte("extension 0"), []byte("extension 1"), []byte("extension 2"), []byte("extension 3"), } // m := abcimocks.NewApplication(t) m := &abcimocks.Application{} m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ VoteExtension: voteExtensions[0], }, nil) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) // capture the prepare proposal request. rpp := &abci.RequestPrepareProposal{} m.On("PrepareProposal", mock.Anything, mock.MatchedBy(func(r *abci.RequestPrepareProposal) bool { rpp = r return true })).Return(&abci.ResponsePrepareProposal{}, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil).Once() m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil) m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil) cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) height, round := cs1.Height, cs1.Round newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(ctx, t, cs1, addr) startTestRound(ctx, cs1, height, round) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() blockID := types.BlockID{ Hash: rs.ProposalBlock.Hash(), PartSetHeader: 
rs.ProposalBlockParts.Header(), } signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) // create a precommit for each validator with the associated vote extension. for i, vs := range vss[1:] { signAddPrecommitWithExtension(ctx, t, cs1, config.ChainID(), blockID, voteExtensions[i+1], vs) } ensurePrevote(t, voteCh, height, round) // ensure that the height is committed. ensurePrecommitMatch(t, voteCh, height, round, blockID.Hash) incrementHeight(vss[1:]...) height++ round = 0 ensureNewRound(t, newRoundCh, height, round) incrementRound(vss[1:]...) incrementRound(vss[1:]...) incrementRound(vss[1:]...) round = 3 signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vss[1:]...) ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) // ensure that the proposer received the list of vote extensions from the // previous height. require.Len(t, rpp.LocalLastCommit.Votes, len(vss)) for i := range vss { require.Equal(t, rpp.LocalLastCommit.Votes[i].VoteExtension, voteExtensions[i]) } }
explode_data.jsonl/54282
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1136 }
[ 2830, 3393, 50590, 98637, 693, 346, 1886, 41412, 31282, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 16867, 9121, 2822, 25873, 1669, 2193, 21821, 1155, 692, 197, 322, 1855, 264, 1140, 315, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDontRotateTokensOnCancelledRequests(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) reqContext, _, err := initTokenRotationTest(ctx) require.NoError(t, err) tryRotateCallCount := 0 uts := &auth.FakeUserAuthTokenService{ TryRotateTokenProvider: func(ctx context.Context, token *models.UserToken, clientIP net.IP, userAgent string) (bool, error) { tryRotateCallCount++ return false, nil }, } token := &models.UserToken{AuthToken: "oldtoken"} fn := rotateEndOfRequestFunc(reqContext, uts, token) cancel() fn(reqContext.Resp) assert.Equal(t, 0, tryRotateCallCount, "Token rotation was attempted") }
explode_data.jsonl/19358
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 230 }
[ 2830, 3393, 35, 544, 34540, 29300, 1925, 39473, 35295, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 24395, 1972, 11, 8358, 1848, 1669, 2930, 3323, 18440, 2271, 7502, 340, 17957, 35699, 1155, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOmitEmptyHalfFull(t *testing.T) { var s string var oeA OmitEmptyHalfFull s = mustEncodeToJSON(&oeA) if s != `{"field01":"","field03":""}` { t.Errorf("wrong result: %s", s) } var oeB OmitEmptyHalfFull oeB.Field02 = "val2" s = mustEncodeToJSON(&oeB) if s != `{"field01":"","field02":"val2","field03":""}` { t.Errorf("wrong result: %s", s) } var oeC OmitEmptyHalfFull oeC.Field03 = "val3" s = mustEncodeToJSON(&oeC) if s != `{"field01":"","field03":"val3"}` { t.Errorf("wrong result: %s", s) } }
explode_data.jsonl/59183
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 231 }
[ 2830, 3393, 46, 1763, 3522, 42627, 9432, 1155, 353, 8840, 836, 8, 1476, 2405, 274, 914, 271, 2405, 68383, 32, 506, 1763, 3522, 42627, 9432, 271, 1903, 284, 1969, 32535, 1249, 5370, 2099, 4644, 32, 340, 743, 274, 961, 1565, 4913, 2566,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestFailureScenarios(t *testing.T) { // Create a new database to run tests against. dbPath := filepath.Join(os.TempDir(), "ffldb-failurescenarios") _ = os.RemoveAll(dbPath) idb, err := database.Create(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Failed to create test database (%s) %v", dbType, err) return } defer os.RemoveAll(dbPath) defer idb.Close() // Create a test context to pass around. tc := &testContext{ t: t, db: idb, files: make(map[uint32]*lockableFile), maxFileSizes: make(map[uint32]int64), } // Change the maximum file size to a small value to force multiple flat // files with the test data set and replace the file-related functions // to make use of mock files in memory. This allows injection of // various file-related errors. store := idb.(*db).store store.maxBlockFileSize = 1024 // 1KiB store.openWriteFileFunc = func(fileNum uint32) (filer, error) { if file, ok := tc.files[fileNum]; ok { // "Reopen" the file. file.Lock() mock := file.file.(*mockFile) mock.Lock() mock.closed = false mock.Unlock() file.Unlock() return mock, nil } // Limit the max size of the mock file as specified in the test // context. maxSize := int64(-1) if maxFileSize, ok := tc.maxFileSizes[fileNum]; ok { maxSize = int64(maxFileSize) } file := &mockFile{maxSize: int64(maxSize)} tc.files[fileNum] = &lockableFile{file: file} return file, nil } store.openFileFunc = func(fileNum uint32) (*lockableFile, error) { // Force error when trying to open max file num. if fileNum == ^uint32(0) { return nil, makeDbErr(database.ErrDriverSpecific, "test", nil) } if file, ok := tc.files[fileNum]; ok { // "Reopen" the file. 
file.Lock() mock := file.file.(*mockFile) mock.Lock() mock.closed = false mock.Unlock() file.Unlock() return file, nil } file := &lockableFile{file: &mockFile{}} tc.files[fileNum] = file return file, nil } store.deleteFileFunc = func(fileNum uint32) error { if file, ok := tc.files[fileNum]; ok { file.Lock() file.file.Close() file.Unlock() delete(tc.files, fileNum) return nil } str := fmt.Sprintf("file %d does not exist", fileNum) return makeDbErr(database.ErrDriverSpecific, str, nil) } // Load the test blocks and save in the test context for use throughout // the tests. blocks, err := loadBlocks(t, blockDataFile, blockDataNet) if err != nil { t.Errorf("loadBlocks: Unexpected error: %v", err) return } tc.blocks = blocks // Test various failures paths when writing to the block files. if !testWriteFailures(tc) { return } // Test various file-related issues such as closed and missing files. if !testBlockFileErrors(tc) { return } // Test various corruption scenarios. testCorruption(tc) }
explode_data.jsonl/53813
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1104 }
[ 2830, 3393, 17507, 3326, 60494, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 264, 501, 4625, 311, 1598, 7032, 2348, 624, 20939, 1820, 1669, 26054, 22363, 9638, 65009, 6184, 1507, 330, 542, 56925, 2220, 604, 1413, 47322, 10100, 1138, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSplitFullName(t *testing.T) { resource.Require(t, resource.UnitTest) fullName := "john doe" firstName, lastName := account.SplitFullName(fullName) assert.Equal(t, "john", firstName) assert.Equal(t, "doe", lastName) fullName = "john doe mike brown" firstName, lastName = account.SplitFullName(fullName) assert.Equal(t, "john", firstName) assert.Equal(t, "doe mike brown", lastName) fullName = "john, doe mike brown" firstName, lastName = account.SplitFullName(fullName) assert.Equal(t, "john,", firstName) assert.Equal(t, "doe mike brown", lastName) fullName = "john" firstName, lastName = account.SplitFullName(fullName) assert.Equal(t, "john", firstName) assert.Equal(t, "", lastName) fullName = "" firstName, lastName = account.SplitFullName(fullName) assert.Equal(t, "", firstName) assert.Equal(t, "", lastName) }
explode_data.jsonl/47474
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 20193, 36217, 1155, 353, 8840, 836, 8, 341, 50346, 81288, 1155, 11, 5101, 25159, 2271, 340, 94042, 675, 1669, 330, 47817, 98453, 698, 42190, 675, 11, 20460, 1669, 2692, 19823, 36217, 28907, 675, 340, 6948, 12808, 1155, 11, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUpdateNewNodeStatus(t *testing.T) { // generate one more than maxImagesInNodeStatus in inputImageList inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1) testKubelet := newTestKubeletWithImageList( t, inputImageList, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), }, capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), }, } kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 10E9, // 10G } mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", } mockCadvisor.On("VersionInfo").Return(versionInfo, nil) expectedNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ { Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse, 
Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeDiskPressure, Status: v1.ConditionFalse, Reason: "KubeletHasNoDiskPressure", Message: fmt.Sprintf("kubelet has no disk pressure"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, }, NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", KernelVersion: "3.16.0-0.bpo.4-amd64", OSImage: "Debian GNU/Linux 7 (wheezy)", OperatingSystem: goruntime.GOOS, Architecture: goruntime.GOARCH, ContainerRuntimeVersion: "test://1.5.0", KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, Allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, Addresses: []v1.NodeAddress{ {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, {Type: v1.NodeHostName, Address: testKubeletHostname}, }, Images: expectedImageList, }, } kubelet.updateRuntimeUp() assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) assert.NoError(t, err) for i, cond := range updatedNode.Status.Conditions { 
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type) assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type) updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} } // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, "NotReady should be last") assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus) assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode)) }
explode_data.jsonl/69715
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2348 }
[ 2830, 3393, 4289, 3564, 1955, 2522, 1155, 353, 8840, 836, 8, 341, 197, 322, 6923, 825, 803, 1091, 1932, 14228, 641, 1955, 2522, 304, 1946, 1906, 852, 198, 22427, 1906, 852, 11, 3601, 1906, 852, 1669, 6923, 16451, 1906, 852, 8739, 1422...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNoData(t *testing.T) { db := openTestConn(t) defer db.Close() st, err := db.Prepare("SELECT 1 WHERE true = false") if err != nil { t.Fatal(err) } defer st.Close() r, err := st.Query() if err != nil { t.Fatal(err) } defer r.Close() if r.Next() { if r.Err() != nil { t.Fatal(r.Err()) } t.Fatal("unexpected row") } _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20) if err == nil { t.Fatal("Should have raised an error on non existent table") } _, err = db.Query("SELECT * FROM nonexistenttable") if err == nil { t.Fatal("Should have raised an error on non existent table") } }
explode_data.jsonl/63425
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 265 }
[ 2830, 3393, 2753, 1043, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 1787, 2271, 9701, 1155, 340, 16867, 2927, 10421, 2822, 18388, 11, 1848, 1669, 2927, 28770, 3380, 445, 4858, 220, 16, 5288, 830, 284, 895, 1138, 743, 1848, 961, 2092, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestLocateIP(t *testing.T) { tests := []struct { name string ipPools []*antreacrds.ExternalIPPool ipToCheck net.IP expectedIPPool string expectedError bool }{ { name: "check for known IP 1", ipPools: []*antreacrds.ExternalIPPool{ newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"), newExternalIPPool("eip2", "", "10.10.11.2", "10.10.11.3"), }, ipToCheck: net.ParseIP("10.10.10.2"), expectedIPPool: "eip1", expectedError: false, }, { name: "check for known IP 2", ipPools: []*antreacrds.ExternalIPPool{ newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"), newExternalIPPool("eip2", "", "10.10.11.2", "10.10.11.3"), }, ipToCheck: net.ParseIP("10.10.11.2"), expectedIPPool: "eip2", expectedError: false, }, { name: "check for unknown IP", ipPools: []*antreacrds.ExternalIPPool{ newExternalIPPool("eip1", "", "10.10.10.2", "10.10.10.3"), newExternalIPPool("eip2", "", "10.10.11.2", "10.10.11.3"), }, ipToCheck: net.ParseIP("10.10.13.1"), expectedIPPool: "", expectedError: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) var fakeCRDObjects []runtime.Object for _, p := range tt.ipPools { fakeCRDObjects = append(fakeCRDObjects, p) } controller := newController(fakeCRDObjects) controller.crdInformerFactory.Start(stopCh) controller.crdInformerFactory.WaitForCacheSync(stopCh) go controller.Run(stopCh) require.True(t, cache.WaitForCacheSync(stopCh, controller.HasSynced)) pool, err := controller.LocateIP(tt.ipToCheck) assert.Equal(t, tt.expectedIPPool, pool) assert.Equal(t, tt.expectedError, err != nil) }) } }
explode_data.jsonl/10265
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 853 }
[ 2830, 3393, 9152, 349, 3298, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 1843, 914, 198, 197, 46531, 47, 6178, 286, 29838, 517, 265, 64748, 5356, 5121, 15342, 3298, 10551, 198, 197, 46531, 1249, 3973, 414, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNavigation_EnterDoesNotAddSpaceAtStartOfBuffer(t *testing.T) { f, cleanup := setupNav() defer cleanup() f.TTYCtrl.Inject(term.K('N', ui.Ctrl)) // begin navigation mode f.TTYCtrl.Inject(term.K(ui.Enter)) // insert the "a" file name f.TestTTY(t, filepath.Join("~", "d"), "> ", "a", Styles, "!", term.DotHere, ) }
explode_data.jsonl/49813
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 16399, 91105, 21468, 2623, 2212, 9914, 1655, 3479, 2124, 4095, 1155, 353, 8840, 836, 8, 341, 1166, 11, 21290, 1669, 6505, 10096, 741, 16867, 21290, 2822, 1166, 836, 22098, 15001, 41046, 44654, 11352, 492, 45, 516, 7505, 727, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTasks_UpdateTask(t *testing.T) { task, err := tm.UpdateTask(1, "Go to USA") t.Log(task) if err != nil { t.Error("Unable to update task") } }
explode_data.jsonl/66640
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 67 }
[ 2830, 3393, 25449, 47393, 6262, 1155, 353, 8840, 836, 8, 341, 49115, 11, 1848, 1669, 17333, 16689, 6262, 7, 16, 11, 330, 10850, 311, 7279, 1138, 3244, 5247, 17483, 340, 743, 1848, 961, 2092, 341, 197, 3244, 6141, 445, 17075, 311, 2647...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestNewXegerWithSeed(t *testing.T) { pattern := "abc[a-ce-gy-z]{5}" xeg, err := NewXegerWithSeed(pattern, 123456) if err != nil { t.Errorf("Failed on valid pattern %s: %s\n", pattern, err.Error()) } if result := xeg.Generate(); result != "abczzyfz" { t.Errorf("Result with set seed doesn't meet prediction: %s is not abczzyfz", result) } }
explode_data.jsonl/1412
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 3564, 55, 1878, 2354, 41471, 1155, 353, 8840, 836, 8, 341, 3223, 3227, 1669, 330, 13683, 15481, 53212, 2371, 88, 9141, 15370, 20, 11195, 10225, 791, 11, 1848, 1669, 1532, 55, 1878, 2354, 41471, 30948, 11, 220, 16, 17, 18, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestViewPullUp(t *testing.T) { for _, tc := range []struct { desc string inputs []string offset int length int output string failed bool // lengths is the lengths of each buffer node after the pull up. lengths []int }{ { desc: "whole empty view", }, { desc: "zero pull", inputs: []string{"hello", " world"}, lengths: []int{5, 6}, }, { desc: "whole view", inputs: []string{"hello", " world"}, offset: 0, length: 11, output: "hello world", lengths: []int{11}, }, { desc: "middle to end aligned", inputs: []string{"0123", "45678", "9abcd"}, offset: 4, length: 10, output: "456789abcd", lengths: []int{4, 10}, }, { desc: "middle to end unaligned", inputs: []string{"0123", "45678", "9abcd"}, offset: 6, length: 8, output: "6789abcd", lengths: []int{4, 10}, }, { desc: "middle aligned", inputs: []string{"0123", "45678", "9abcd", "efgh"}, offset: 6, length: 5, output: "6789a", lengths: []int{4, 10, 4}, }, // Failed cases. { desc: "empty view - length too long", offset: 0, length: 1, failed: true, }, { desc: "empty view - offset too large", offset: 1, length: 1, failed: true, }, { desc: "length too long", inputs: []string{"0123", "45678", "9abcd"}, offset: 4, length: 100, failed: true, lengths: []int{4, 5, 5}, }, { desc: "offset too large", inputs: []string{"0123", "45678", "9abcd"}, offset: 100, length: 1, failed: true, lengths: []int{4, 5, 5}, }, } { t.Run(tc.desc, func(t *testing.T) { var v View for _, s := range tc.inputs { v.AppendOwned([]byte(s)) } got, gotOk := v.PullUp(tc.offset, tc.length) want, wantOk := []byte(tc.output), !tc.failed if gotOk != wantOk || !bytes.Equal(got, want) { t.Errorf("v.PullUp(%d, %d) = %q, %t; %q, %t", tc.offset, tc.length, got, gotOk, want, wantOk) } var gotLengths []int for buf := v.data.Front(); buf != nil; buf = buf.Next() { gotLengths = append(gotLengths, buf.ReadSize()) } if !reflect.DeepEqual(gotLengths, tc.lengths) { t.Errorf("lengths = %v; want %v", gotLengths, tc.lengths) } }) } }
explode_data.jsonl/52510
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1105 }
[ 2830, 3393, 851, 36068, 2324, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17130, 1669, 2088, 3056, 1235, 341, 197, 41653, 256, 914, 198, 197, 22427, 82, 3056, 917, 198, 197, 40668, 526, 198, 197, 49046, 526, 198, 197, 21170, 914, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestBuildChallengeTx(t *testing.T) { kp0 := newKeypair0() { // 1 minute timebound tx, err := BuildChallengeTx(kp0.Seed(), kp0.Address(), "testwebauth.stellar.org", "testanchor.stellar.org", network.TestNetworkPassphrase, time.Minute) assert.NoError(t, err) txeBase64, err := tx.Base64() assert.NoError(t, err) var txXDR xdr.TransactionEnvelope err = xdr.SafeUnmarshalBase64(txeBase64, &txXDR) assert.NoError(t, err) assert.Equal(t, int64(0), txXDR.SeqNum(), "sequence number should be 0") assert.Equal(t, uint32(200), txXDR.Fee(), "Fee should be 100") assert.Equal(t, 2, len(txXDR.Operations()), "number operations should be 2") timeDiff := txXDR.TimeBounds().MaxTime - txXDR.TimeBounds().MinTime assert.Equal(t, int64(60), int64(timeDiff), "time difference should be 300 seconds") op := txXDR.Operations()[0] assert.Equal(t, xdr.OperationTypeManageData, op.Body.Type, "operation type should be manage data") assert.Equal(t, xdr.String64("testanchor.stellar.org auth"), op.Body.ManageDataOp.DataName, "DataName should be 'testanchor.stellar.org auth'") assert.Equal(t, 64, len(*op.Body.ManageDataOp.DataValue), "DataValue should be 64 bytes") webAuthOp := txXDR.Operations()[1] assert.Equal(t, xdr.OperationTypeManageData, webAuthOp.Body.Type, "operation type should be manage data") assert.Equal(t, xdr.String64("web_auth_domain"), webAuthOp.Body.ManageDataOp.DataName, "DataName should be 'web_auth_domain'") assert.Equal(t, "testwebauth.stellar.org", string(*webAuthOp.Body.ManageDataOp.DataValue), "DataValue should be 'testwebauth.stellar.org'") } { // 5 minutes timebound tx, err := BuildChallengeTx(kp0.Seed(), kp0.Address(), "testwebauth.stellar.org", "testanchor.stellar.org", network.TestNetworkPassphrase, time.Duration(5*time.Minute)) assert.NoError(t, err) txeBase64, err := tx.Base64() assert.NoError(t, err) var txXDR1 xdr.TransactionEnvelope err = xdr.SafeUnmarshalBase64(txeBase64, &txXDR1) assert.NoError(t, err) assert.Equal(t, int64(0), txXDR1.SeqNum(), "sequence number 
should be 0") assert.Equal(t, uint32(200), txXDR1.Fee(), "Fee should be 100") assert.Equal(t, 2, len(txXDR1.Operations()), "number operations should be 2") timeDiff := txXDR1.TimeBounds().MaxTime - txXDR1.TimeBounds().MinTime assert.Equal(t, int64(300), int64(timeDiff), "time difference should be 300 seconds") op1 := txXDR1.Operations()[0] assert.Equal(t, xdr.OperationTypeManageData, op1.Body.Type, "operation type should be manage data") assert.Equal(t, xdr.String64("testanchor.stellar.org auth"), op1.Body.ManageDataOp.DataName, "DataName should be 'testanchor.stellar.org auth'") assert.Equal(t, 64, len(*op1.Body.ManageDataOp.DataValue), "DataValue should be 64 bytes") webAuthOp := txXDR1.Operations()[1] assert.Equal(t, xdr.OperationTypeManageData, webAuthOp.Body.Type, "operation type should be manage data") assert.Equal(t, xdr.String64("web_auth_domain"), webAuthOp.Body.ManageDataOp.DataName, "DataName should be 'web_auth_domain'") assert.Equal(t, "testwebauth.stellar.org", string(*webAuthOp.Body.ManageDataOp.DataValue), "DataValue should be 'testwebauth.stellar.org'") } //transaction with infinite timebound _, err := BuildChallengeTx(kp0.Seed(), kp0.Address(), "webauthdomain", "sdf", network.TestNetworkPassphrase, 0) if assert.Error(t, err) { assert.Contains(t, err.Error(), "provided timebound must be at least 1s (300s is recommended)") } }
explode_data.jsonl/20690
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1330 }
[ 2830, 3393, 11066, 62078, 31584, 1155, 353, 8840, 836, 8, 341, 16463, 79, 15, 1669, 501, 6608, 1082, 1310, 15, 2822, 197, 515, 197, 197, 322, 220, 16, 9383, 882, 10891, 198, 197, 46237, 11, 1848, 1669, 7854, 62078, 31584, 5969, 79, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestConvertToTrippingErrIfShould_ConvertToTrippingErrIfShould(t *testing.T) { defaultError := errors.New("default") overriddenError := errors.New("overridden") cases := map[string]struct { build func() ConvertToTrippingErrIfShould inputErr error expectedErr error }{ "default": { build: func() (x ConvertToTrippingErrIfShould) { return }, inputErr: defaultError, expectedErr: tripping.New(defaultError), }, "overridden": { build: func() ConvertToTrippingErrIfShould { return func(_ *http.Response, _ error) error { return overriddenError } }, inputErr: nil, expectedErr: overriddenError, }, } for caseName, dt := range cases { t.Run(caseName, func(t *testing.T) { g := NewWithT(t) actual := dt.build().ConvertToTrippingErrIfShould(&http.Response{}, dt.inputErr) g.Expect(actual).Should(Equal(dt.expectedErr)) }) } }
explode_data.jsonl/54073
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 378 }
[ 2830, 3393, 12012, 1249, 21884, 10732, 7747, 2679, 14996, 15100, 1621, 1249, 21884, 10732, 7747, 2679, 14996, 1155, 353, 8840, 836, 8, 341, 11940, 1454, 1669, 5975, 7121, 445, 2258, 1138, 197, 1975, 42185, 1454, 1669, 5975, 7121, 445, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStartNoop(t *testing.T) { var tickChan = make(chan struct{}, 2) cron := newWithSeconds() cron.AddFunc("* * * * * ?", func() { tickChan <- struct{}{} }) cron.Start() defer cron.Stop() // Wait for the first firing to ensure the runner is going <-tickChan cron.Start() <-tickChan // Fail if this job fires again in a short period, indicating a double-run select { case <-time.After(time.Millisecond): case <-tickChan: t.Error("expected job fires exactly twice") } }
explode_data.jsonl/8310
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 180 }
[ 2830, 3393, 3479, 2753, 453, 1155, 353, 8840, 836, 8, 341, 2405, 9341, 46019, 284, 1281, 35190, 2036, 22655, 220, 17, 692, 1444, 2248, 1669, 501, 2354, 15343, 741, 1444, 2248, 1904, 9626, 29592, 353, 353, 353, 353, 42313, 2915, 368, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestPrivateActivityNoVisibleForOtherUser performs some actions as one user,
// logs in as a different user, and checks the first user's activity visibility.
// NOTE(review): the function name suggests activities should NOT be visible to
// the other user, but the assertion expects visible == true — confirm which is
// intended (activity being public by default would make the assertion correct).
func TestPrivateActivityNoVisibleForOtherUser(t *testing.T) {
	defer prepareTestEnv(t)()
	testPrivateActivityDoSomethingForActionEntries(t)

	session := loginUser(t, privateActivityTestOtherUser)
	visible := testPrivateActivityHelperHasVisibleActivitiesFromSession(t, session)

	assert.True(t, visible, "user should have visible activities")
}
explode_data.jsonl/51647
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 98 }
[ 2830, 3393, 16787, 4052, 2753, 5715, 2461, 11409, 1474, 1155, 353, 8840, 836, 8, 341, 16867, 10549, 2271, 14359, 1155, 8, 741, 18185, 16787, 4052, 5404, 23087, 2461, 2512, 24533, 1155, 692, 25054, 1669, 87169, 1155, 11, 869, 4052, 2271, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContainsFold(t *testing.T) { cs := []struct { w bool s string b string }{ {true, "ABCDEF", "abc"}, {false, "ABCDEF", "Z"}, } for i, c := range cs { a := ContainsFold(c.s, c.b) if a != c.w { t.Errorf("[%d] ContainsFold(%q, %q) = %v, want %v", i, c.s, c.b, a, c.w) } } }
explode_data.jsonl/80912
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 23805, 75536, 1155, 353, 8840, 836, 8, 341, 71899, 1669, 3056, 1235, 341, 197, 6692, 1807, 198, 197, 1903, 914, 198, 197, 2233, 914, 198, 197, 59403, 197, 197, 90, 1866, 11, 330, 25411, 13649, 497, 330, 13683, 7115, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestRestartKillWait starts a container, kills it, rebuilds the engine over
// the same runtime root (simulating a daemon restart), and then checks the
// stopped container can still be listed and waited on without hanging.
func TestRestartKillWait(t *testing.T) {
	eng := NewTestEngine(t)
	srv := mkServerFromEngine(eng, t)
	runtime := mkDaemonFromEngine(eng, t)
	defer runtime.Nuke()

	// A container that blocks on stdin ("/bin/cat" with -i) so it stays alive
	// until explicitly killed.
	config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	id := createTestContainer(eng, config, t)

	job := eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err := job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if len(outs.Data) != 1 {
		t.Errorf("Expected 1 container, %v found", len(outs.Data))
	}

	job = eng.Job("start", id)
	if err := job.ImportEnv(hostConfig); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	job = eng.Job("kill", id)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	// Rebuild the engine on the same root to simulate a daemon restart.
	// NOTE(review): newTestEngine (lowercase) appears to be a sibling helper of
	// NewTestEngine — confirm both exist in this package.
	eng = newTestEngine(t, false, runtime.Config().Root)
	srv = mkServerFromEngine(eng, t)

	job = srv.Eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err = job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	// The killed container must survive the restart in the listing.
	if len(outs.Data) != 1 {
		t.Errorf("Expected 1 container, %v found", len(outs.Data))
	}

	// Waiting on an already-stopped container must return promptly.
	setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() {
		job = srv.Eng.Job("wait", outs.Data[0].Get("Id"))
		if err := job.Run(); err != nil {
			t.Fatal(err)
		}
	})
}
explode_data.jsonl/1593
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 603 }
[ 2830, 3393, 59354, 53734, 14190, 1155, 353, 8840, 836, 8, 341, 197, 826, 1669, 1532, 2271, 4571, 1155, 340, 1903, 10553, 1669, 23789, 5475, 3830, 4571, 7, 826, 11, 259, 340, 7000, 4466, 1669, 23789, 89177, 3830, 4571, 7, 826, 11, 259,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestMsgCreateValidator table-tests NewMsgCreateValidator followed by
// ValidateBasic, covering good and bad descriptions, addresses, pubkeys,
// bond amounts, and min-self-delegation values.
func TestMsgCreateValidator(t *testing.T) {
	commission1 := NewCommissionRates(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec())
	commission2 := NewCommissionRates(sdk.NewDec(5), sdk.NewDec(5), sdk.NewDec(5))

	tests := []struct {
		name, moniker, identity, website, securityContact, details string
		CommissionRates                                            CommissionRates
		minSelfDelegation                                          sdk.Int
		validatorAddr                                              sdk.ValAddress
		pubkey                                                     cryptotypes.PubKey
		bond                                                       sdk.Coin
		expectPass                                                 bool
	}{
		{"basic good", "a", "b", "c", "d", "e", commission1, sdk.OneInt(), valAddr1, pk1, coinPos, true},
		{"partial description", "", "", "c", "", "", commission1, sdk.OneInt(), valAddr1, pk1, coinPos, true},
		{"empty description", "", "", "", "", "", commission2, sdk.OneInt(), valAddr1, pk1, coinPos, false},
		{"empty address", "a", "b", "c", "d", "e", commission2, sdk.OneInt(), emptyAddr, pk1, coinPos, false},
		{"empty pubkey", "a", "b", "c", "d", "e", commission1, sdk.OneInt(), valAddr1, emptyPubkey, coinPos, false},
		{"empty bond", "a", "b", "c", "d", "e", commission2, sdk.OneInt(), valAddr1, pk1, coinZero, false},
		{"nil bond", "a", "b", "c", "d", "e", commission2, sdk.OneInt(), valAddr1, pk1, sdk.Coin{}, false},
		{"zero min self delegation", "a", "b", "c", "d", "e", commission1, sdk.ZeroInt(), valAddr1, pk1, coinPos, false},
		{"negative min self delegation", "a", "b", "c", "d", "e", commission1, sdk.NewInt(-1), valAddr1, pk1, coinPos, false},
		{"delegation less than min self delegation", "a", "b", "c", "d", "e", commission1, coinPos.Amount.Add(sdk.OneInt()), valAddr1, pk1, coinPos, false},
	}

	for _, tc := range tests {
		description := NewDescription(tc.moniker, tc.identity, tc.website, tc.securityContact, tc.details)
		msg, err := NewMsgCreateValidator(tc.validatorAddr, tc.pubkey, tc.bond, description, tc.CommissionRates, tc.minSelfDelegation)
		require.NoError(t, err)
		if tc.expectPass {
			require.Nil(t, msg.ValidateBasic(), "test: %v", tc.name)
		} else {
			require.NotNil(t, msg.ValidateBasic(), "test: %v", tc.name)
		}
	}
}
explode_data.jsonl/22795
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1093 }
[ 2830, 3393, 6611, 4021, 14256, 1155, 353, 8840, 836, 8, 341, 32810, 2728, 16, 1669, 1532, 73750, 82623, 1141, 7584, 35489, 4900, 1507, 45402, 35489, 4900, 1507, 45402, 35489, 4900, 2398, 32810, 2728, 17, 1669, 1532, 73750, 82623, 1141, 75...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.BeginAuth("test_state") s := session.(*paypal.Session) a.NoError(err) a.Contains(s.AuthURL, "paypal.com/webapps/auth/protocol/openidconnect/v1/authorize") }
explode_data.jsonl/33911
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 110 }
[ 2830, 3393, 93447, 5087, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 11323, 1669, 2060, 7121, 1155, 340, 3223, 1669, 9109, 741, 25054, 11, 1848, 1669, 281, 28467, 5087, 445, 1944, 4387, 1138, 1903, 1669, 3797, 41399, 71205, 20...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetPing(t *testing.T) { th := Setup().InitBasic().InitSystemAdmin() defer th.TearDown() Client := th.Client goRoutineHealthThreshold := *th.App.Config().ServiceSettings.GoroutineHealthThreshold defer func() { th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.GoroutineHealthThreshold = goRoutineHealthThreshold }) }() status, resp := Client.GetPing() CheckNoError(t, resp) if status != "OK" { t.Fatal("should return OK") } th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.GoroutineHealthThreshold = 10 }) status, resp = th.SystemAdminClient.GetPing() CheckInternalErrorStatus(t, resp) if status != "unhealthy" { t.Fatal("should return unhealthy") } }
explode_data.jsonl/10676
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 248 }
[ 2830, 3393, 1949, 69883, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1005, 3803, 15944, 1005, 3803, 2320, 7210, 741, 16867, 270, 836, 682, 4454, 741, 71724, 1669, 270, 11716, 271, 30680, 73018, 14542, 37841, 1669, 353, 339, 5105, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBasicGetNotProvided(t *testing.T) { client := newBasicClient() result, err := client.GetNotProvided(context.Background(), nil) if err != nil { t.Fatalf("GetNotProvided: %v", err) } if r := cmp.Diff(result.Basic, Basic{}); r != "" { t.Fatal(r) } }
explode_data.jsonl/15342
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 15944, 1949, 2623, 35819, 291, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 501, 15944, 2959, 741, 9559, 11, 1848, 1669, 2943, 2234, 2623, 35819, 291, 5378, 19047, 1507, 2092, 340, 743, 1848, 961, 2092, 341, 197, 3244, 30762, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestScrapeLoopAppend feeds hand-written exposition-format samples through a
// scrape loop and checks the resulting label sets and values, covering
// honor_labels handling, empty-label removal, and stale-NaN normalization.
func TestScrapeLoopAppend(t *testing.T) {
	tests := []struct {
		title           string   // subtest description (logged, not a t.Run name)
		honorLabels     bool     // scrape config honor_labels setting
		scrapeLabels    string   // raw scraped sample line
		discoveryLabels []string // target labels as name/value pairs
		expLset         labels.Labels
		expValue        float64
	}{
		{
			// When "honor_labels" is not set
			// label name collision is handler by adding a prefix.
			title:           "Label name collision",
			honorLabels:     false,
			scrapeLabels:    `metric{n="1"} 0`,
			discoveryLabels: []string{"n", "2"},
			expLset:         labels.FromStrings("__name__", "metric", "exported_n", "1", "n", "2"),
			expValue:        0,
		}, {
			// Labels with no value need to be removed as these should not be ingested.
			title:           "Delete Empty labels",
			honorLabels:     false,
			scrapeLabels:    `metric{n=""} 0`,
			discoveryLabels: nil,
			expLset:         labels.FromStrings("__name__", "metric"),
			expValue:        0,
		}, {
			// Honor Labels should ignore labels with the same name.
			title:           "Honor Labels",
			honorLabels:     true,
			scrapeLabels:    `metric{n1="1" n2="2"} 0`,
			discoveryLabels: []string{"n1", "0"},
			expLset:         labels.FromStrings("__name__", "metric", "n1", "1", "n2", "2"),
			expValue:        0,
		}, {
			title:           "Stale - NaN",
			honorLabels:     false,
			scrapeLabels:    `metric NaN`,
			discoveryLabels: nil,
			expLset:         labels.FromStrings("__name__", "metric"),
			expValue:        float64(value.NormalNaN),
		},
	}

	for _, test := range tests {
		// Appender that records every appended sample for inspection.
		app := &collectResultAppender{}

		discoveryLabels := &Target{
			labels: labels.FromStrings(test.discoveryLabels...),
		}

		sl := newScrapeLoop(context.Background(),
			nil, nil, nil,
			func(l labels.Labels) labels.Labels {
				return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
			},
			func(l labels.Labels) labels.Labels {
				return mutateReportSampleLabels(l, discoveryLabels)
			},
			func() storage.Appender { return app },
			nil,
			0,
			true,
		)

		now := time.Now()
		_, _, _, err := sl.append([]byte(test.scrapeLabels), "", now)
		if err != nil {
			t.Fatalf("Unexpected append error: %s", err)
		}

		expected := []sample{
			{
				metric: test.expLset,
				t:      timestamp.FromTime(now),
				v:      test.expValue,
			},
		}

		// When the expected value is NaN
		// DeepEqual will report NaNs as being different,
		// so replace it with the expected one.
		if test.expValue == float64(value.NormalNaN) {
			app.result[0].v = expected[0].v
		}

		t.Logf("Test:%s", test.title)
		testutil.Equals(t, expected, app.result)
	}
}
explode_data.jsonl/56127
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1177 }
[ 2830, 3393, 3326, 19842, 14620, 23877, 1155, 353, 8840, 836, 8, 1476, 78216, 1669, 3056, 1235, 341, 197, 24751, 1843, 914, 198, 197, 9598, 54037, 23674, 257, 1807, 198, 197, 29928, 19842, 23674, 262, 914, 198, 197, 34597, 7449, 23674, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConnectClusterUnreachableNodes(t *testing.T) { nodeList := createMockNodes(allocatableMap, schedulableNodeMap) ms := &MockClusterScrapper{ mockGetAllNodes: func() ([]*v1.Node, error) { return nodeList, nil }, mockGetKubernetesServiceID: func() (svcID string, err error) { return testClusterName, nil }, } ns := &MockNodeScrapper{ mockGetSummary: func(ip, nodeName string) (*stats.Summary, error) { return nil, fmt.Errorf("%s node is unreachable", ip) }, } clusterProcessor := &ClusterProcessor{ clusterInfoScraper: ms, nodeScrapper: ns, } err := clusterProcessor.ConnectCluster() assert.False(t, clusterProcessor.isValidated) assert.NotNil(t, err) }
explode_data.jsonl/62018
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 280 }
[ 2830, 3393, 14611, 28678, 1806, 46550, 12288, 1155, 353, 8840, 836, 8, 341, 20831, 852, 1669, 1855, 11571, 12288, 7, 4742, 15086, 2227, 11, 5575, 360, 480, 1955, 2227, 340, 47691, 1669, 609, 11571, 28678, 3326, 5518, 515, 197, 77333, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestMemberRegistration_NoLeadership verifies that a placement server that
// does not hold raft leadership rejects member-registration streams with
// codes.FailedPrecondition.
func TestMemberRegistration_NoLeadership(t *testing.T) {
	// set up
	serverAddress, testServer, cleanup := newTestPlacementServer(testRaftServer)
	testServer.hasLeadership = false

	// arrange
	conn, stream, err := newTestClient(serverAddress)
	assert.NoError(t, err)

	host := &v1pb.Host{
		Name:     "127.0.0.1:50102",
		Entities: []string{"DogActor", "CatActor"},
		Id:       "testAppID",
		Load:     1, // Not used yet
		// Port is redundant because Name should include port number
	}

	// act: send a registration, then read the server's response, which is
	// expected to carry the failed-precondition status.
	stream.Send(host)
	_, err = stream.Recv()
	s, ok := status.FromError(err)

	// assert
	assert.True(t, ok)
	assert.Equal(t, codes.FailedPrecondition, s.Code())
	stream.CloseSend()

	// tear down
	conn.Close()
	cleanup()
}
explode_data.jsonl/42593
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 272 }
[ 2830, 3393, 9366, 23365, 36989, 92724, 2151, 1155, 353, 8840, 836, 8, 341, 197, 322, 738, 705, 198, 41057, 4286, 11, 1273, 5475, 11, 21290, 1669, 501, 2271, 28237, 5475, 8623, 55535, 723, 5475, 340, 18185, 5475, 6858, 92724, 2151, 284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWhenGitlabIsSelectedGitlabRepoIsCreated(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockPrompt := NewMockPrompt(ctrl) mockPrompt.EXPECT().forGitRepository().Return(constants.Gitlab, nil) factory := NewFactory(mockPrompt) repo, _ := factory.Create(projectName) if !isGitlabType(repo) { t.Log("Expected gitlab type returned") t.Fail() return } }
explode_data.jsonl/1736
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 160 }
[ 2830, 3393, 4498, 46562, 14380, 3872, 6316, 46562, 14380, 25243, 3872, 11694, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 77333, 54615, 1669, 1532, 11571, 54615, 62100, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestReturnUndefined checks that referencing the undeclared identifier x
// inside f raises a ReferenceError that the script itself can catch, leaving
// the script's final expression (thrown) equal to true.
func TestReturnUndefined(t *testing.T) {
	// NOTE(review): the SCRIPT line breaks were reconstructed from a
	// whitespace-mangled source line; the statements themselves are unchanged.
	const SCRIPT = `
	function f() {
		return x;
	}
	var thrown = false;
	try {
		f();
	} catch (e) {
		thrown = e instanceof ReferenceError;
	}
	thrown;
	`
	testScript1(SCRIPT, valueTrue, t)
}
explode_data.jsonl/75309
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 102 }
[ 2830, 3393, 5598, 30571, 1155, 353, 8840, 836, 8, 341, 4777, 53679, 284, 22074, 7527, 282, 368, 341, 7782, 853, 856, 280, 197, 630, 2405, 14989, 284, 895, 280, 6799, 341, 197, 1166, 543, 197, 92, 2287, 320, 68, 8, 341, 197, 70479, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_GetAndSetFileName(t *testing.T) { v := NewFileConfig(testFilename, new(fakeCoder)) v.SetFileName(testFilename) if v.FileName() != testFilename { t.Errorf("FileName test failed, get %s != set %s", v.FileName(), testFilename) } }
explode_data.jsonl/69619
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 90 }
[ 2830, 3393, 13614, 3036, 1649, 10903, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 1532, 1703, 2648, 8623, 20759, 11, 501, 74138, 35290, 1171, 5195, 4202, 10903, 8623, 20759, 340, 743, 348, 35231, 368, 961, 1273, 20759, 341, 197, 3244, 130...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
// TestXClient drives a mock SMTP server through an XCLIENT exchange and
// checks that the advertised client address is adopted as the session's
// RemoteIP, that a well-formed XCLIENT receives a 250 reply, and that
// malformed XCLIENT input is still answered with 250.
func TestXClient(t *testing.T) {
	var mainlog log.Logger
	var logOpenError error
	defer cleanTestArtifacts(t)
	sc := getMockServerConfig()
	// Enable XCLIENT handling on the mock server for this test.
	sc.XClientOn = true
	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
	if logOpenError != nil {
		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
	}
	conn, server := getMockServerConn(sc, t)
	// call the serve.handleClient() func in a goroutine.
	client := NewClient(conn.Server, 1, mainlog, mail.NewPool(5))
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		server.handleClient(client)
		wg.Done()
	}()
	// Wait for the greeting from the server
	r := textproto.NewReader(bufio.NewReader(conn.Client))
	line, _ := r.ReadLine()
	// fmt.Println(line)
	w := textproto.NewWriter(bufio.NewWriter(conn.Client))
	if err := w.PrintfLine("HELO test.test.com"); err != nil {
		t.Error(err)
	}
	line, _ = r.ReadLine()
	//fmt.Println(line)
	if err := w.PrintfLine("XCLIENT ADDR=212.96.64.216 NAME=[UNAVAILABLE]"); err != nil {
		t.Error(err)
	}
	line, _ = r.ReadLine()
	// The server should have adopted the XCLIENT-advertised address.
	if client.RemoteIP != "212.96.64.216" {
		t.Error("client.RemoteIP should be 212.96.64.216, but got:", client.RemoteIP)
	}
	expected := "250 2.1.0 OK"
	if strings.Index(line, expected) != 0 {
		t.Error("expected", expected, "but got:", line)
	}
	// try malformed input
	if err := w.PrintfLine("XCLIENT c"); err != nil {
		t.Error(err)
	}
	line, _ = r.ReadLine()
	expected = "250 2.1.0 OK"
	if strings.Index(line, expected) != 0 {
		t.Error("expected", expected, "but got:", line)
	}
	if err := w.PrintfLine("QUIT"); err != nil {
		t.Error(err)
	}
	line, _ = r.ReadLine()
	wg.Wait() // wait for handleClient to exit
}
explode_data.jsonl/17641
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 675 }
[ 2830, 3393, 55, 2959, 1155, 353, 8840, 836, 8, 341, 2405, 1887, 839, 1487, 12750, 198, 2405, 1487, 5002, 1454, 1465, 198, 16867, 4240, 2271, 9286, 26401, 1155, 340, 29928, 1669, 633, 11571, 5475, 2648, 741, 29928, 4338, 2959, 1925, 284,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestGRPCBackend boots the query service with a gRPC remote backend pointing
// at an in-process mock query server, then issues a Prometheus remote-read
// request and checks it reaches the mock exactly once.
func TestGRPCBackend(t *testing.T) {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	grpcAddr := lis.Addr().String()
	// NOTE(review): the YAML line breaks/indentation below were reconstructed
	// from a whitespace-mangled source line; keys and values are unchanged.
	var grpcConfigYAML = fmt.Sprintf(`
listenAddress: 127.0.0.1:0

logging:
  level: info

metrics:
  scope:
    prefix: "coordinator"
  prometheus:
    handlerPath: /metrics
    listenAddress: "127.0.0.1:0"
    onError: stderr
  sanitization: prometheus
  samplingRate: 1.0

rpc:
  remoteListenAddresses: ["%s"]

backend: grpc

tagOptions:
  metricName: "bar"
  idScheme: prepend_meta

readWorkerPoolPolicy:
  grow: true
  size: 100
  shards: 1000
  killProbability: 0.3

writeWorkerPoolPolicy:
  grow: true
  size: 100
  shards: 1000
  killProbability: 0.3
`, grpcAddr)

	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	// Mock gRPC query server the coordinator will treat as its remote backend.
	s := grpc.NewServer()
	defer s.GracefulStop()
	qs := newQueryServer()
	rpc.RegisterQueryServer(s, qs)
	go func() {
		s.Serve(lis)
	}()

	configFile, close := newTestFile(t, "config_backend.yaml", grpcConfigYAML)
	defer close()

	var cfg config.Configuration
	err = xconfig.LoadFile(&cfg, configFile.Name(), xconfig.Options{})
	require.NoError(t, err)

	// No clusters
	require.Equal(t, 0, len(cfg.Clusters))
	require.Equal(t, config.GRPCStorageType, cfg.Backend)

	interruptCh := make(chan error)
	doneCh := make(chan struct{})
	listenerCh := make(chan net.Listener, 1)
	go func() {
		Run(RunOptions{
			Config:      cfg,
			InterruptCh: interruptCh,
			ListenerCh:  listenerCh,
		})
		doneCh <- struct{}{}
	}()

	// Wait for listener
	listener := <-listenerCh
	addr := listener.Addr().String()

	// Wait for server to come up
	waitForServerHealthy(t, addr)

	// Send Prometheus read request
	promReq := test.GeneratePromReadRequest()
	promReqBody := test.GeneratePromReadRequestBody(t, promReq)
	req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s%s", addr, remote.PromReadURL), promReqBody)
	require.NoError(t, err)

	_, err = http.DefaultClient.Do(req)
	require.NoError(t, err)
	// Exactly one read must have been proxied to the mock backend.
	assert.Equal(t, qs.reads, 1)

	// Ensure close server performs as expected
	interruptCh <- fmt.Errorf("interrupt")
	<-doneCh
}
explode_data.jsonl/22543
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 840 }
[ 2830, 3393, 8626, 4872, 29699, 1155, 353, 8840, 836, 8, 341, 8810, 285, 11, 1848, 1669, 4179, 68334, 445, 27161, 497, 330, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, 15, 1138, 17957, 35699, 1155, 11, 1848, 340, 197, 56585, 13986, 166...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestValueToICECandidate checks JSON→ICECandidate conversion for
// Firefox-style inputs (single candidate string) and Chrome/WebKit-style
// inputs (explicit fields), including field precedence when both are present.
func TestValueToICECandidate(t *testing.T) {
	testCases := []struct {
		jsonCandidate string
		expect        ICECandidate
	}{
		{
			// Firefox-style ICECandidateInit:
			`{"candidate":"1966762133 1 udp 2122260222 192.168.20.128 47298 typ srflx raddr 203.0.113.1 rport 5000"}`,
			ICECandidate{
				Foundation:     "1966762133",
				Priority:       2122260222,
				Address:        "192.168.20.128",
				Protocol:       ICEProtocolUDP,
				Port:           47298,
				Typ:            ICECandidateTypeSrflx,
				Component:      1,
				RelatedAddress: "203.0.113.1",
				RelatedPort:    5000,
			},
		},
		{
			// Chrome/Webkit-style ICECandidate:
			`{"foundation":"1966762134", "component":"rtp", "protocol":"udp", "priority":2122260223, "address":"192.168.20.129", "port":47299, "type":"host", "relatedAddress":null}`,
			ICECandidate{
				Foundation:     "1966762134",
				Priority:       2122260223,
				Address:        "192.168.20.129",
				Protocol:       ICEProtocolUDP,
				Port:           47299,
				Typ:            ICECandidateTypeHost,
				Component:      1,
				RelatedAddress: "<null>",
				RelatedPort:    0,
			},
		},
		{
			// Both are present, Chrome/Webkit-style takes precedent:
			`{"candidate":"1966762133 1 udp 2122260222 192.168.20.128 47298 typ srflx raddr 203.0.113.1 rport 5000", "foundation":"1966762134", "component":"rtp", "protocol":"udp", "priority":2122260223, "address":"192.168.20.129", "port":47299, "type":"host", "relatedAddress":null}`,
			ICECandidate{
				Foundation:     "1966762134",
				Priority:       2122260223,
				Address:        "192.168.20.129",
				Protocol:       ICEProtocolUDP,
				Port:           47299,
				Typ:            ICECandidateTypeHost,
				Component:      1,
				RelatedAddress: "<null>",
				RelatedPort:    0,
			},
		},
	}

	for i, testCase := range testCases {
		v := map[string]interface{}{}
		err := json.Unmarshal([]byte(testCase.jsonCandidate), &v)
		if err != nil {
			t.Errorf("Case %d: bad test, got error: %v", i, err)
		}

		val := *valueToICECandidate(js.ValueOf(v))
		// statsID is not part of the comparison; clear it before asserting.
		val.statsID = ""
		assert.Equal(t, testCase.expect, val)
	}
}
explode_data.jsonl/61187
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1010 }
[ 2830, 3393, 1130, 1249, 5487, 63901, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 30847, 63901, 914, 198, 197, 24952, 286, 40563, 63901, 198, 197, 59403, 197, 197, 515, 298, 197, 322, 25929, 11297, 40563, 6390...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestReversiAnz37(t *testing.T) { r := NewReversiAnz() r.SetOwnEdgeSideOtherCnt(1) if r.GetOwnEdgeSideOtherCnt() != 1 { t.Errorf("NG") } }
explode_data.jsonl/23060
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 72 }
[ 2830, 3393, 693, 3004, 72, 2082, 89, 18, 22, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 693, 3004, 72, 2082, 89, 741, 7000, 4202, 14182, 11656, 16384, 11409, 33747, 7, 16, 340, 743, 435, 2234, 14182, 11656, 16384, 11409, 33747, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestRollbackNoTransaction(t *testing.T) { t.Parallel() db := &DB{} if err := db.Rollback(context.Background()); err.Error() != "context has no transaction" { t.Errorf("unexpected error value: %v", err) } }
explode_data.jsonl/25446
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 78 }
[ 2830, 3393, 32355, 1419, 2753, 8070, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 20939, 1669, 609, 3506, 16094, 743, 1848, 1669, 2927, 88918, 5378, 19047, 13426, 1848, 6141, 368, 961, 330, 2147, 702, 902, 7745, 1, 341, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestKeyValueDecoder_decodeerror(t *testing.T) { verifyFails := func(kvLine string, expectedErr error) func(t *testing.T) { return func(t *testing.T) { d := keyValueDecoder{} _, err := d.decode(kvLine) require.Error(t, err) require.Equal(t, expectedErr, err) } } t.Run("case=startspace", verifyFails(" akey: bob", errInvalidKeyValueLowercase)) t.Run("case=empty", verifyFails("", errInvalidKeyNoColon)) t.Run("case=upperstart", verifyFails("Akey: bob", errInvalidKeyValueLowercase)) t.Run("case=emptykey", verifyFails(": bob", errInvalidKeyValueEmpty)) t.Run("case=keywithspaces", verifyFails("a key: bob", errInvalidKeyValueSpaces)) t.Run("case=keywithtab", verifyFails("a\tkey: bob", errInvalidKeyValueSpaces)) t.Run("case=keywithnewline", verifyFails("a\nkey: bob", errInvalidKeyValueSpaces)) t.Run("case=valuewithnewline", verifyFails("akey: bo\nb", errInvalidKeyValueReturn)) }
explode_data.jsonl/39869
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 348 }
[ 2830, 3393, 72082, 20732, 15227, 841, 1155, 353, 8840, 836, 8, 341, 93587, 37, 6209, 1669, 2915, 90265, 2460, 914, 11, 3601, 7747, 1465, 8, 2915, 1155, 353, 8840, 836, 8, 341, 197, 853, 2915, 1155, 353, 8840, 836, 8, 341, 298, 2698,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReadZero(t *testing.T) { for _, size := range []int{100, 2} { t.Run(fmt.Sprintf("bufsize=%d", size), func(t *testing.T) { r := io.MultiReader(strings.NewReader("abc"), &emptyThenNonEmptyReader{r: strings.NewReader("def"), n: 1}) br := NewReaderSize(r, size) want := func(s string, wantErr error) { p := make([]byte, 50) n, err := br.Read(p) if err != wantErr || n != len(s) || string(p[:n]) != s { t.Fatalf("read(%d) = %q, %v, want %q, %v", len(p), string(p[:n]), err, s, wantErr) } t.Logf("read(%d) = %q, %v", len(p), string(p[:n]), err) } want("abc", nil) want("", nil) want("def", nil) want("", io.EOF) }) } }
explode_data.jsonl/22884
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 330 }
[ 2830, 3393, 4418, 17999, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 1379, 1669, 2088, 3056, 396, 90, 16, 15, 15, 11, 220, 17, 92, 341, 197, 3244, 16708, 28197, 17305, 445, 5909, 2141, 7846, 67, 497, 1379, 701, 2915, 1155, 353, 8840, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestIncludes(t *testing.T) { items := []int{23, 24, 2, 5, 10} interfaceItems := make([]interface{}, len(items)) for i, v := range items { interfaceItems[i] = v } a := New(interfaceItems) included := a.Includes(2) if !included { t.Log("Array should return true for value in array") t.Log("Expected", true, "\n Got", included) t.Fail() } included2 := a.Includes(30) if included2 { t.Log("Array should return false for value not in array") t.Log("Expected", false, "\n Got", included2) t.Fail() } }
explode_data.jsonl/47087
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 204 }
[ 2830, 3393, 55834, 1155, 353, 8840, 836, 8, 341, 46413, 1669, 3056, 396, 90, 17, 18, 11, 220, 17, 19, 11, 220, 17, 11, 220, 20, 11, 220, 16, 15, 532, 58915, 1564, 4353, 1669, 1281, 10556, 4970, 22655, 2422, 24337, 4390, 2023, 600,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestListDocumentsCommand(t *testing.T) { ssmMock := &manager.MockSSM{ Error: false, NextToken: "", CommandStatus: "Success", CommandHistory: map[string]*struct { Command *ssm.Command Status string }{}, Documents: ssmDocumentIdentifiers, } m := manager.NewTestManager(ssmMock, nil, nil) t.Run("List documents works", func(t *testing.T) { expected := outputDocumentIdentifiers actual, err := m.ListDocuments(50, nil) assert.Nil(t, err) assert.NotNil(t, actual) assert.Equal(t, expected, actual) }) t.Run("Limit number of documents works", func(t *testing.T) { expected := outputDocumentIdentifiers[:1] actual, err := m.ListDocuments(1, nil) assert.Nil(t, err) assert.NotNil(t, actual) assert.Equal(t, expected, actual) }) t.Run("Pagination works", func(t *testing.T) { ssmMock.NextToken = "next" defer func() { ssmMock.NextToken = "" }() expected := outputDocumentIdentifiers actual, err := m.ListDocuments(50, nil) assert.Nil(t, err) assert.NotNil(t, actual) assert.Equal(t, expected, actual) }) t.Run("Filter works", func(t *testing.T) { expected := outputDocumentIdentifiers[:1] actual, err := m.ListDocuments(50, []*ssm.DocumentFilter{ { Key: aws.String("Owner"), Value: aws.String("Amazon"), }, }) assert.Nil(t, err) assert.NotNil(t, actual) assert.Equal(t, expected, actual) }) t.Run("Errors are propagated", func(t *testing.T) { ssmMock.Error = true defer func() { ssmMock.Error = false }() actual, err := m.ListDocuments(50, nil) assert.NotNil(t, err) assert.EqualError(t, err, "failed to list document: expected") assert.Nil(t, actual) }) }
explode_data.jsonl/27143
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 700 }
[ 2830, 3393, 852, 27143, 4062, 1155, 353, 8840, 836, 8, 341, 34472, 76, 11571, 1669, 609, 13297, 24664, 1220, 44, 515, 197, 58421, 25, 260, 895, 345, 197, 197, 5847, 3323, 25, 257, 8324, 197, 97493, 2522, 25, 330, 7188, 756, 197, 974...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInvalidateCaches(t *testing.T) { th := Setup().InitBasic().InitSystemAdmin() defer th.TearDown() Client := th.Client flag, resp := Client.InvalidateCaches() CheckForbiddenStatus(t, resp) if flag { t.Fatal("should not clean the cache due no permission.") } flag, resp = th.SystemAdminClient.InvalidateCaches() CheckNoError(t, resp) if !flag { t.Fatal("should clean the cache") } }
explode_data.jsonl/10686
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 641, 7067, 34, 14242, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1005, 3803, 15944, 1005, 3803, 2320, 7210, 741, 16867, 270, 836, 682, 4454, 741, 71724, 1669, 270, 11716, 271, 30589, 11, 9039, 1669, 8423, 5337, 7067, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestExifEditor_SetUserComment(t *testing.T) { je := getJpegEditor(LeicaImg, t) expUserComment := "A new User Comment" if err := je.Exif().SetUserComment(expUserComment); err != nil { t.Fatalf("Could not set User Comment: %v", err) } md := jpegEditorMD(je, t) ret := md.exifData.GetIfdUserComment() if ret != expUserComment { t.Fatalf("Expected %s got %s", expUserComment, ret) } }
explode_data.jsonl/79886
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 155 }
[ 2830, 3393, 840, 333, 9410, 14812, 1474, 10677, 1155, 353, 8840, 836, 8, 341, 197, 3756, 1669, 633, 41, 10311, 9410, 7, 2304, 3001, 13033, 11, 259, 340, 48558, 1474, 10677, 1669, 330, 32, 501, 2657, 12255, 698, 743, 1848, 1669, 4759, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetInsecureRegistries(t *testing.T) { registryPath, err := createTmpFile([]byte(registry)) assert.NoError(t, err) os.Setenv("REGISTRIES_CONFIG_PATH", registryPath) defer os.Remove(registryPath) registries, err := GetInsecureRegistries() assert.NoError(t, err) assert.True(t, reflect.DeepEqual(registries, []string{"two"})) }
explode_data.jsonl/35604
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 126 }
[ 2830, 3393, 1949, 641, 25132, 3477, 380, 4019, 1155, 353, 8840, 836, 8, 341, 197, 29172, 1820, 11, 1848, 1669, 1855, 35986, 1703, 10556, 3782, 18390, 4944, 1171, 6948, 35699, 1155, 11, 1848, 340, 25078, 4202, 3160, 445, 12173, 3846, 655...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMapCopy(t *testing.T) { t.Parallel() f := func(gcTypes []string, memTypes map[string]string) bool { m := &datatype.MapConvert{ GCTypes: gcTypes, MemoryTypes: memTypes, } c := m.Copy() cc, ok := c.(*datatype.MapConvert) if !ok { t.Errorf("c.(*datatype.MapConvert) = (%T); want (datatype.MapConvert)", c) return false } if m == c { t.Error("m.Copy(): wasn't copied") return false } if !reflect.DeepEqual(cc, m) { t.Errorf("reflect.DeepEqual(cc, m): c = (%v); want (%v)", c, m) return false } return true } if err := quick.Check(f, nil); err != nil { t.Error(err) } }
explode_data.jsonl/57179
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 295 }
[ 2830, 3393, 2227, 12106, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 1166, 1669, 2915, 78657, 4173, 3056, 917, 11, 1833, 4173, 2415, 14032, 30953, 8, 1807, 341, 197, 2109, 1669, 609, 62409, 10104, 12012, 515, 298, 9600, 1162, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestValidRemoteDriver(t *testing.T) { if !testutils.IsRunningInContainer() { t.Skip("Skipping test when not running inside a Container") } mux := http.NewServeMux() server := httptest.NewServer(mux) if server == nil { t.Fatal("Failed to start a HTTP Server") } defer server.Close() type pluginRequest struct { name string } mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, `{"Implements": ["%s"]}`, driverapi.NetworkPluginEndpointType) }) mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, "null") }) if err := os.MkdirAll("/etc/docker/plugins", 0755); err != nil { t.Fatal(err) } defer func() { if err := os.RemoveAll("/etc/docker/plugins"); err != nil { t.Fatal(err) } }() if err := ioutil.WriteFile("/etc/docker/plugins/valid-network-driver.spec", []byte(server.URL), 0644); err != nil { t.Fatal(err) } n, err := controller.NewNetwork("valid-network-driver", "dummy", libnetwork.NetworkOptionGeneric(getEmptyGenericOption())) if err != nil { // Only fail if we could not find the plugin driver if _, ok := err.(types.NotFoundError); ok { t.Fatal(err) } return } defer func() { if err := n.Delete(); err != nil { t.Fatal(err) } }() }
explode_data.jsonl/6375
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 588 }
[ 2830, 3393, 4088, 24703, 11349, 1155, 353, 8840, 836, 8, 341, 743, 753, 1944, 6031, 4506, 18990, 641, 4502, 368, 341, 197, 3244, 57776, 445, 85945, 1273, 979, 537, 4303, 4766, 264, 9678, 1138, 197, 630, 2109, 2200, 1669, 1758, 7121, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIssue20622(t *testing.T) { db := newTestDB(t, "people") defer closeDB(t, db) ctx, cancel := context.WithCancel(context.Background()) defer cancel() tx, err := db.BeginTx(ctx, nil) if err != nil { t.Fatal(err) } rows, err := tx.Query("SELECT|people|age,name|") if err != nil { t.Fatal(err) } count := 0 for rows.Next() { count++ var age int var name string if err := rows.Scan(&age, &name); err != nil { t.Fatal("scan failed", err) } if count == 1 { cancel() } time.Sleep(100 * time.Millisecond) } rows.Close() tx.Commit() }
explode_data.jsonl/16019
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 252 }
[ 2830, 3393, 42006, 17, 15, 21, 17, 17, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 501, 2271, 3506, 1155, 11, 330, 16069, 1138, 16867, 3265, 3506, 1155, 11, 2927, 692, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 16867,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestMin(t *testing.T) { type scenario struct { a int b int expected int } scenarios := []scenario{ { 1, 1, 1, }, { 1, 2, 1, }, { 2, 1, 1, }, } for _, s := range scenarios { assert.EqualValues(t, s.expected, Min(s.a, s.b)) } }
explode_data.jsonl/11580
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 180 }
[ 2830, 3393, 6217, 1155, 353, 8840, 836, 8, 341, 13158, 15048, 2036, 341, 197, 11323, 286, 526, 198, 197, 2233, 286, 526, 198, 197, 42400, 526, 198, 197, 630, 29928, 60494, 1669, 3056, 61422, 515, 197, 197, 515, 298, 197, 16, 345, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUpdateUserGroup(t *testing.T) { if createdUserGroupID == 0 { fmt.Println("User group doesn't created, skip to test!") return } type args struct { id int groupName string } tests := []struct { name string args args wantErr bool }{ {"Update user group", args{id: createdUserGroupID, groupName: "updated_groupname"}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { fmt.Printf("id=%v\n", createdUserGroupID) if err := UpdateUserGroupName(tt.args.id, tt.args.groupName); (err != nil) != tt.wantErr { t.Errorf("UpdateUserGroup() error = %v, wantErr %v", err, tt.wantErr) userGroup, err := GetUserGroup(tt.args.id) if err != nil { t.Errorf("Error occurred when GetUserGroup: %v", err) } if userGroup == nil { t.Fatalf("Failed to get updated user group") } if userGroup.GroupName != tt.args.groupName { t.Fatalf("Failed to update user group") } } }) } }
explode_data.jsonl/73817
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 420 }
[ 2830, 3393, 4289, 1474, 2808, 1155, 353, 8840, 836, 8, 341, 743, 3465, 1474, 2808, 915, 621, 220, 15, 341, 197, 11009, 12419, 445, 1474, 1874, 3171, 944, 3465, 11, 10706, 311, 1273, 22988, 197, 853, 198, 197, 532, 13158, 2827, 2036, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestPasswordRecovery(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() token, err := th.App.CreatePasswordRecoveryToken(th.BasicUser.Id, th.BasicUser.Email) assert.Nil(t, err) tokenData := struct { UserId string Email string }{} err2 := json.Unmarshal([]byte(token.Extra), &tokenData) assert.Nil(t, err2) assert.Equal(t, th.BasicUser.Id, tokenData.UserId) assert.Equal(t, th.BasicUser.Email, tokenData.Email) // Password token with same eMail as during creation err = th.App.ResetPasswordFromToken(token.Token, "abcdefgh") assert.Nil(t, err) // Password token with modified eMail after creation token, err = th.App.CreatePasswordRecoveryToken(th.BasicUser.Id, th.BasicUser.Email) assert.Nil(t, err) th.App.UpdateConfig(func(c *model.Config) { *c.EmailSettings.RequireEmailVerification = false }) th.BasicUser.Email = th.MakeEmail() _, err = th.App.UpdateUser(th.BasicUser, false) assert.Nil(t, err) err = th.App.ResetPasswordFromToken(token.Token, "abcdefgh") assert.NotNil(t, err) }
explode_data.jsonl/31421
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 393 }
[ 2830, 3393, 4876, 693, 7449, 1155, 353, 8840, 836, 8, 341, 70479, 1669, 18626, 1155, 568, 3803, 15944, 741, 16867, 270, 836, 682, 4454, 2822, 43947, 11, 1848, 1669, 270, 5105, 7251, 4876, 693, 7449, 3323, 24365, 48868, 1474, 6444, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIamUserSupplier_Resources(t *testing.T) { cases := []struct { test string dirName string mocks func(client *mocks.FakeIAM) err error }{ { test: "no iam user", dirName: "iam_user_empty", mocks: func(client *mocks.FakeIAM) { client.On("ListUsersPages", mock.Anything, mock.Anything).Return(nil) }, err: nil, }, { test: "iam multiples users", dirName: "iam_user_multiple", mocks: func(client *mocks.FakeIAM) { client.On("ListUsersPages", &iam.ListUsersInput{}, mock.MatchedBy(func(callback func(res *iam.ListUsersOutput, lastPage bool) bool) bool { callback(&iam.ListUsersOutput{Users: []*iam.User{ { UserName: aws.String("test-driftctl-0"), }, { UserName: aws.String("test-driftctl-1"), }, }}, false) callback(&iam.ListUsersOutput{Users: []*iam.User{ { UserName: aws.String("test-driftctl-2"), }, }}, true) return true })).Return(nil) }, err: nil, }, { test: "cannot list iam user", dirName: "iam_user_empty", mocks: func(client *mocks.FakeIAM) { client.On("ListUsersPages", mock.Anything, mock.Anything).Return(awserr.NewRequestFailure(nil, 403, "")) }, err: remoteerror.NewResourceEnumerationError(awserr.NewRequestFailure(nil, 403, ""), resourceaws.AwsIamUserResourceType), }, } for _, c := range cases { shouldUpdate := c.dirName == *goldenfile.Update providerLibrary := terraform.NewProviderLibrary() supplierLibrary := resource.NewSupplierLibrary() if shouldUpdate { provider, err := InitTestAwsProvider(providerLibrary) if err != nil { t.Fatal(err) } supplierLibrary.AddSupplier(NewIamUserSupplier(provider)) } t.Run(c.test, func(tt *testing.T) { fakeIam := mocks.FakeIAM{} c.mocks(&fakeIam) provider := mocks2.NewMockedGoldenTFProvider(c.dirName, providerLibrary.Provider(terraform.AWS), shouldUpdate) deserializer := awsdeserializer.NewIamUserDeserializer() s := &IamUserSupplier{ provider, deserializer, &fakeIam, terraform.NewParallelResourceReader(parallel.NewParallelRunner(context.TODO(), 10)), } got, err := s.Resources() assert.Equal(tt, c.err, err) 
mock.AssertExpectationsForObjects(tt) test.CtyTestDiff(got, c.dirName, provider, deserializer, shouldUpdate, t) }) } }
explode_data.jsonl/42748
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1058 }
[ 2830, 3393, 40, 309, 1474, 46167, 62, 11277, 1155, 353, 8840, 836, 8, 1476, 1444, 2264, 1669, 3056, 1235, 341, 197, 18185, 262, 914, 198, 197, 48532, 675, 914, 198, 197, 2109, 25183, 256, 2915, 12805, 353, 16712, 82, 991, 726, 73707, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEnabled(t *testing.T) { tests := []struct { name string setMap map[string]bool wantEnabled map[string]bool }{ { name: "set multiple features", setMap: map[string]bool{ "a": true, "b": false, }, wantEnabled: map[string]bool{ "a": true, "b": false, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gates := NewFeatureGate() gates.SetFromMap(tt.setMap) for k, want := range tt.wantEnabled { got := gates.Enabled(k) if got != want { t.Errorf("[feature: %s] want %v, got %v", k, want, got) } } }) } }
explode_data.jsonl/34559
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 304 }
[ 2830, 3393, 5462, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 286, 914, 198, 197, 8196, 2227, 414, 2415, 14032, 96436, 198, 197, 50780, 5462, 2415, 14032, 96436, 198, 197, 59403, 197, 197, 515, 298, 11609, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestToOpSpec_BucketsAccessed(t *testing.T) { // TODO(adam) add this test back when BucketsAccessed is restored for the from function // https://github.com/influxdata/flux/issues/114 t.Skip("https://github.com/influxdata/flux/issues/114") bucketName := "my_bucket" orgName := "my_org" tests := []querytest.BucketAwareQueryTestCase{ { Name: "from() with bucket and to with org and bucket", Raw: `from(bucket:"my_bucket") |> to(bucket:"my_bucket", org:"my_org")`, WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Organization: &orgName}}, }, } for _, tc := range tests { tc := tc t.Run(tc.Name, func(t *testing.T) { t.Parallel() querytest.BucketAwareQueryTestHelper(t, tc) }) } }
explode_data.jsonl/74726
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 345 }
[ 2830, 3393, 1249, 7125, 8327, 1668, 38551, 6054, 291, 1155, 353, 8840, 836, 8, 341, 197, 322, 5343, 43779, 309, 8, 912, 419, 1273, 1182, 979, 26064, 1415, 6054, 291, 374, 27003, 369, 279, 504, 729, 198, 197, 322, 3703, 1110, 5204, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRouter_NewRouter_WithMethodNotAllowedResponseEnabled(t *testing.T) { mainRouter := NewRouter(RouterConfig{ EnableMethodNotAllowedResponse: true, }) _ = mainRouter.Register(http.MethodGet, "/some", testHandlerFunc) _ = mainRouter.Register(http.MethodPost, "/some", testDummyHandlerFunc) req, _ := http.NewRequest(http.MethodDelete, "/some", nil) getResponse := httptest.NewRecorder() mainRouter.ServeHTTP(getResponse, req) assertEqual(t, http.StatusMethodNotAllowed, getResponse.Code) assertStringContains(t, "GET", getResponse.Header().Get("Allow")) assertStringContains(t, "POST", getResponse.Header().Get("Allow")) req, _ = http.NewRequest(http.MethodDelete, "/another-route", nil) getResponse = httptest.NewRecorder() mainRouter.ServeHTTP(getResponse, req) assertEqual(t, http.StatusNotFound, getResponse.Code) }
explode_data.jsonl/31752
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 285 }
[ 2830, 3393, 9523, 39582, 9523, 62, 2354, 3523, 97634, 2582, 5462, 1155, 353, 8840, 836, 8, 341, 36641, 9523, 1669, 1532, 9523, 2785, 2676, 2648, 515, 197, 197, 11084, 3523, 97634, 2582, 25, 830, 345, 197, 8824, 197, 62, 284, 1887, 952...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBackupRestoreAppend(t *testing.T) { defer leaktest.AfterTest(t)() const numAccounts = 1000 ctx, _, sqlDB, _, cleanupFn := BackupRestoreTestSetup(t, MultiNode, numAccounts, InitNone) defer cleanupFn() // Ensure that each node has at least one leaseholder. (These splits were // made in BackupRestoreTestSetup.) These are wrapped with SucceedsSoon() // because EXPERIMENTAL_RELOCATE can fail if there are other replication // changes happening. for _, stmt := range []string{ `ALTER TABLE data.bank EXPERIMENTAL_RELOCATE VALUES (ARRAY[1], 0)`, `ALTER TABLE data.bank EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 100)`, `ALTER TABLE data.bank EXPERIMENTAL_RELOCATE VALUES (ARRAY[3], 200)`, } { testutils.SucceedsSoon(t, func() error { _, err := sqlDB.DB.ExecContext(ctx, stmt) return err }) } const localFoo1, localFoo2, localFoo3 = LocalFoo + "/1", LocalFoo + "/2", LocalFoo + "/3" backups := []interface{}{ fmt.Sprintf("%s?COCKROACH_LOCALITY=%s", localFoo1, url.QueryEscape("default")), fmt.Sprintf("%s?COCKROACH_LOCALITY=%s", localFoo2, url.QueryEscape("dc=dc1")), fmt.Sprintf("%s?COCKROACH_LOCALITY=%s", localFoo3, url.QueryEscape("dc=dc2")), } var tsBefore, ts1, ts2 string sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsBefore) sqlDB.Exec(t, "BACKUP TO ($1, $2, $3) AS OF SYSTEM TIME "+tsBefore, backups...) sqlDB.QueryRow(t, "UPDATE data.bank SET balance = 100 RETURNING cluster_logical_timestamp()").Scan(&ts1) sqlDB.Exec(t, "BACKUP TO ($1, $2, $3) AS OF SYSTEM TIME "+ts1, backups...) sqlDB.QueryRow(t, "UPDATE data.bank SET balance = 200 RETURNING cluster_logical_timestamp()").Scan(&ts2) rowsTS2 := sqlDB.QueryStr(t, "SELECT * from data.bank ORDER BY id") sqlDB.Exec(t, "BACKUP TO ($1, $2, $3) AS OF SYSTEM TIME "+ts2, backups...) sqlDB.Exec(t, "ALTER TABLE data.bank RENAME TO data.renamed") sqlDB.Exec(t, "BACKUP TO ($1, $2, $3)", backups...) sqlDB.ExpectErr(t, "cannot append a backup of specific", "BACKUP system.users TO ($1, $2, $3)", backups...) 
sqlDB.Exec(t, "DROP DATABASE data CASCADE") sqlDB.Exec(t, "RESTORE DATABASE data FROM ($1, $2, $3)", backups...) sqlDB.ExpectErr(t, "relation \"data.bank\" does not exist", "SELECT * FROM data.bank ORDER BY id") sqlDB.CheckQueryResults(t, "SELECT * from data.renamed ORDER BY id", rowsTS2) // TODO(dt): test restoring to other backups via AOST. }
explode_data.jsonl/57575
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 921 }
[ 2830, 3393, 56245, 56284, 23877, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 4777, 1629, 41369, 284, 220, 16, 15, 15, 15, 198, 20985, 11, 8358, 5704, 3506, 11, 8358, 21290, 24911, 1669, 43438, 56284, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSetConfigFromFlags(t *testing.T) { driver := NewDriver("default", "path") checkFlags := &drivers.CheckDriverOptions{ FlagsValues: map[string]interface{}{ "rackspace-region": "REGION", "rackspace-username": "user", "rackspace-api-key": "KEY", "rackspace-endpoint-type": "publicURL", }, CreateFlags: driver.GetCreateFlags(), } err := driver.SetConfigFromFlags(checkFlags) assert.NoError(t, err) assert.Empty(t, checkFlags.InvalidFlags) }
explode_data.jsonl/42300
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 188 }
[ 2830, 3393, 1649, 2648, 3830, 9195, 1155, 353, 8840, 836, 8, 341, 33652, 1669, 1532, 11349, 445, 2258, 497, 330, 2343, 5130, 25157, 9195, 1669, 609, 62125, 10600, 11349, 3798, 515, 197, 197, 9195, 6227, 25, 2415, 14032, 31344, 67066, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNestedStruct(t *testing.T) { callErr, funcErr, assert, callBuffer, funcBuffer := testOpenAPITypeWriter(t, ` package foo // Nested is used as struct field type Nested struct { // A simple string String string } // Blah demonstrate a struct with struct field. type Blah struct { // A struct field Field Nested } `) if callErr != nil { t.Fatal(callErr) } if funcErr != nil { t.Fatal(funcErr) } assert.Equal(`"base/foo.Blah": schema_base_foo_Blah(ref), `, callBuffer.String()) assert.Equal(`func schema_base_foo_Blah(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "Blah demonstrate a struct with struct field.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "Field": { SchemaProps: spec.SchemaProps{ Description: "A struct field", Default: map[string]interface {}{}, Ref: ref("base/foo.Nested"), }, }, }, Required: []string{"Field"}, }, }, Dependencies: []string{ "base/foo.Nested",}, } } `, funcBuffer.String()) }
explode_data.jsonl/3342
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 386 }
[ 2830, 3393, 71986, 9422, 1155, 353, 8840, 836, 8, 341, 67288, 7747, 11, 2915, 7747, 11, 2060, 11, 1618, 4095, 11, 2915, 4095, 1669, 1273, 5002, 7082, 929, 6492, 1155, 11, 22074, 1722, 15229, 271, 322, 71742, 374, 1483, 438, 2036, 2070...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSourceRTSP(t *testing.T) { for _, source := range []string{ "udp", "tcp", "tls", } { t.Run(source, func(t *testing.T) { switch source { case "udp", "tcp": p1, ok := testProgram("rtmpDisable: yes\n" + "hlsDisable: yes\n" + "rtspPort: 8555\n" + "rtpPort: 8100\n" + "rtcpPort: 8101\n" + "paths:\n" + " all:\n" + " readUser: testuser\n" + " readPass: testpass\n") require.Equal(t, true, ok) defer p1.close() cnt1, err := newContainer("ffmpeg", "source", []string{ "-re", "-stream_loop", "-1", "-i", "emptyvideo.mkv", "-c", "copy", "-f", "rtsp", "-rtsp_transport", "udp", "rtsp://" + ownDockerIP + ":8555/teststream", }) require.NoError(t, err) defer cnt1.close() p2, ok := testProgram("rtmpDisable: yes\n" + "hlsDisable: yes\n" + "paths:\n" + " proxied:\n" + " source: rtsp://testuser:testpass@localhost:8555/teststream\n" + " sourceProtocol: " + source[len(""):] + "\n" + " sourceOnDemand: yes\n") require.Equal(t, true, ok) defer p2.close() case "tls": serverCertFpath, err := writeTempFile(serverCert) require.NoError(t, err) defer os.Remove(serverCertFpath) serverKeyFpath, err := writeTempFile(serverKey) require.NoError(t, err) defer os.Remove(serverKeyFpath) p, ok := testProgram("rtmpDisable: yes\n" + "hlsDisable: yes\n" + "rtspPort: 8555\n" + "rtpPort: 8100\n" + "rtcpPort: 8101\n" + "readTimeout: 20s\n" + "protocols: [tcp]\n" + "encryption: yes\n" + "serverCert: " + serverCertFpath + "\n" + "serverKey: " + serverKeyFpath + "\n" + "paths:\n" + " all:\n" + " readUser: testuser\n" + " readPass: testpass\n") require.Equal(t, true, ok) defer p.close() cnt1, err := newContainer("ffmpeg", "source", []string{ "-re", "-stream_loop", "-1", "-i", "emptyvideo.mkv", "-c", "copy", "-f", "rtsp", "rtsps://" + ownDockerIP + ":8555/teststream", }) require.NoError(t, err) defer cnt1.close() time.Sleep(1 * time.Second) p2, ok := testProgram("rtmpDisable: yes\n" + "hlsDisable: yes\n" + "paths:\n" + " proxied:\n" + " source: rtsps://testuser:testpass@localhost:8555/teststream\n" + " 
sourceFingerprint: 33949E05FFFB5FF3E8AA16F8213A6251B4D9363804BA53233C4DA9A46D6F2739\n" + " sourceOnDemand: yes\n") require.Equal(t, true, ok) defer p2.close() } time.Sleep(1 * time.Second) cnt3, err := newContainer("ffmpeg", "dest", []string{ "-rtsp_transport", "udp", "-i", "rtsp://" + ownDockerIP + ":8554/proxied", "-vframes", "1", "-f", "image2", "-y", "/dev/null", }) require.NoError(t, err) defer cnt3.close() require.Equal(t, 0, cnt3.wait()) }) } }
explode_data.jsonl/82136
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1530 }
[ 2830, 3393, 3608, 5350, 4592, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 2530, 1669, 2088, 3056, 917, 515, 197, 197, 1, 31101, 756, 197, 197, 1, 27161, 756, 197, 197, 1, 34488, 756, 197, 92, 341, 197, 3244, 16708, 12437, 11, 2915, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestFindObjectFailsOnNoHandles(t *testing.T) { ctx := MockCtx{} ctx.FindObjectsInitFunc = findObjectsInitOK ctx.FindObjectsFinalFunc = findObjectsFinalOK // test FindObject fails when no handles are returned ctx.FindObjectsFunc = func(pkcs11.SessionHandle, int) ([]pkcs11.ObjectHandle, bool, error) { return []pkcs11.ObjectHandle{}, false, nil } s := &Session{ctx, 0} _, err := s.FindObject(nil) test.AssertEquals(t, err, ErrNoObject) }
explode_data.jsonl/1156
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 171 }
[ 2830, 3393, 80835, 37, 6209, 1925, 2753, 65928, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 14563, 23684, 16094, 20985, 9998, 11543, 3803, 9626, 284, 1477, 11543, 3803, 3925, 198, 20985, 9998, 11543, 19357, 9626, 284, 1477, 11543, 19357, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestResourceRebuild(t *testing.T) { c := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer c.Terminate(t) client := c.RandClient() s := etcd.NewStore(client, "store") require.NoError(t, s.CreateNamespace(context.Background(), types.FixtureNamespace("default"))) ctx := store.NamespaceContext(context.Background(), "default") cacher := Resource{ cache: make(map[string][]Value), client: client, resourceT: &fixture.Resource{}, } // Empty store cacher.updates = append(cacher.updates, store.WatchEventResource{ Action: store.WatchError, }) cacher.updateCache(ctx) assert.Len(t, cacher.cache["default"], 0) // Resource added to a new namespace foo := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "foo", Namespace: "default"}} if err := s.CreateOrUpdateResource(ctx, foo); err != nil { t.Fatal(err) } cacher.updates = append(cacher.updates, store.WatchEventResource{ Action: store.WatchError, }) cacher.updateCache(ctx) assert.Len(t, cacher.cache["default"], 1) // Resource added to an existing namespace bar := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "bar", Namespace: "default"}} if err := s.CreateOrUpdateResource(ctx, bar); err != nil { t.Fatal(err) } cacher.updates = append(cacher.updates, store.WatchEventResource{ Action: store.WatchError, }) cacher.updateCache(ctx) assert.Len(t, cacher.cache["default"], 2) // Resource updated bar.Foo = "acme" if err := s.CreateOrUpdateResource(ctx, bar); err != nil { t.Fatal(err) } cacher.updates = append(cacher.updates, store.WatchEventResource{ Action: store.WatchError, }) cacher.updateCache(ctx) assert.Len(t, cacher.cache["default"], 2) // Resource deleted if err := s.DeleteResource(ctx, bar.StorePrefix(), bar.GetObjectMeta().Name); err != nil { t.Fatal(err) } cacher.updates = append(cacher.updates, store.WatchEventResource{ Action: store.WatchError, }) cacher.updateCache(ctx) assert.Len(t, cacher.cache["default"], 1) }
explode_data.jsonl/15116
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 733 }
[ 2830, 3393, 4783, 693, 5834, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 17590, 7121, 28678, 53, 18, 1155, 11, 609, 60168, 72883, 2648, 90, 1695, 25, 220, 16, 3518, 16867, 272, 836, 261, 34016, 1155, 340, 25291, 1669, 272, 2013, 437, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestNewChangesetCreationOfResources(t *testing.T) { tests := map[string]struct { templateFixture string expectedGolden string }{ "Without annotations": { templateFixture: "is.yml", expectedGolden: "is.yml", }, "With annotations": { templateFixture: "is-annotation.yml", expectedGolden: "is-annotation.yml", }, "With image reference": { templateFixture: "dc.yml", expectedGolden: "dc.yml", }, "With image reference and annotation": { templateFixture: "dc-annotation.yml", expectedGolden: "dc-annotation.yml", }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { filter, err := NewResourceFilter("", "", []string{}) if err != nil { t.Fatal(err) } platformBasedList, err := NewPlatformBasedResourceList( filter, []byte(""), // empty to ensure creation of resource ) if err != nil { t.Fatal(err) } templateBasedList, err := NewTemplateBasedResourceList( filter, helper.ReadFixtureFile(t, "templates/"+tc.templateFixture), ) if err != nil { t.Fatal(err) } upsertOnly := false allowRecreate := false preservePaths := []string{} cs, err := NewChangeset( platformBasedList, templateBasedList, upsertOnly, allowRecreate, preservePaths, ) if err != nil { t.Fatal(err) } createChanges := cs.Create numberOfCreateChanges := len(createChanges) if numberOfCreateChanges != 1 { t.Fatalf("Expected one creation change, got: %d", numberOfCreateChanges) } createChange := createChanges[0] want := string(helper.ReadGoldenFile(t, "desired-state/"+tc.expectedGolden)) got := createChange.DesiredState if diff := cmp.Diff(want, got); diff != "" { t.Fatalf("Desired state mismatch (-want +got):\n%s", diff) } }) } }
explode_data.jsonl/33769
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 748 }
[ 2830, 3393, 3564, 11317, 295, 32701, 2124, 11277, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 2415, 14032, 60, 1235, 341, 197, 22832, 18930, 914, 198, 197, 42400, 59790, 220, 914, 198, 197, 59403, 197, 197, 1, 26040, 32207, 788, 341, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestMCP23017DriverWrite(t *testing.T) { // clear bit mcp, adaptor := initTestMCP23017DriverWithStubbedAdaptor(0) gobottest.Assert(t, mcp.Start(), nil) port := mcp.getPort("A") adaptor.i2cReadImpl = func(b []byte) (int, error) { return len(b), nil } adaptor.i2cWriteImpl = func([]byte) (int, error) { return 0, nil } err := mcp.write(port.IODIR, uint8(7), 0) gobottest.Assert(t, err, nil) // set bit mcp, adaptor = initTestMCP23017DriverWithStubbedAdaptor(0) gobottest.Assert(t, mcp.Start(), nil) port = mcp.getPort("B") adaptor.i2cReadImpl = func(b []byte) (int, error) { return len(b), nil } adaptor.i2cWriteImpl = func([]byte) (int, error) { return 0, nil } err = mcp.write(port.IODIR, uint8(7), 1) gobottest.Assert(t, err, nil) // write error mcp, adaptor = initTestMCP23017DriverWithStubbedAdaptor(0) gobottest.Assert(t, mcp.Start(), nil) adaptor.i2cReadImpl = func(b []byte) (int, error) { return len(b), nil } adaptor.i2cWriteImpl = func([]byte) (int, error) { return 0, errors.New("write error") } err = mcp.write(port.IODIR, uint8(7), 0) gobottest.Assert(t, err, errors.New("write error")) //debug debug = true log.SetOutput(ioutil.Discard) adaptor.i2cReadImpl = func(b []byte) (int, error) { return len(b), nil } adaptor.i2cWriteImpl = func([]byte) (int, error) { return 0, nil } err = mcp.write(port.IODIR, uint8(7), 1) gobottest.Assert(t, err, nil) debug = false log.SetOutput(os.Stdout) }
explode_data.jsonl/42322
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 644 }
[ 2830, 3393, 44, 7123, 17, 18, 15, 16, 22, 11349, 7985, 1155, 353, 8840, 836, 8, 341, 197, 322, 2797, 2699, 198, 2109, 4672, 11, 91941, 1669, 2930, 2271, 44, 7123, 17, 18, 15, 16, 22, 11349, 2354, 33838, 2721, 2589, 32657, 7, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLs(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1) file2 := r.WriteBoth(ctx, "empty space", "-", t2) fstest.CheckItems(t, r.Fremote, file1, file2) var buf bytes.Buffer err := operations.List(ctx, r.Fremote, &buf) require.NoError(t, err) res := buf.String() assert.Contains(t, res, " 1 empty space\n") assert.Contains(t, res, " 60 potato2\n") }
explode_data.jsonl/51925
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 204 }
[ 2830, 3393, 43, 82, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 7000, 1669, 48434, 477, 7121, 6727, 1155, 340, 16867, 435, 991, 977, 1064, 741, 17661, 16, 1669, 435, 4073, 20629, 7502, 11, 330, 19099, 4330, 17, 497, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReadChallengeTx_acceptsV0AndV1Transactions(t *testing.T) { kp0 := newKeypair0() tx, err := BuildChallengeTx( kp0.Seed(), kp0.Address(), "testwebauth.stellar.org", "testanchor.stellar.org", network.TestNetworkPassphrase, time.Hour, ) assert.NoError(t, err) originalHash, err := tx.HashHex(network.TestNetworkPassphrase) assert.NoError(t, err) v1Challenge, err := marshallBase64(tx.envelope, tx.Signatures()) assert.NoError(t, err) convertToV0(tx) v0Challenge, err := marshallBase64(tx.envelope, tx.Signatures()) assert.NoError(t, err) for _, challenge := range []string{v1Challenge, v0Challenge} { parsedTx, clientAccountID, _, err := ReadChallengeTx( challenge, kp0.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, ) assert.NoError(t, err) assert.Equal(t, kp0.Address(), clientAccountID) hash, err := parsedTx.HashHex(network.TestNetworkPassphrase) assert.NoError(t, err) assert.Equal(t, originalHash, hash) } }
explode_data.jsonl/20713
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 423 }
[ 2830, 3393, 4418, 62078, 31584, 35728, 82, 53, 15, 3036, 53, 16, 48761, 1155, 353, 8840, 836, 8, 341, 16463, 79, 15, 1669, 501, 6608, 1082, 1310, 15, 741, 46237, 11, 1848, 1669, 7854, 62078, 31584, 1006, 197, 16463, 79, 15, 5732, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetIntOrPercentValue(t *testing.T) { int10 := intstr.FromInt(10) percent20 := intstr.FromString("20%") intInString30 := intstr.FromString("30") invalidStringA := intstr.FromString("a") invalidStringAPercent := intstr.FromString("a%") invalidStringNumericPercent := intstr.FromString("1%0") testCases := []struct { name string in *intstr.IntOrString expectedValue int expectedPercent bool expectedError error }{ { name: "with a integer", in: &int10, expectedValue: 10, expectedPercent: false, expectedError: nil, }, { name: "with a percentage", in: &percent20, expectedValue: 20, expectedPercent: true, expectedError: nil, }, { name: "with an int in string", in: &intInString30, expectedValue: 30, expectedPercent: false, expectedError: nil, }, { name: "with an 'a' string", in: &invalidStringA, expectedValue: 0, expectedPercent: false, expectedError: fmt.Errorf("invalid value \"a\": strconv.Atoi: parsing \"a\": invalid syntax"), }, { name: "with an 'a%' string", in: &invalidStringAPercent, expectedValue: 0, expectedPercent: true, expectedError: fmt.Errorf("invalid value \"a%%\": strconv.Atoi: parsing \"a\": invalid syntax"), }, { name: "with an '1%0' string", in: &invalidStringNumericPercent, expectedValue: 0, expectedPercent: false, expectedError: fmt.Errorf("invalid value \"1%%0\": strconv.Atoi: parsing \"1%%0\": invalid syntax"), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { value, percent, err := getIntOrPercentValue(tc.in) // Check first if one is nil, and the other isn't, otherwise if not nil, do the messages match if (tc.expectedError != nil) != (err != nil) || err != nil && tc.expectedError.Error() != err.Error() { t.Errorf("Case: %s. Got: %v, expected: %v", tc.name, err, tc.expectedError) } if tc.expectedPercent != percent { t.Errorf("Case: %s. Got: %v, expected: %v", tc.name, percent, tc.expectedPercent) } if tc.expectedValue != value { t.Errorf("Case: %s. Got: %v, expected: %v", tc.name, value, tc.expectedValue) } }) } }
explode_data.jsonl/31012
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1062 }
[ 2830, 3393, 85097, 2195, 32010, 1130, 1155, 353, 8840, 836, 8, 341, 2084, 16, 15, 1669, 526, 495, 11439, 1072, 7, 16, 15, 340, 197, 24422, 17, 15, 1669, 526, 495, 11439, 703, 445, 17, 15, 4, 1138, 2084, 641, 703, 18, 15, 1669, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestJson1(t *testing.T) { msg := "stringstring" bss, err := natsbus.FmtData2Byte([]byte(msg)) fmt.Println(msg, string(bss), err) assert.NotNil(t, nil) }
explode_data.jsonl/57738
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 74 }
[ 2830, 3393, 5014, 16, 1155, 353, 8840, 836, 8, 341, 21169, 1669, 330, 917, 917, 698, 2233, 778, 11, 1848, 1669, 308, 1862, 10338, 991, 2501, 1043, 17, 7153, 10556, 3782, 8119, 1171, 11009, 12419, 8119, 11, 914, 1883, 778, 701, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestCatchClosureInStashlessFunc(t *testing.T) { const SCRIPT = ` function f() { var ex; try { throw "ex1"; } catch (er1) { return function() { return er1; } } } f()(); ` testScript1(SCRIPT, asciiString("ex1"), t) }
explode_data.jsonl/75241
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 57760, 53854, 641, 623, 988, 1717, 9626, 1155, 353, 8840, 836, 8, 341, 4777, 53679, 284, 22074, 7527, 282, 368, 341, 197, 2405, 505, 280, 197, 6799, 341, 298, 9581, 330, 327, 16, 876, 197, 197, 92, 2287, 320, 261, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_singleNumber(t *testing.T) { type args struct { nums []int } tests := []struct { name string args args want int }{ { "test-1", args{[]int{2, 2, 3, 2}}, 3, }, { "test-2", args{[]int{0, 1, 0, 1, 0, 1, 99}}, 99, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := singleNumber(tt.args.nums); got != tt.want { t.Errorf("singleNumber() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/17071
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 242 }
[ 2830, 3393, 19487, 2833, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 22431, 82, 3056, 396, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 31215, 2827, 198, 197, 50780, 526, 198, 197, 59403, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSequentialProcessing(t *testing.T) { // Use middleware and pass 1 as an argument // this is the same as setting the config.ProxyClientLimit to 1 // also known as "PROXY_CLIENT_LIMIT" env var // normally I would just test the middleware function with mocks // but since the requirement is to prove the proxy can perform and still use // redis backing cache I do not use mocks assert := assert.New(t) config := NewConfig() proxy := NewProxyCache(config) var ctx = context.Background() redisClient := redis.NewClient(&redis.Options{ Addr: config.RedisUrl, Password: "", // no password set DB: 0, // use default DB }) // redis client that should be running _, err := redisClient.Ping(ctx).Result() assert.NoError(err) // get rid of value for test redisClient.Del(ctx, "bing") redisClient.Del(ctx, "bong") err = redisClient.Set(ctx, "bing", "charlie", 0).Err() assert.NoError(err) err = redisClient.Set(ctx, "bong", "is cool", 0).Err() assert.NoError(err) handler := http.HandlerFunc(LimitNumClients(proxy.PayloadHandler, 1)) req1, _ := http.NewRequest("GET", "/bing", nil) req2, _ := http.NewRequest("GET", "/bong", nil) req3, _ := http.NewRequest("GET", "/zeep", nil) req4, _ := http.NewRequest("GET", "/zoot", nil) var wg sync.WaitGroup wg.Add(6) go func() { defer wg.Done() rr := httptest.NewRecorder() handler.ServeHTTP(rr, req1) assert.Equal(`{"bing": "charlie"}`, rr.Body.String()) }() go func() { defer wg.Done() rr := httptest.NewRecorder() handler.ServeHTTP(rr, req2) assert.Equal(`{"bong": "is cool"}`, rr.Body.String()) }() go func() { defer wg.Done() rr := httptest.NewRecorder() handler.ServeHTTP(rr, req1) assert.Equal(`{"bing": "charlie"}`, rr.Body.String()) }() go func() { defer wg.Done() rr := httptest.NewRecorder() handler.ServeHTTP(rr, req2) assert.Equal(`{"bong": "is cool"}`, rr.Body.String()) }() go func() { defer wg.Done() rr := httptest.NewRecorder() handler.ServeHTTP(rr, req3) assert.Equal(http.StatusNotFound, rr.Code) }() go func() { defer wg.Done() rr := 
httptest.NewRecorder() handler.ServeHTTP(rr, req4) assert.Equal(http.StatusNotFound, rr.Code) }() wg.Wait() }
explode_data.jsonl/73847
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 897 }
[ 2830, 3393, 22046, 28892, 1155, 353, 8840, 836, 8, 341, 197, 322, 5443, 29679, 323, 1494, 220, 16, 438, 458, 5693, 198, 197, 322, 419, 374, 279, 1852, 438, 6243, 279, 2193, 75200, 2959, 16527, 311, 220, 16, 198, 197, 322, 1083, 3881...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOuterHTML(t *testing.T) { t.Parallel() ctx, cancel := testAllocate(t, "table.html") defer cancel() tests := []struct { sel string by QueryOption }{ {`/html/body/table/thead/tr`, BySearch}, {`thead tr`, ByQueryAll}, {`thead tr`, ByQuery}, {`document.querySelector("#footer > td:nth-child(2)")`, ByJSPath}, } for i, test := range tests { var html string if err := Run(ctx, OuterHTML(test.sel, &html, test.by)); err != nil { t.Fatalf("test %d got error: %v", i, err) } if html == "" { t.Fatalf("test %d: OuterHTML is empty", i) } } }
explode_data.jsonl/59491
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 251 }
[ 2830, 3393, 51322, 5835, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 20985, 11, 9121, 1669, 1273, 75380, 1155, 11, 330, 2005, 2564, 1138, 16867, 9121, 2822, 78216, 1669, 3056, 1235, 341, 197, 1903, 301, 914, 198, 197, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestMetadataErrorResponse(t *testing.T) { c := ec2metadata.New(unit.Session) c.Handlers.Send.Clear() c.Handlers.Send.PushBack(func(r *request.Request) { r.HTTPResponse = &http.Response{ StatusCode: http.StatusBadRequest, Status: http.StatusText(http.StatusBadRequest), Body: ioutil.NopCloser(strings.NewReader("error message text")), } r.Retryable = aws.Bool(false) // network errors are retryable }) data, err := c.GetMetadata("uri/path") if e, a := "error message text", err.Error(); !strings.Contains(a, e) { t.Fatalf("expect %v to be in %v", e, a) } if len(data) != 0 { t.Fatalf("expect empty, got %v", data) } }
explode_data.jsonl/17842
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 274 }
[ 2830, 3393, 14610, 55901, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 11942, 17, 17637, 7121, 24144, 20674, 340, 1444, 35308, 9254, 20176, 13524, 741, 1444, 35308, 9254, 20176, 34981, 3707, 18552, 2601, 353, 2035, 9659, 8, 341, 197, 7000, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEmbedded(t *testing.T) { var cc CC bb := BB{ AA: AA{ A: "dsdd", C: 3, }, D: true, E: map[string]string{"ab": "cc", "a": "ee"}, } StructCopy(&cc, &bb) if cc.A != "dsdd" { t.Error("field A failed") } if cc.D != true { t.Error("field D failed") } if cc.C != "" { t.Error("field C failed") } }
explode_data.jsonl/40578
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 168 }
[ 2830, 3393, 83466, 1155, 353, 8840, 836, 8, 341, 2405, 12527, 13534, 198, 2233, 65, 1669, 18270, 515, 197, 197, 6029, 25, 28234, 515, 298, 22985, 25, 330, 5356, 631, 756, 298, 6258, 25, 220, 18, 345, 197, 197, 1583, 197, 10957, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestGCSClose(t *testing.T) { testCases := []struct { testcase gcsTestCase output error in <-chan *helpers.VolumeInfo }{ { testcase: gcsTestCase{ client: validClient, conf: validConfig, }, output: nil, }, { testcase: gcsTestCase{ client: &gcsMockClient{ err: errTest, }, conf: validConfig, }, output: errTest, }, } for idx, c := range testCases { b := &GoogleCloudStorageBackend{} if err := b.Init(context.Background(), c.testcase.conf, WithGCSClient(c.testcase.client)); err != nil { t.Errorf("%d: error setting up backend - %v", idx, err) } else { err = b.Close() if err != c.output { t.Errorf("%d: Expected %v, got %v", idx, c.output, err) } } } }
explode_data.jsonl/66362
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 352 }
[ 2830, 3393, 22863, 3540, 1469, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 18185, 5638, 342, 4837, 16458, 198, 197, 21170, 256, 1465, 198, 197, 17430, 981, 9119, 5658, 353, 21723, 79106, 1731, 198, 197, 59403...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func Test_SLB_DescribeRegionsWithRPCrequest(t *testing.T) { client, err := slb.NewClientWithAccessKey(os.Getenv("REGION_ID"), os.Getenv("ACCESS_KEY_ID"), os.Getenv("ACCESS_KEY_SECRET")) assert.Nil(t, err) request := slb.CreateDescribeRegionsRequest() response, err := client.DescribeRegions(request) assert.Nil(t, err) assert.True(t, response.IsSuccess()) assert.Equal(t, 36, len(response.RequestId)) assert.True(t, len(response.Regions.Region) > 0) }
explode_data.jsonl/56876
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 176 }
[ 2830, 3393, 25622, 33, 98054, 3114, 79284, 2354, 29528, 2035, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 1739, 65, 7121, 2959, 2354, 6054, 1592, 9638, 64883, 445, 77431, 3450, 3975, 2643, 64883, 445, 55749, 6600, 3450, 3975, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetFullMembership(t *testing.T) { nodeNum := 15 bootPeers := []string{bootPeer(5511), bootPeer(5512)} instances := []*gossipInstance{} var inst *gossipInstance for i := 3; i <= nodeNum; i++ { id := fmt.Sprintf("d%d", i) inst = createDiscoveryInstance(5510+i, id, bootPeers) instances = append(instances, inst) } time.Sleep(time.Second) inst = createDiscoveryInstance(5511, "d1", bootPeers) instances = append(instances, inst) inst = createDiscoveryInstance(5512, "d2", bootPeers) instances = append(instances, inst) assertMembership(t, instances, nodeNum-1) // Ensure that internal endpoint was propagated to everyone for _, inst := range instances { for _, member := range inst.GetMembership() { assert.NotEmpty(t, member.InternalEndpoint) assert.NotEmpty(t, member.Endpoint) } } // Check that Lookup() is valid for _, inst := range instances { for _, member := range inst.GetMembership() { assert.Equal(t, string(member.PKIid), inst.Lookup(member.PKIid).Endpoint) assert.Equal(t, member.PKIid, inst.Lookup(member.PKIid).PKIid) } } stopInstances(t, instances) }
explode_data.jsonl/62265
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 418 }
[ 2830, 3393, 1949, 9432, 80904, 1155, 353, 8840, 836, 8, 341, 20831, 4651, 1669, 220, 16, 20, 198, 197, 4619, 10197, 388, 1669, 3056, 917, 90, 4619, 30888, 7, 20, 20, 16, 16, 701, 10459, 30888, 7, 20, 20, 16, 17, 10569, 197, 47825,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestInverseAssociationEdge(t *testing.T) { edgeInfo := getTestEdgeInfo(t, "folder") edge := edgeInfo.GetAssociationEdgeByName("Todos") expectedAssocEdge := &AssociationEdge{ EdgeConst: "FolderToTodosEdge", commonEdgeInfo: getCommonEdgeInfo( "Todos", schemaparser.GetEntConfigFromName("todo"), ), InverseEdge: &InverseAssocEdge{ EdgeConst: "TodoToFoldersEdge", commonEdgeInfo: getCommonEdgeInfo( "Folders", schemaparser.GetEntConfigFromName("folder"), ), }, TableName: "folder_todos_edges", } testAssocEdge(t, edge, expectedAssocEdge) }
explode_data.jsonl/73726
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 234 }
[ 2830, 3393, 69179, 63461, 11656, 1155, 353, 8840, 836, 8, 341, 197, 7186, 1731, 1669, 633, 2271, 11656, 1731, 1155, 11, 330, 17668, 1138, 197, 7186, 1669, 6821, 1731, 2234, 63461, 11656, 16898, 445, 42147, 5130, 42400, 98628, 11656, 1669,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCrUnmergedRenameIntoNewDir(t *testing.T) { test(t, users("alice", "bob"), as(alice, mkfile("a/b", "hello"), ), as(bob, disableUpdates(), ), as(alice, write("a/c", "world"), ), as(bob, noSync(), rename("a/b", "d/e"), reenableUpdates(), lsdir("a/", m{"c": "FILE"}), lsdir("d/", m{"e": "FILE"}), read("a/c", "world"), read("d/e", "hello"), ), as(alice, lsdir("a/", m{"c": "FILE"}), lsdir("d/", m{"e": "FILE"}), read("a/c", "world"), read("d/e", "hello"), ), ) }
explode_data.jsonl/31354
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 290 }
[ 2830, 3393, 16001, 1806, 40354, 88757, 26591, 3564, 6184, 1155, 353, 8840, 836, 8, 341, 18185, 1155, 345, 197, 90896, 445, 63195, 497, 330, 47086, 4461, 197, 60451, 17643, 558, 345, 298, 2109, 74, 1192, 445, 64, 3470, 497, 330, 14990, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIssue11549_Expect100(t *testing.T) { req := reqBytes(`PUT /readbody HTTP/1.1 User-Agent: PycURL/7.22.0 Host: 127.0.0.1:9000 Accept: */* Expect: 100-continue Content-Length: 10 HelloWorldPUT /noreadbody HTTP/1.1 User-Agent: PycURL/7.22.0 Host: 127.0.0.1:9000 Accept: */* Expect: 100-continue Content-Length: 10 GET /should-be-ignored HTTP/1.1 Host: foo `) var buf bytes.Buffer conn := &rwTestConn{ Reader: bytes.NewReader(req), Writer: &buf, closec: make(chan bool, 1), } ln := &oneConnListener{conn: conn} numReq := 0 go Serve(ln, HandlerFunc(func(w ResponseWriter, r *Request) { numReq++ if r.URL.Path == "/readbody" { ioutil.ReadAll(r.Body) } io.WriteString(w, "Hello world!") })) <-conn.closec if numReq != 2 { t.Errorf("num requests = %d; want 2", numReq) } if !strings.Contains(buf.String(), "Connection: close\r\n") { t.Errorf("expected 'Connection: close' in response; got: %s", buf.String()) } }
explode_data.jsonl/22483
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 411 }
[ 2830, 3393, 42006, 16, 16, 20, 19, 24, 62, 17536, 16, 15, 15, 1155, 353, 8840, 836, 8, 341, 24395, 1669, 4232, 7078, 5809, 6221, 608, 878, 2599, 10130, 14, 16, 13, 16, 198, 1474, 45118, 25, 393, 3337, 3144, 14, 22, 13, 17, 17, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestOR(t *testing.T) { fmt.Println("===== OR =====") var a1 = AND(OR(SQL("a=?", 10), SQL("b=?", 20)), OR(SQL("c=?", 30), SQL("d=?", 40))) fmt.Println(a1.ToSQL()) }
explode_data.jsonl/1261
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 80 }
[ 2830, 3393, 868, 1155, 353, 8840, 836, 8, 341, 11009, 12419, 445, 46725, 2726, 30742, 1138, 2405, 264, 16, 284, 3567, 7, 868, 38669, 445, 64, 87873, 220, 16, 15, 701, 7870, 445, 65, 87873, 220, 17, 15, 5731, 2726, 38669, 445, 66, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestVerifyChallengeTxSigners_validWhenWebAuthDomainMissing(t *testing.T) { serverKP := newKeypair0() clientKP := newKeypair1() txSource := NewSimpleAccount(serverKP.Address(), -1) op := ManageData{ SourceAccount: clientKP.Address(), Name: "testanchor.stellar.org auth", Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), } tx64, err := newSignedTransaction( TransactionParams{ SourceAccount: &txSource, IncrementSequenceNum: true, Operations: []Operation{&op}, BaseFee: MinBaseFee, Timebounds: NewTimeout(1000), }, network.TestNetworkPassphrase, serverKP, clientKP, ) assert.NoError(t, err) wantSigners := []string{clientKP.Address()} signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) assert.ElementsMatch(t, wantSigners, signersFound) assert.NoError(t, err) }
explode_data.jsonl/20767
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 413 }
[ 2830, 3393, 32627, 62078, 31584, 7264, 388, 8337, 4498, 5981, 5087, 13636, 25080, 1155, 353, 8840, 836, 8, 341, 41057, 65036, 1669, 501, 6608, 1082, 1310, 15, 741, 25291, 65036, 1669, 501, 6608, 1082, 1310, 16, 741, 46237, 3608, 1669, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_NegativeCallSingleCommandWithSubcommand(t *testing.T) { out, err := test.ExecuteCommand(buildTestCmd(), "completion", "fail") if err != nil { assert.Error(t, err) } assert.NotContains(t, out, `Error: invalid argument "fail" for "releaser completion"`) assert.Contains(t, out, `Usage:`) assert.NotContains(t, out, `Available Commands:`) assert.Contains(t, out, `Flags:`) }
explode_data.jsonl/59261
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 144 }
[ 2830, 3393, 1604, 15060, 7220, 10888, 4062, 2354, 3136, 5631, 1155, 353, 8840, 836, 8, 341, 13967, 11, 1848, 1669, 1273, 13827, 4062, 43333, 2271, 15613, 1507, 330, 43312, 497, 330, 18403, 1138, 743, 1848, 961, 2092, 341, 197, 6948, 614...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCertificateLifecycle(t *testing.T) { client, err := clients.NewWafV1Client() th.AssertNoErr(t, err) name := tools.RandomString("test_cert-", 5) opts := certificates.CreateOpts{ Name: name, Content: testCert, Key: testKey, } created, err := certificates.Create(client, opts).Extract() th.AssertNoErr(t, err) th.AssertEquals(t, testCertExpiration, created.ExpireTime) defer func() { err := certificates.Delete(client, created.Id).ExtractErr() th.AssertNoErr(t, err) }() got, err := certificates.Get(client, created.Id).Extract() th.AssertNoErr(t, err) th.AssertEquals(t, created.Name, got.Name) th.AssertEquals(t, created.ExpireTime, got.ExpireTime) updateOpts := certificates.UpdateOpts{Name: name + "_updated"} updated, err := certificates.Update(client, created.Id, updateOpts).Extract() th.AssertNoErr(t, err) th.AssertEquals(t, created.ExpireTime, updated.ExpireTime) th.AssertEquals(t, created.Id, updated.Id) th.AssertEquals(t, updateOpts.Name, updated.Name) pages, err := certificates.List(client, nil).AllPages() th.AssertNoErr(t, err) certs, err := certificates.ExtractCertificates(pages) th.AssertNoErr(t, err) if len(certs) == 0 { t.Errorf("no certificates in the list") } pages2, err := certificates.List(client, certificates.ListOpts{ Limit: -1, }).AllPages() th.AssertNoErr(t, err) certs2, err := certificates.ExtractCertificates(pages2) th.AssertNoErr(t, err) if len(certs2) == 0 { t.Errorf("no certificates in the list") } }
explode_data.jsonl/28483
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 600 }
[ 2830, 3393, 33202, 62731, 1155, 353, 8840, 836, 8, 341, 25291, 11, 1848, 1669, 8239, 7121, 54, 2577, 53, 16, 2959, 741, 70479, 11711, 2753, 7747, 1155, 11, 1848, 692, 11609, 1669, 7375, 26709, 703, 445, 1944, 37097, 73918, 220, 20, 69...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReadJSONFromMIME(t *testing.T) { // Read JSON from MIME by specifying non-JSON content type header := http.Header{} header.Add("MIME-Version", "1.0") header.Add("Content-Type", "multipart/related; boundary=0123456789") input := ioutil.NopCloser(strings.NewReader(`{"key":"foo","value":"bar"}`)) var body db.Body err := ReadJSONFromMIME(header, input, &body) assert.Error(t, err, "Can't read JSON from MIME by specifying non-JSON content type") assert.Contains(t, err.Error(), strconv.Itoa(http.StatusUnsupportedMediaType)) // Read valid JSON from MIME header = http.Header{} header.Add("MIME-Version", "1.0") header.Add("Content-Type", "application/json") input = ioutil.NopCloser(strings.NewReader(`{"key":"foo","value":"bar"}`)) err = ReadJSONFromMIME(header, input, &body) assert.NoError(t, err, "Should read valid JSON from MIME") assert.Equal(t, "foo", body["key"]) assert.Equal(t, "bar", body["value"]) // Read JSON from MIME with illegal JSON body content header = http.Header{} header.Add("MIME-Version", "1.0") header.Add("Content-Type", "application/json") input = ioutil.NopCloser(strings.NewReader(`"key":"foo","value":"bar"`)) err = ReadJSONFromMIME(header, input, &body) assert.Error(t, err, "Can't read JSON from MIME with illegal JSON body content") assert.Contains(t, err.Error(), strconv.Itoa(http.StatusBadRequest)) // Read JSON from MIME with gzip content encoding and illegal content type (application/json) header = http.Header{} header.Add("MIME-Version", "1.0") header.Add("Content-Type", "application/json") header.Add("Content-Encoding", "gzip") input = ioutil.NopCloser(strings.NewReader(`{"key":"foo","value":"bar"}`)) err = ReadJSONFromMIME(header, input, &body) assert.Error(t, err, "Can't read JSON from MIME with gzip content encoding and illegal content type") assert.Contains(t, err.Error(), "invalid header") // Read JSON from MIME with unsupported content encoding. 
header = http.Header{} header.Add("MIME-Version", "1.0") header.Add("Content-Encoding", "zip") input = ioutil.NopCloser(strings.NewReader(`{"key":"foo","value":"bar"}`)) err = ReadJSONFromMIME(header, input, &body) assert.Error(t, err, "Can't read JSON from MIME with unsupported content encoding") assert.Contains(t, err.Error(), strconv.Itoa(http.StatusUnsupportedMediaType)) // Read JSON from MIME with gzip content encoding. header = http.Header{} header.Add("MIME-Version", "1.0") header.Add("Content-Encoding", "gzip") var buffer bytes.Buffer gz := gzip.NewWriter(&buffer) _, err = gz.Write([]byte(`{"key":"foo","value":"bar"}`)) assert.NoError(t, err, "Writes a compressed form of bytes to the underlying io.Writer") assert.NoError(t, gz.Flush(), "Flushes any pending compressed data to the underlying writer") assert.NoError(t, gz.Close(), "Closes the Writer by flushing any unwritten data") input = ioutil.NopCloser(&buffer) err = ReadJSONFromMIME(header, input, &body) assert.NoError(t, err, "Should read JSON from MIME with gzip content encoding") assert.Equal(t, "foo", body["key"]) assert.Equal(t, "bar", body["value"]) }
explode_data.jsonl/56945
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1076 }
[ 2830, 3393, 4418, 5370, 3830, 44, 5660, 1155, 353, 8840, 836, 8, 341, 197, 322, 4457, 4718, 504, 57477, 553, 37838, 2477, 12, 5370, 2213, 943, 198, 20883, 1669, 1758, 15753, 16094, 20883, 1904, 445, 44, 5660, 83902, 497, 330, 16, 13, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSimpleQuery(t *testing.T) { db := openTestConn(t) defer db.Close() r, err := db.Query("select 1") if err != nil { t.Fatal(err) } defer r.Close() if !r.Next() { t.Fatal("expected row") } }
explode_data.jsonl/63432
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 96 }
[ 2830, 3393, 16374, 2859, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 1787, 2271, 9701, 1155, 340, 16867, 2927, 10421, 2822, 7000, 11, 1848, 1669, 2927, 15685, 445, 1742, 220, 16, 1138, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPessimisticTxn(t *testing.T) { store, clean := createMockStoreAndSetup(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") // Make the name has different indent for easier read. tk1 := testkit.NewTestKit(t, store) tk1.MustExec("use test") tk.MustExec("drop table if exists pessimistic") tk.MustExec("create table pessimistic (k int, v int)") tk.MustExec("insert into pessimistic values (1, 1)") // t1 lock, t2 update, t1 update and retry statement. tk1.MustExec("begin pessimistic") tk.MustExec("update pessimistic set v = 2 where v = 1") // Update can see the change, so this statement affects 0 rows. tk1.MustExec("update pessimistic set v = 3 where v = 1") require.Equal(t, uint64(0), tk1.Session().AffectedRows()) require.Equal(t, 0, session.GetHistory(tk1.Session()).Count()) // select for update can see the change of another transaction. tk1.MustQuery("select * from pessimistic for update").Check(testkit.Rows("1 2")) // plain select can not see the change of another transaction. tk1.MustQuery("select * from pessimistic").Check(testkit.Rows("1 1")) tk1.MustExec("update pessimistic set v = 3 where v = 2") require.Equal(t, uint64(1), tk1.Session().AffectedRows()) // pessimistic lock doesn't block read operation of other transactions. tk.MustQuery("select * from pessimistic").Check(testkit.Rows("1 2")) tk1.MustExec("commit") tk1.MustQuery("select * from pessimistic").Check(testkit.Rows("1 3")) // t1 lock, t1 select for update, t2 wait t1. tk1.MustExec("begin pessimistic") tk1.MustExec("select * from pessimistic where k = 1 for update") finishCh := make(chan struct{}) go func() { tk.MustExec("update pessimistic set v = 5 where k = 1") finishCh <- struct{}{} }() time.Sleep(time.Millisecond * 10) tk1.MustExec("update pessimistic set v = 3 where k = 1") tk1.MustExec("commit") <-finishCh tk.MustQuery("select * from pessimistic").Check(testkit.Rows("1 5")) }
explode_data.jsonl/12450
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 652 }
[ 2830, 3393, 47, 66733, 4532, 31584, 77, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1855, 11571, 6093, 3036, 21821, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3553, 340, 3244, 74, 50...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_directGzip_perf(t *testing.T) { res := testing.Benchmark(Benchmark_directGzip) if httptestbench.RaceDetectorEnabled { assert.Less(t, res.Extra["B:rcvd/op"], 640.0) assert.Less(t, res.Extra["B:sent/op"], 104.0) assert.Less(t, res.AllocsPerOp(), int64(60)) assert.Less(t, res.AllocedBytesPerOp(), int64(9000)) } else { assert.Less(t, res.Extra["B:rcvd/op"], 640.0) assert.Less(t, res.Extra["B:sent/op"], 104.0) assert.Less(t, res.AllocsPerOp(), int64(45)) assert.Less(t, res.AllocedBytesPerOp(), int64(4000)) } }
explode_data.jsonl/25740
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 241 }
[ 2830, 3393, 32871, 38, 9964, 76776, 1155, 353, 8840, 836, 8, 341, 10202, 1669, 7497, 1785, 39381, 5349, 39381, 32871, 38, 9964, 692, 743, 54320, 70334, 27024, 2013, 578, 31606, 5462, 341, 197, 6948, 1214, 433, 1155, 11, 592, 5121, 2172,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestOptions(t *testing.T) { // TODO: test result srv := httptest.NewServer(http.HandlerFunc(HandleOptions)) defer srv.Close() url := "http://" + srv.Listener.Addr().String() resp, err := Options(url, nil, nil) if err != nil { t.Error(err) } assert.Equal(t, 200, resp.Status()) }
explode_data.jsonl/14811
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 118 }
[ 2830, 3393, 3798, 1155, 353, 8840, 836, 8, 341, 197, 322, 5343, 25, 1273, 1102, 198, 1903, 10553, 1669, 54320, 70334, 7121, 5475, 19886, 89164, 90832, 3798, 1171, 16867, 43578, 10421, 741, 19320, 1669, 330, 1254, 52136, 488, 43578, 64091,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRegisterTwice(t *testing.T) { cfg := DefaultConfig() cfg.RateLimitPeriod = 0 gd, err := startDispatcher(cfg) assert.NoError(t, err) defer gd.Close() var expectedSessionID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) msg, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, msg.SessionID) expectedSessionID = msg.SessionID stream.CloseSend() } { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) msg, err := stream.Recv() assert.NoError(t, err) // session should be different! assert.NotEqual(t, msg.SessionID, expectedSessionID) stream.CloseSend() } }
explode_data.jsonl/13841
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 283 }
[ 2830, 3393, 8690, 22816, 558, 1155, 353, 8840, 836, 8, 341, 50286, 1669, 7899, 2648, 741, 50286, 2013, 349, 16527, 23750, 284, 220, 15, 198, 3174, 67, 11, 1848, 1669, 1191, 21839, 28272, 340, 6948, 35699, 1155, 11, 1848, 340, 16867, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMutableTree_LazyLoadVersionWithEmptyTree(t *testing.T) { mdb := db.NewMemDB() tree, err := NewMutableTree(mdb, 1000) require.NoError(t, err) _, v1, err := tree.SaveVersion() require.NoError(t, err) newTree1, err := NewMutableTree(mdb, 1000) require.NoError(t, err) v2, err := newTree1.LazyLoadVersion(1) require.NoError(t, err) require.True(t, v1 == v2) newTree2, err := NewMutableTree(mdb, 1000) require.NoError(t, err) v2, err = newTree1.LoadVersion(1) require.NoError(t, err) require.True(t, v1 == v2) require.True(t, newTree1.root == newTree2.root) }
explode_data.jsonl/23783
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 248 }
[ 2830, 3393, 11217, 6533, 2351, 13619, 5879, 5637, 2354, 3522, 6533, 1155, 353, 8840, 836, 8, 341, 2109, 1999, 1669, 2927, 7121, 18816, 3506, 741, 51968, 11, 1848, 1669, 1532, 11217, 6533, 1255, 1999, 11, 220, 16, 15, 15, 15, 340, 1795...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAbsCollection_Nth(t *testing.T) { intColl := NewIntCollection([]int{1, 2, 3, 4, 5, 6}) ret := intColl.Nth(4, 1) if ret.Count() != 2 { t.Fatal("Nth 错误") } }
explode_data.jsonl/66443
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 84 }
[ 2830, 3393, 27778, 6482, 1604, 339, 1155, 353, 8840, 836, 8, 341, 2084, 15265, 1669, 1532, 1072, 6482, 10556, 396, 90, 16, 11, 220, 17, 11, 220, 18, 11, 220, 19, 11, 220, 20, 11, 220, 21, 3518, 11262, 1669, 526, 15265, 2067, 339, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLoopBackManager_overrideDevicesFromSetConfigWithSize(t *testing.T) { var mockexec = &mocks.GoMockExecutor{} var manager = NewLoopBackManager(mockexec, "", "", logger) testSN := "testSN" testNodeID := "testNode" testConfigPath := "/tmp/config.yaml" config := []byte("defaultDriveSize: 30Mi \ndefaultDrivePerNodeCount: 3\nnodes:\n" + fmt.Sprintf("- nodeID: %s\n", testNodeID) + fmt.Sprintf(" driveCount: %d\n", 5) + " drives:\n" + fmt.Sprintf(" - serialNumber: %s\n", testSN) + fmt.Sprintf(" size: %s\n", "40Mi")) err := ioutil.WriteFile(testConfigPath, config, 0777) assert.Nil(t, err) defer func() { _ = os.Remove(testConfigPath) }() manager.nodeName = testNodeID for _, device := range manager.devices { mockexec.On("RunCmd", fmt.Sprintf(detachLoopBackDeviceCmdTmpl, device.devicePath)). Return("", "", nil) mockexec.On("RunCmd", fmt.Sprintf(deleteFileCmdTmpl, device.fileName)). Return("", "", nil) } manager.readAndSetConfig(testConfigPath) manager.updateDevicesFromConfig() config = []byte("defaultDriveSize: 30Mi \ndefaultDrivePerNodeCount: 3\nnodes:\n" + fmt.Sprintf("- nodeID: %s\n", testNodeID) + fmt.Sprintf(" driveCount: %d\n", 5) + " drives:\n" + fmt.Sprintf(" - serialNumber: %s\n", testSN)) err = ioutil.WriteFile(testConfigPath, config, 0777) assert.Nil(t, err) for _, device := range manager.devices { if device.SerialNumber == testSN { device.devicePath = "/dev/sda" mockexec.On("RunCmd", fmt.Sprintf(detachLoopBackDeviceCmdTmpl, device.devicePath)). Return("", "", nil) mockexec.On("RunCmd", fmt.Sprintf(deleteFileCmdTmpl, device.fileName)). Return("", "", nil) } } manager.readAndSetConfig(testConfigPath) manager.updateDevicesFromConfig() assert.Nil(t, err) // resizing is not supported for _, device := range manager.devices { if device.SerialNumber == testSN { assert.Equal(t, device.Size, "40Mi") } } }
explode_data.jsonl/73566
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 787 }
[ 2830, 3393, 14620, 3707, 2043, 48576, 40835, 3830, 1649, 2648, 2354, 1695, 1155, 353, 8840, 836, 8, 341, 2405, 7860, 11748, 284, 609, 16712, 82, 67131, 11571, 25255, 16094, 2405, 6645, 284, 1532, 14620, 3707, 2043, 30389, 11748, 11, 7342,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) { const key1 = "" const key2 = "x" m := make(map[string]int) m[key1] = 99 for k := range m { delete(m, k) } m[key2]++ if n2 := m[key2]; n2 != 1 { t.Errorf("incremented 0 to %d", n2) } }
explode_data.jsonl/19933
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 124 }
[ 2830, 3393, 38311, 6025, 88194, 14008, 1592, 82696, 1072, 1155, 353, 8840, 836, 8, 341, 4777, 1376, 16, 284, 8389, 4777, 1376, 17, 284, 330, 87, 1837, 2109, 1669, 1281, 9147, 14032, 63025, 340, 2109, 8157, 16, 60, 284, 220, 24, 24, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRunParallel(t *testing.T) { testing.Benchmark(func(b *testing.B) { procs := uint32(0) iters := uint64(0) b.SetParallelism(3) b.RunParallel(func(pb *testing.PB) { atomic.AddUint32(&procs, 1) for pb.Next() { atomic.AddUint64(&iters, 1) } }) if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want { t.Errorf("got %v procs, want %v", procs, want) } if iters != uint64(b.N) { t.Errorf("got %v iters, want %v", iters, b.N) } }) }
explode_data.jsonl/33937
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 239 }
[ 2830, 3393, 6727, 16547, 1155, 353, 8840, 836, 8, 341, 197, 8840, 1785, 39381, 18552, 1883, 353, 8840, 1785, 8, 341, 197, 197, 90087, 1669, 2622, 18, 17, 7, 15, 340, 197, 23374, 388, 1669, 2622, 21, 19, 7, 15, 340, 197, 2233, 4202...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestContext2Plan_ignoreChanges(t *testing.T) { m := testModule(t, "plan-ignore-changes") p := testProvider("aws") p.DiffFn = testDiffFn s := MustShimLegacyState(&State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{"ami": "ami-abcd1234"}, }, }, }, }, }, }) ctx := testContext2(t, &ContextOpts{ Config: m, ProviderResolver: providers.ResolverFixed( map[string]providers.Factory{ "aws": testProviderFuncFixed(p), }, ), Variables: InputValues{ "foo": &InputValue{ Value: cty.StringVal("ami-1234abcd"), SourceType: ValueFromCaller, }, }, State: s, }) plan, diags := ctx.Plan() if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) } res := plan.Changes.Resources[0] ric, err := res.Decode(ty) if err != nil { t.Fatal(err) } if res.Action != plans.Update { t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action) } if ric.Addr.String() != "aws_instance.foo" { t.Fatalf("unexpected resource: %s", ric.Addr) } checkVals(t, objectVal(t, schema, map[string]cty.Value{ "id": cty.StringVal("bar"), "ami": cty.StringVal("ami-abcd1234"), "type": cty.StringVal("aws_instance"), }), ric.After) }
explode_data.jsonl/28713
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 715 }
[ 2830, 3393, 1972, 17, 20485, 58493, 11317, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 1273, 3332, 1155, 11, 330, 10393, 43171, 11582, 5520, 1138, 3223, 1669, 1273, 5179, 445, 8635, 5130, 3223, 98063, 24911, 284, 1273, 21751, 24911, 271, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestEntGQL_buildTypes_todoplugin_relay(t *testing.T) { graph, err := entc.LoadGraph("./internal/todoplugin/ent/schema", &gen.Config{}) require.NoError(t, err) plugin, err := newSchemaGenerator(graph) require.NoError(t, err) types, err := plugin.buildTypes() require.NoError(t, err) require.Equal(t, `type Category implements Node & Entity { id: ID! text: String! uuidA: UUID status: CategoryStatus! config: CategoryConfig! duration: Duration! count: Uint64! @deprecated(reason: "We don't use this field anymore") strings: [String!] } """ A connection to a list of items. """ type CategoryConnection { """ A list of edges. """ edges: [CategoryEdge] """ Information to aid in pagination. """ pageInfo: PageInfo! totalCount: Int! } """ An edge in a connection. """ type CategoryEdge { """ The item at the end of the edge. """ node: Category """ A cursor for use in pagination. """ cursor: Cursor! } input CategoryOrder { direction: OrderDirection! = ASC field: CategoryOrderField! } enum CategoryOrderField { TEXT DURATION } """ CategoryStatus is enum for the field status """ enum CategoryStatus @goModel(model: "entgo.io/contrib/entgql/internal/todoplugin/ent/category.Status") { ENABLED DISABLED } type MasterUser implements Node @goModel(model: "entgo.io/contrib/entgql/internal/todoplugin/ent.User") { id: ID! username: String! age: Float! amount: Float! role: Role! nullableString: String } """ A connection to a list of items. """ type MasterUserConnection { """ A list of edges. """ edges: [MasterUserEdge] """ Information to aid in pagination. """ pageInfo: PageInfo! totalCount: Int! } """ An edge in a connection. """ type MasterUserEdge { """ The item at the end of the edge. """ node: MasterUser """ A cursor for use in pagination. """ cursor: Cursor! 
} """ Role is enum for the field role """ enum Role @goModel(model: "entgo.io/contrib/entgql/internal/todoplugin/ent/role.Role") { ADMIN USER UNKNOWN } """ Status is enum for the field status """ enum Status @goModel(model: "entgo.io/contrib/entgql/internal/todoplugin/ent/todo.Status") { IN_PROGRESS COMPLETED } type Todo implements Node { id: ID! createdAt: Time! visibilityStatus: VisibilityStatus! status: Status! priority: Int! text: String! } """ A connection to a list of items. """ type TodoConnection { """ A list of edges. """ edges: [TodoEdge] """ Information to aid in pagination. """ pageInfo: PageInfo! totalCount: Int! } """ An edge in a connection. """ type TodoEdge { """ The item at the end of the edge. """ node: Todo """ A cursor for use in pagination. """ cursor: Cursor! } input TodoOrder { direction: OrderDirection! = ASC field: TodoOrderField! } enum TodoOrderField { CREATED_AT VISIBILITY_STATUS STATUS PRIORITY TEXT } """ VisibilityStatus is enum for the field visibility_status """ enum VisibilityStatus @goModel(model: "entgo.io/contrib/entgql/internal/todoplugin/ent/todo.VisibilityStatus") { LISTING HIDDEN } `, printSchema(&ast.Schema{ Types: types, })) }
explode_data.jsonl/63029
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1132 }
[ 2830, 3393, 2250, 38, 3588, 20801, 4173, 528, 347, 55078, 3631, 1288, 6651, 1155, 353, 8840, 836, 8, 341, 66616, 11, 1848, 1669, 1197, 66, 13969, 11212, 13988, 10481, 5523, 347, 55078, 3631, 14, 306, 61142, 497, 609, 4370, 10753, 37790,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRollingUpdater_cleanupWithClients(t *testing.T) { rc := oldRc(2, 2) rcExisting := newRc(1, 3) tests := []struct { name string policy RollingUpdaterCleanupPolicy responses []runtime.Object expected []string }{ { name: "preserve", policy: PreserveRollingUpdateCleanupPolicy, responses: []runtime.Object{rcExisting}, expected: []string{ "get", "update", "get", "get", }, }, { name: "delete", policy: DeleteRollingUpdateCleanupPolicy, responses: []runtime.Object{rcExisting}, expected: []string{ "get", "update", "get", "get", "delete", }, }, { name: "rename", policy: RenameRollingUpdateCleanupPolicy, responses: []runtime.Object{rcExisting}, expected: []string{ "get", "update", "get", "get", "delete", "create", "delete", }, }, } for _, test := range tests { fake := testclient.NewSimpleFake(test.responses...) updater := &RollingUpdater{ ns: "default", c: fake, } config := &RollingUpdaterConfig{ Out: ioutil.Discard, OldRc: rc, NewRc: rcExisting, UpdatePeriod: 0, Interval: time.Millisecond, Timeout: time.Millisecond, CleanupPolicy: test.policy, } err := updater.cleanupWithClients(rc, rcExisting, config) if err != nil { t.Errorf("unexpected error: %v", err) } if len(fake.Actions()) != len(test.expected) { t.Fatalf("%s: unexpected actions: %v, expected %v", test.name, fake.Actions(), test.expected) } for j, action := range fake.Actions() { if e, a := test.expected[j], action.GetVerb(); e != a { t.Errorf("%s: unexpected action: expected %s, got %s", test.name, e, a) } } } }
explode_data.jsonl/52534
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 835 }
[ 2830, 3393, 32355, 287, 79854, 42444, 2354, 47174, 1155, 353, 8840, 836, 8, 341, 30295, 1669, 2310, 49, 66, 7, 17, 11, 220, 17, 340, 30295, 53067, 1669, 501, 49, 66, 7, 16, 11, 220, 18, 692, 78216, 1669, 3056, 1235, 341, 197, 1160...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestServerHTTP10ConnectionKeepAlive(t *testing.T) { ln := fasthttputil.NewInmemoryListener() ch := make(chan struct{}) go func() { err := Serve(ln, func(ctx *RequestCtx) { if string(ctx.Path()) == "/close" { ctx.SetConnectionClose() } }) if err != nil { t.Fatalf("unexpected error: %s", err) } close(ch) }() conn, err := ln.Dial() if err != nil { t.Fatalf("unexpected error: %s", err) } _, err = fmt.Fprintf(conn, "%s", "GET / HTTP/1.0\r\nHost: aaa\r\nConnection: keep-alive\r\n\r\n") if err != nil { t.Fatalf("error when writing request: %s", err) } _, err = fmt.Fprintf(conn, "%s", "GET /close HTTP/1.0\r\nHost: aaa\r\nConnection: keep-alive\r\n\r\n") if err != nil { t.Fatalf("error when writing request: %s", err) } br := bufio.NewReader(conn) var resp Response if err = resp.Read(br); err != nil { t.Fatalf("error when reading response: %s", err) } if resp.ConnectionClose() { t.Fatalf("response mustn't have 'Connection: close' header") } if err = resp.Read(br); err != nil { t.Fatalf("error when reading response: %s", err) } if !resp.ConnectionClose() { t.Fatalf("response must have 'Connection: close' header") } tailCh := make(chan struct{}) go func() { tail, err := ioutil.ReadAll(br) if err != nil { t.Fatalf("error when reading tail: %s", err) } if len(tail) > 0 { t.Fatalf("unexpected non-zero tail %q", tail) } close(tailCh) }() select { case <-tailCh: case <-time.After(time.Second): t.Fatalf("timeout when reading tail") } if err = conn.Close(); err != nil { t.Fatalf("error when closing the connection: %s", err) } if err = ln.Close(); err != nil { t.Fatalf("error when closing listener: %s", err) } select { case <-ch: case <-time.After(time.Second): t.Fatalf("timeout when waiting for the server to stop") } }
explode_data.jsonl/73282
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 757 }
[ 2830, 3393, 5475, 9230, 16, 15, 4526, 19434, 32637, 1155, 353, 8840, 836, 8, 341, 197, 2261, 1669, 4937, 96336, 628, 321, 7121, 641, 17269, 2743, 2822, 23049, 1669, 1281, 35190, 2036, 37790, 30680, 2915, 368, 341, 197, 9859, 1669, 52932...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestScryptRoundTrip(t *testing.T) { password := "twitch.tv/filosottile" r, err := age.NewScryptRecipient(password) if err != nil { t.Fatal(err) } r.SetWorkFactor(15) i, err := age.NewScryptIdentity(password) if err != nil { t.Fatal(err) } if r.Type() != i.Type() || r.Type() != "scrypt" { t.Errorf("invalid Type values: %v, %v", r.Type(), i.Type()) } fileKey := make([]byte, 16) if _, err := rand.Read(fileKey); err != nil { t.Fatal(err) } block, err := r.Wrap(fileKey) if err != nil { t.Fatal(err) } b := &bytes.Buffer{} block.Marshal(b) t.Logf("%s", b.Bytes()) out, err := i.Unwrap(block) if err != nil { t.Fatal(err) } if !bytes.Equal(fileKey, out) { t.Errorf("invalid output: %x, expected %x", out, fileKey) } }
explode_data.jsonl/58873
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 351 }
[ 2830, 3393, 3326, 3571, 27497, 56352, 1155, 353, 8840, 836, 8, 341, 58199, 1669, 330, 83, 5539, 14485, 6663, 89319, 1716, 457, 1837, 7000, 11, 1848, 1669, 4231, 7121, 3326, 3571, 74432, 22768, 340, 743, 1848, 961, 2092, 341, 197, 3244, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
// TestUnion runs the golden planner test cases for UNION statements: each
// input SQL is parsed, preprocessed, and built into a logical plan, which is
// then logically optimized and compared (as a string) against the expected
// output.
func TestUnion(t *testing.T) {
	var input []string
	// output pairs the expected optimized-plan string (Best) with whether
	// plan building is expected to fail (Err) for each input case.
	var output []struct {
		Best string
		Err  bool
	}
	planSuiteUnexportedData.GetTestCases(t, &input, &output)
	s := createPlannerSuite()
	ctx := context.TODO()
	for i, tt := range input {
		comment := fmt.Sprintf("case:%v sql:%s", i, tt)
		stmt, err := s.p.ParseOneStmt(tt, "", "")
		require.NoError(t, err, comment)
		// Resolve names/types against the suite's info schema before planning.
		err = Preprocess(s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is}))
		require.NoError(t, err)
		sctx := MockContext()
		builder, _ := NewPlanBuilder().Init(sctx, s.is, &hint.BlockHintProcessor{})
		domain.GetDomain(sctx).MockInfoCacheAndLoadInfoSchema(s.is)
		plan, err := builder.Build(ctx, stmt)
		// In record mode, refresh the golden Err flag from the actual result.
		testdata.OnRecord(func() {
			output[i].Err = err != nil
		})
		if output[i].Err {
			// This case is expected to fail at build time; nothing to optimize.
			require.Error(t, err)
			continue
		}
		require.NoError(t, err)
		p := plan.(LogicalPlan)
		p, err = logicalOptimize(ctx, builder.optFlag, p)
		// In record mode, refresh the golden Best plan string as well.
		testdata.OnRecord(func() {
			output[i].Best = ToString(p)
		})
		require.NoError(t, err)
		require.Equal(t, output[i].Best, ToString(p), comment)
	}
}
explode_data.jsonl/50224
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 457 }
[ 2830, 3393, 32658, 1155, 353, 8840, 836, 8, 341, 2405, 1946, 3056, 917, 198, 2405, 2550, 3056, 1235, 341, 197, 12791, 477, 914, 198, 197, 197, 7747, 220, 1807, 198, 197, 532, 197, 10393, 28000, 1806, 1533, 291, 1043, 2234, 2271, 37302...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBuildPassthroughClusters(t *testing.T) { cases := []struct { name string ips []string ipv4Expected bool ipv6Expected bool }{ { name: "both ipv4 and ipv6", ips: []string{"6.6.6.6", "::1"}, ipv4Expected: true, ipv6Expected: true, }, { name: "ipv4 only", ips: []string{"6.6.6.6"}, ipv4Expected: true, ipv6Expected: false, }, { name: "ipv6 only", ips: []string{"::1"}, ipv4Expected: false, ipv6Expected: true, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { proxy := &model.Proxy{IPAddresses: tt.ips} cg := NewConfigGenTest(t, TestOptions{}) cb := NewClusterBuilder(cg.SetupProxy(proxy), cg.PushContext()) clusters := cb.buildInboundPassthroughClusters() var hasIpv4, hasIpv6 bool for _, c := range clusters { hasIpv4 = hasIpv4 || c.Name == util.InboundPassthroughClusterIpv4 hasIpv6 = hasIpv6 || c.Name == util.InboundPassthroughClusterIpv6 } if hasIpv4 != tt.ipv4Expected { t.Errorf("Unexpected Ipv4 Passthrough Cluster, want %v got %v", tt.ipv4Expected, hasIpv4) } if hasIpv6 != tt.ipv6Expected { t.Errorf("Unexpected Ipv6 Passthrough Cluster, want %v got %v", tt.ipv6Expected, hasIpv6) } passthrough := xdstest.ExtractCluster(util.InboundPassthroughClusterIpv4, clusters) if passthrough == nil { passthrough = xdstest.ExtractCluster(util.InboundPassthroughClusterIpv6, clusters) } // Validate that Passthrough Cluster LB Policy is set correctly. if passthrough.GetType() != cluster.Cluster_ORIGINAL_DST || passthrough.GetLbPolicy() != cluster.Cluster_CLUSTER_PROVIDED { t.Errorf("Unexpected Discovery type or Lb policy, got Discovery type: %v, Lb Policy: %v", passthrough.GetType(), passthrough.GetLbPolicy()) } }) } }
explode_data.jsonl/44339
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 884 }
[ 2830, 3393, 11066, 70911, 86901, 94992, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 11609, 260, 914, 198, 197, 197, 3077, 688, 3056, 917, 198, 197, 197, 42676, 19, 18896, 1807, 198, 197, 197, 42676, 21, 18896...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9