text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestCaptivePrepareRangeCloseNotFullyTerminated(t *testing.T) { metaChan := make(chan metaResult, 100) for i := 64; i <= 100; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx, cancel := context.WithCancel(context.Background()) mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Twice() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil) mockRunner.On("getProcessExitError").Return(true, nil) mockRunner.On("getProcessExitError").Return(false, nil) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.NoError(t, err) // Simulates a long (but graceful) shutdown... cancel() err = captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.NoError(t, err) mockRunner.AssertExpectations(t) mockArchive.AssertExpectations(t) }
explode_data.jsonl/7314
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 509 }
[ 2830, 3393, 34, 27781, 50590, 6046, 7925, 2623, 67386, 21209, 51199, 1155, 353, 8840, 836, 8, 341, 84004, 46019, 1669, 1281, 35190, 8823, 2077, 11, 220, 16, 15, 15, 340, 2023, 600, 1669, 220, 21, 19, 26, 600, 2651, 220, 16, 15, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCITests(t *testing.T) { t.Parallel() //nolint tests := []struct { name string want checker.CheckResult status string wantErr bool commit []clients.Commit r []clients.CheckRun expected scut.TestReturn }{ { name: "success", expected: scut.TestReturn{ NumberOfDebug: 1, }, commit: []clients.Commit{ { SHA: "sha", AssociatedMergeRequest: clients.PullRequest{ HeadSHA: "sha", Number: 1, Labels: []clients.Label{ { Name: "label", }, }, MergedAt: time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC), }, }, }, }, { name: "commit 0", expected: scut.TestReturn{ Score: -1, }, commit: []clients.Commit{ { SHA: "sha", AssociatedMergeRequest: clients.PullRequest{ HeadSHA: "sha", Number: 1, Labels: []clients.Label{ { Name: "label", }, }, }, }, }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mockRepoClient := mockrepo.NewMockRepoClient(ctrl) mockRepoClient.EXPECT().ListCommits().Return(tt.commit, nil) mockRepoClient.EXPECT().ListStatuses(gomock.Any()).DoAndReturn( func(sha string) ([]clients.Status, error) { if tt.wantErr { //nolint return nil, errors.New("error") } return []clients.Status{ { State: tt.status, Context: "buildkite", }, }, nil }).AnyTimes() mockRepoClient.EXPECT().ListCheckRunsForRef(gomock.Any()).DoAndReturn( func(sha string) ([]clients.CheckRun, error) { return tt.r, nil }).AnyTimes() dl := scut.TestDetailLogger{} c := checker.CheckRequest{ RepoClient: mockRepoClient, Dlogger: &dl, } r := CITests(&c) if !scut.ValidateTestReturn(t, tt.name, &tt.expected, &r, &dl) { t.Fail() } }) } }
explode_data.jsonl/77210
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1017 }
[ 2830, 3393, 34, 952, 17966, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 197, 322, 77, 337, 396, 198, 78216, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 50780, 257, 40915, 10600, 2077, 198, 197, 23847, 256, 914, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestServerErrServerStopped(t *testing.T) { srv := NewServer(RedisClientOpt{Addr: ":6379"}, Config{LogLevel: testLogLevel}) handler := NewServeMux() if err := srv.Start(handler); err != nil { t.Fatal(err) } srv.Stop() err := srv.Start(handler) if err != ErrServerStopped { t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerStopped error", err) } }
explode_data.jsonl/81844
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 151 }
[ 2830, 3393, 5475, 7747, 5475, 59803, 1155, 353, 8840, 836, 8, 341, 1903, 10553, 1669, 1532, 5475, 2785, 41825, 2959, 21367, 90, 13986, 25, 13022, 21, 18, 22, 24, 14345, 5532, 90, 72676, 25, 1273, 72676, 3518, 53326, 1669, 1532, 60421, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestVersionedCheckpointsSpecialCase4(t *testing.T) { tree := NewMutableTree(db.NewMemDB(), 0) tree.Set([]byte("U"), []byte("XamDUtiJ")) tree.Set([]byte("A"), []byte("UkZBuYIU")) tree.Set([]byte("H"), []byte("7a9En4uw")) tree.Set([]byte("V"), []byte("5HXU3pSI")) tree.SaveVersion() tree.Remove([]byte("U")) tree.Remove([]byte("A")) tree.SaveVersion() tree.Set([]byte("X"), []byte("New")) tree.SaveVersion() _, val := tree.GetVersioned([]byte("A"), 2) require.Nil(t, val) _, val = tree.GetVersioned([]byte("A"), 1) require.NotEmpty(t, val) tree.DeleteVersion(1) tree.DeleteVersion(2) _, val = tree.GetVersioned([]byte("A"), 2) require.Nil(t, val) _, val = tree.GetVersioned([]byte("A"), 1) require.Nil(t, val) }
explode_data.jsonl/25130
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 318 }
[ 2830, 3393, 5637, 291, 3973, 7706, 20366, 4207, 19, 1155, 353, 8840, 836, 8, 341, 51968, 1669, 1532, 11217, 6533, 9791, 7121, 18816, 3506, 1507, 220, 15, 692, 51968, 4202, 10556, 3782, 445, 52, 3975, 3056, 3782, 445, 55, 309, 21547, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMutatorDestroy(t *testing.T) { defaultCtx := testutil.NewContext( testutil.ContextWithNamespace("default"), ) testCases := []struct { name string ctx context.Context mutator string fetchResult *types.Mutator fetchErr error deleteErr error expectedErr bool expectedErrCode ErrCode }{ { name: "Deleted", ctx: defaultCtx, mutator: "mutator1", fetchResult: types.FixtureMutator("mutator1"), expectedErr: false, }, { name: "Does Not Exist", ctx: defaultCtx, mutator: "mutator1", fetchResult: nil, expectedErr: true, expectedErrCode: NotFound, }, { name: "store Err on Delete", ctx: defaultCtx, mutator: "mutator1", fetchResult: types.FixtureMutator("mutator1"), deleteErr: errors.New("dunno"), expectedErr: true, expectedErrCode: InternalErr, }, { name: "store Err on Fetch", ctx: defaultCtx, mutator: "mutator1", fetchResult: types.FixtureMutator("mutator1"), fetchErr: errors.New("dunno"), expectedErr: true, expectedErrCode: InternalErr, }, } for _, tc := range testCases { store := &mockstore.MockStore{} actions := NewMutatorController(store) t.Run(tc.name, func(t *testing.T) { assert := assert.New(t) // Mock store methods store. On("GetMutatorByName", mock.Anything, mock.Anything). Return(tc.fetchResult, tc.fetchErr) store. On("DeleteMutatorByName", mock.Anything, "mutator1"). Return(tc.deleteErr) // Exec Query err := actions.Destroy(tc.ctx, tc.mutator) if tc.expectedErr { inferErr, ok := err.(Error) if ok { assert.Equal(tc.expectedErrCode, inferErr.Code) } else { assert.Error(err) assert.FailNow("Given was not of type 'Error'") } } else { assert.NoError(err) } }) } }
explode_data.jsonl/12868
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 993 }
[ 2830, 3393, 51440, 850, 14245, 1155, 353, 8840, 836, 8, 341, 11940, 23684, 1669, 1273, 1314, 7121, 1972, 1006, 197, 18185, 1314, 9328, 2354, 22699, 445, 2258, 4461, 197, 692, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 310, 914, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDriverName(t *testing.T) { denyAllDevmapper() defer denyAllDevmapper() oldInit := fakeInit() defer restoreInit(oldInit) d := newDriver(t) if d.String() != "devicemapper" { t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) } }
explode_data.jsonl/45477
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 104 }
[ 2830, 3393, 11349, 675, 1155, 353, 8840, 836, 8, 341, 2698, 32395, 2403, 1912, 7338, 3106, 741, 16867, 23101, 2403, 1912, 7338, 3106, 2822, 61828, 3803, 1669, 12418, 3803, 741, 16867, 14952, 3803, 21972, 3803, 692, 2698, 1669, 501, 11349,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUnmarshalInt(t *testing.T) { tests := map[string]struct { input []byte output int expectedError error }{ "0": {[]byte("i:0;"), 0, nil}, "5": {[]byte("i:5;"), 5, nil}, "-8": {[]byte("i:-8;"), -8, nil}, "1000000": {[]byte("i:1000000;"), 1000000, nil}, "not an integer": {[]byte("N;"), 0, errors.New("not an integer")}, } for testName, test := range tests { t.Run(testName, func(t *testing.T) { t.Run("int", func(t *testing.T) { var result int err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != test.output { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("int8", func(t *testing.T) { var result int8 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != int8(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("int16", func(t *testing.T) { var result int16 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != int16(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("int32", func(t *testing.T) { var result int32 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != int32(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("int64", func(t *testing.T) { var result int64 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != int64(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, 
test.expectedError) } }) t.Run("uint8", func(t *testing.T) { var result uint8 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != uint8(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("uint16", func(t *testing.T) { var result uint16 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != uint16(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("uint32", func(t *testing.T) { var result uint32 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != uint32(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) t.Run("uint64", func(t *testing.T) { var result uint64 err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if result != uint64(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) }) } }
explode_data.jsonl/27017
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1867 }
[ 2830, 3393, 1806, 27121, 1072, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 2415, 14032, 60, 1235, 341, 197, 22427, 260, 3056, 3782, 198, 197, 21170, 286, 526, 198, 197, 42400, 1454, 1465, 198, 197, 59403, 197, 197, 1, 15, 788, 1060, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetReadCASTemplate(t *testing.T) { sc := &v1_storage.StorageClass{} sc.Annotations = make(map[string]string) tests := map[string]struct { scReadCASAnnotation string scCASTypeAnnotation string envJivaCAST string envCStorCAST string expectedCAST string }{ "CAST annotation is present": { "cast-read-from-annotation", "", "", "", "cast-read-from-annotation", }, "CAST annotation is absent/empty and cas type is cstor": { "", "cstor", "", "cast-cstor-read-from-env", "cast-cstor-read-from-env", }, "CAST annotation is absent/empty and cas type is jiva": { "", "jiva", "cast-jiva-read-from-env", "", "cast-jiva-read-from-env", }, "CAST annotation is absent/empty and cas type unknown": { "", "unknown", "cast-jiva-read-from-env", "cast-cstor-read-from-env", "", }, } defer func() { os.Unsetenv(string(menv.CASTemplateToReadCStorSnapshotENVK)) os.Unsetenv(string(menv.CASTemplateToReadJivaSnapshotENVK)) }() for name, test := range tests { t.Run(name, func(t *testing.T) { sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotRead)] = test.scReadCASAnnotation sc.Annotations[string(v1alpha1.CASTypeKey)] = test.scCASTypeAnnotation os.Setenv(string(menv.CASTemplateToReadCStorSnapshotENVK), test.envCStorCAST) os.Setenv(string(menv.CASTemplateToReadJivaSnapshotENVK), test.envJivaCAST) castName := getReadCASTemplate(sc) if castName != test.expectedCAST { t.Fatalf("unexpected cast name, wanted %q got %q", test.expectedCAST, castName) } }) } }
explode_data.jsonl/562
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 705 }
[ 2830, 3393, 1949, 4418, 34163, 3708, 1155, 353, 8840, 836, 8, 341, 29928, 1669, 609, 85, 16, 23310, 43771, 1957, 16094, 29928, 91172, 284, 1281, 9147, 14032, 30953, 340, 78216, 1669, 2415, 14032, 60, 1235, 341, 197, 29928, 4418, 87516, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTimeout(t *testing.T) { configureNotifier(t) var wg sync.WaitGroup defer afterTest() var eventsData moira.NotificationEvents = []moira.NotificationEvent{event} // Configure events with long sending time pkg := NotificationPackage{ Events: eventsData, Contact: moira.ContactData{ Type: "test", }, } sender.EXPECT().SendEvents(eventsData, pkg.Contact, pkg.Trigger, plots, pkg.Throttled).Return(nil).Do(func(arg0, arg1, arg2, arg3, arg4 interface{}) { fmt.Print("Trying to send for 10 second") time.Sleep(time.Second * 10) }).Times(maxParallelSendsPerSender) for i := 0; i < maxParallelSendsPerSender; i++ { notif.Send(&pkg, &wg) wg.Wait() } // Configure timeouted event notification := moira.ScheduledNotification{} pkg2 := NotificationPackage{ Events: eventsData, Contact: moira.ContactData{ Type: "test", Value: "fail contact", }, } scheduler.EXPECT().ScheduleNotification(gomock.Any(), event, pkg2.Trigger, pkg2.Contact, pkg.Plotting, pkg2.Throttled, pkg2.FailCount+1, gomock.Any()).Return(&notification) dataBase.EXPECT().AddNotification(&notification).Return(nil).Do(func(f ...interface{}) { close(shutdown) }) notif.Send(&pkg2, &wg) wg.Wait() waitTestEnd() }
explode_data.jsonl/29090
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 470 }
[ 2830, 3393, 7636, 1155, 353, 8840, 836, 8, 341, 197, 21002, 64729, 1155, 340, 2405, 63581, 12811, 28384, 2808, 198, 16867, 1283, 2271, 2822, 2405, 4357, 1043, 4544, 8832, 49329, 7900, 284, 3056, 6355, 8832, 49329, 1556, 90, 3087, 630, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMSSNotDelayed(t *testing.T) { tests := []struct { name string fn func(tcpip.Endpoint) }{ {"no-op", func(tcpip.Endpoint) {}}, {"delay", func(ep tcpip.Endpoint) { ep.SocketOptions().SetDelayOption(true) }}, {"cork", func(ep tcpip.Endpoint) { ep.SocketOptions().SetCorkOption(true) }}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { const maxPayload = 100 c := context.New(t, defaultMTU) defer c.Cleanup() c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */, []byte{ header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256), }) test.fn(c.EP) allData := [][]byte{{0}, make([]byte, maxPayload), make([]byte, maxPayload)} for i, data := range allData { var r bytes.Reader r.Reset(data) if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil { t.Fatalf("Write #%d failed: %s", i+1, err) } } seq := c.IRS.Add(1) iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1) for i, data := range allData { // Check that data is received. packet := c.GetPacket() checker.IPv4(t, packet, checker.PayloadLen(len(data)+header.TCPMinimumSize), checker.TCP( checker.DstPort(context.TestPort), checker.TCPSeqNum(uint32(seq)), checker.TCPAckNum(uint32(iss)), checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh), ), ) if got, want := packet[header.IPv4MinimumSize+header.TCPMinimumSize:], data; !bytes.Equal(got, want) { t.Fatalf("got packet #%d's data = %v, want = %v", i+1, got, want) } seq = seq.Add(seqnum.Size(len(data))) } // Acknowledge the data. c.SendPacket(nil, &context.Headers{ SrcPort: context.TestPort, DstPort: c.Port, Flags: header.TCPFlagAck, SeqNum: iss, AckNum: seq, RcvWnd: 30000, }) }) } }
explode_data.jsonl/75966
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 880 }
[ 2830, 3393, 44, 1220, 2623, 57361, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 40095, 256, 2915, 98203, 573, 90409, 340, 197, 59403, 197, 197, 4913, 2152, 29492, 497, 2915, 98203, 573, 90409, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_Fault(t *testing.T) { t.Skip() ori := make([]byte, 10) buf := FromBytes(ori) buf.OpenRW() // buf.Open() bs := buf.Bytes() bs[0] = 0x55 bs[9] = 0xff buf.Close() buf.Open() defer buf.Close() t.Log(hex.EncodeToString(buf.Bytes())) }
explode_data.jsonl/49449
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 120 }
[ 2830, 3393, 1400, 945, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 741, 197, 13601, 1669, 1281, 10556, 3782, 11, 220, 16, 15, 340, 26398, 1669, 5542, 7078, 7, 13601, 340, 26398, 12953, 56368, 741, 197, 322, 6607, 12953, 741, 93801, 166...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOperation_UserLogoutHandler(t *testing.T) { t.Run("logs out user", func(t *testing.T) { o, err := New(config(t)) require.NoError(t, err) o.store.cookies = &cookie.MockStore{ Jar: &cookie.MockJar{ Cookies: map[interface{}]interface{}{ userSubCookieName: uuid.New().String(), }, }, } result := httptest.NewRecorder() o.userLogoutHandler(result, newUserLogoutRequest()) require.Equal(t, http.StatusOK, result.Code) }) t.Run("err badrequest if cannot open cookies", func(t *testing.T) { o, err := New(config(t)) require.NoError(t, err) o.store.cookies = &cookie.MockStore{ OpenErr: errors.New("test"), } result := httptest.NewRecorder() o.userLogoutHandler(result, newUserLogoutRequest()) require.Equal(t, http.StatusBadRequest, result.Code) require.Contains(t, result.Body.String(), "cannot open cookies") }) t.Run("no-op if user sub cookie is not found", func(t *testing.T) { o, err := New(config(t)) require.NoError(t, err) result := httptest.NewRecorder() o.userLogoutHandler(result, newUserLogoutRequest()) require.Equal(t, http.StatusOK, result.Code) }) t.Run("err internal server error if cannot delete cookie", func(t *testing.T) { o, err := New(config(t)) require.NoError(t, err) o.store.cookies = &cookie.MockStore{ Jar: &cookie.MockJar{ Cookies: map[interface{}]interface{}{ userSubCookieName: uuid.New().String(), }, SaveErr: errors.New("test"), }, } result := httptest.NewRecorder() o.userLogoutHandler(result, newUserLogoutRequest()) require.Equal(t, http.StatusInternalServerError, result.Code) require.Contains(t, result.Body.String(), "failed to delete user sub cookie") }) }
explode_data.jsonl/68493
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 684 }
[ 2830, 3393, 8432, 31339, 27958, 3050, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 22081, 700, 1196, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 22229, 11, 1848, 1669, 1532, 8754, 1155, 1171, 197, 17957, 35699, 1155, 11, 1848, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateS3BucketLifecycleTimestamp(t *testing.T) { validDates := []string{ "2016-01-01", "2006-01-02", } for _, v := range validDates { _, errors := validateS3BucketLifecycleTimestamp(v, "date") if len(errors) != 0 { t.Fatalf("%q should be valid date: %q", v, errors) } } invalidDates := []string{ "Jan 01 2016", "20160101", } for _, v := range invalidDates { _, errors := validateS3BucketLifecycleTimestamp(v, "date") if len(errors) == 0 { t.Fatalf("%q should be invalid date", v) } } }
explode_data.jsonl/78572
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 231 }
[ 2830, 3393, 17926, 50, 18, 36018, 62731, 20812, 1155, 353, 8840, 836, 8, 341, 56322, 55238, 1669, 3056, 917, 515, 197, 197, 1, 17, 15, 16, 21, 12, 15, 16, 12, 15, 16, 756, 197, 197, 1, 17, 15, 15, 21, 12, 15, 16, 12, 15, 17,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func Test_plusOne(t *testing.T) { tests := []struct { digits []int want []int }{ { digits: []int{, 9}, want: []int{1, 0, 0}, }, } for _, tt := range tests { t.Run("", func(t *testing.T) { if got := plusOne(tt.digits); !reflect.DeepEqual(got, tt.want) { t.Errorf("plusOne() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/72888
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 179 }
[ 2830, 3393, 28043, 3966, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 2698, 18667, 3056, 396, 198, 197, 50780, 256, 3056, 396, 198, 197, 59403, 197, 197, 515, 298, 2698, 18667, 25, 3056, 396, 90, 11, 220, 24, 158...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestProtocol(t *testing.T) { for _, testCase := range []struct{ URL, Expected string }{ {"google.com", ""}, {"ftp://google.com", "ftp"}, {"http://google.com", "http"}, {"https://google.com", "https"}, {"https://user@google.com", "https"}, {"https://user:pass@google.com", "https"}, } { if result := Protocol(testCase.URL); result != testCase.Expected { t.Errorf(`Url (%q) returned %q for Protocol(), but %q was expected`, testCase.URL, result, testCase.Expected) } } }
explode_data.jsonl/30835
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 187 }
[ 2830, 3393, 20689, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 54452, 1669, 2088, 3056, 1235, 90, 5548, 11, 31021, 914, 335, 515, 197, 197, 4913, 17485, 905, 497, 77496, 197, 197, 4913, 25068, 1110, 17485, 905, 497, 330, 25068, 7115, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestReconcileAnalysisRunInvalid(t *testing.T) { f := newFixture(t) defer f.Close() c, _, _ := f.newController(noResyncPeriodFunc) run := &v1alpha1.AnalysisRun{ Spec: v1alpha1.AnalysisRunSpec{ Metrics: []v1alpha1.Metric{{ Name: "success-rate", }}, }, } newRun := c.reconcileAnalysisRun(run) assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase) }
explode_data.jsonl/75829
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 693, 40446, 457, 26573, 6727, 7928, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 501, 18930, 1155, 340, 16867, 282, 10421, 741, 1444, 11, 8358, 716, 1669, 282, 4618, 2051, 39205, 1061, 1721, 23750, 9626, 340, 56742, 1669, 609, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIngressHandler_Start(t *testing.T) { client := &fakeClient{} reporter := &mockReporter{} handler := Handler{ Logger: zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())), CeClient: client, Reporter: reporter, Defaulter: broker.TTLDefaulter(zap.NewNop(), 5), } ctx, cancel := context.WithCancel(context.Background()) go func() { if err := handler.Start(ctx); err != nil { t.Error(err) } }() // Need time for the handler to start up. Wait. for !client.ready() { time.Sleep(1 * time.Millisecond) } event := cloudevents.NewEvent() client.fakeReceive(t, event) cancel() if !client.sent { t.Errorf("client should invoke send function") } if !reporter.eventCountReported { t.Errorf("event count should have been reported") } if !reporter.eventDispatchTimeReported { t.Errorf("event dispatch time should have been reported") } }
explode_data.jsonl/11034
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 336 }
[ 2830, 3393, 641, 2483, 3050, 38056, 1155, 353, 8840, 836, 8, 341, 25291, 1669, 609, 30570, 2959, 16094, 69931, 261, 1669, 609, 16712, 52766, 16094, 53326, 1669, 19954, 515, 197, 55861, 25, 262, 32978, 1944, 7121, 7395, 1155, 11, 32978, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestParseExpandOption(t *testing.T) { type config struct { Host string `env:"HOST" envDefault:"localhost"` Port int `env:"PORT" envDefault:"3000" envExpand:"True"` SecretKey string `env:"SECRET_KEY" envExpand:"True"` ExpandKey string `env:"EXPAND_KEY"` CompoundKey string `env:"HOST_PORT" envDefault:"${HOST}:${PORT}" envExpand:"True"` Default string `env:"DEFAULT" envDefault:"def1" envExpand:"True"` } defer os.Clearenv() os.Setenv("HOST", "localhost") os.Setenv("PORT", "3000") os.Setenv("EXPAND_KEY", "qwerty12345") os.Setenv("SECRET_KEY", "${EXPAND_KEY}") cfg := config{} err := Parse(&cfg) assert.NoError(t, err) assert.Equal(t, "localhost", cfg.Host) assert.Equal(t, 3000, cfg.Port) assert.Equal(t, "qwerty12345", cfg.SecretKey) assert.Equal(t, "qwerty12345", cfg.ExpandKey) assert.Equal(t, "localhost:3000", cfg.CompoundKey) assert.Equal(t, "def1", cfg.Default) }
explode_data.jsonl/78782
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 402 }
[ 2830, 3393, 14463, 38946, 5341, 1155, 353, 8840, 836, 8, 341, 13158, 2193, 2036, 341, 197, 197, 9296, 286, 914, 1565, 3160, 2974, 28687, 1, 6105, 3675, 2974, 8301, 8805, 197, 98459, 286, 526, 262, 1565, 3160, 2974, 5095, 1, 6105, 3675...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestClient_SupportedVsCurrencies(t *testing.T) { scs, err := client.SimpleSupportedVsCurrencies() require.NoError(t, err) require.NotEmpty(t, len(scs)) require.NotEmpty(t, len(scs[0])) }
explode_data.jsonl/37787
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 75 }
[ 2830, 3393, 2959, 1098, 12513, 51737, 34, 19607, 1155, 353, 8840, 836, 8, 341, 1903, 4837, 11, 1848, 1669, 2943, 24252, 34636, 51737, 34, 19607, 741, 17957, 35699, 1155, 11, 1848, 340, 17957, 15000, 3522, 1155, 11, 2422, 1141, 4837, 117...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestInMemoryDB(t *testing.T) { db := GetInMemoryDB() // iterate over the testing suite and call the function for _, f := range data.TestingFuncs { f(t, db) } }
explode_data.jsonl/41283
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 63 }
[ 2830, 3393, 641, 10642, 3506, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 2126, 641, 10642, 3506, 2822, 197, 322, 29629, 916, 279, 7497, 16182, 323, 1618, 279, 729, 198, 2023, 8358, 282, 1669, 2088, 821, 8787, 287, 9626, 82, 341, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestBashWrapper(t *testing.T) { if !*update { t.Skip("use -run-bash flag to run this") } cmd := exec.Command("./testdata/bash_wrapper.sh", os.Args[0], "./testdata/test_bash_wrapper.sh") cmd.Env = append(os.Environ(), "BE_ECHO=1") out, err := cmd.Output() t.Logf("%q\n", out) if err != nil { t.Fatalf("process ran with err %v", err) } want := "echo:[]string{\"param1\", \"param2\"}\n" if string(out) != want { t.Fatalf("wrong process output got `%s` want `%s`", out, want) } }
explode_data.jsonl/30936
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 214 }
[ 2830, 3393, 33, 988, 11542, 1155, 353, 8840, 836, 8, 341, 743, 753, 9, 2386, 341, 197, 3244, 57776, 445, 810, 481, 6108, 1455, 988, 5181, 311, 1598, 419, 1138, 197, 532, 25920, 1669, 3883, 12714, 13988, 92425, 17148, 23561, 2395, 497,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestParseChangeLists_BadInput(t *testing.T) { inputs := []string{ "Android:123", // capital letter "dog999", // no colon "cat:", // no change num ":123", // no short host "chromium:1;chromium:2", // uses semi-colon rather than comma } for _, i := range inputs { _, err := ParseChangeListsSorted(i) if err == nil { t.Errorf("expected an error, but got none for input %v", i) } } }
explode_data.jsonl/68385
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 220 }
[ 2830, 3393, 14463, 4072, 37848, 1668, 329, 2505, 1155, 353, 8840, 836, 8, 341, 22427, 82, 1669, 3056, 917, 515, 197, 197, 1, 21831, 25, 16, 17, 18, 497, 1843, 442, 6722, 6524, 198, 197, 197, 1, 18457, 24, 24, 24, 497, 394, 442, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStageParsingSLITest(t *testing.T) { input := make(chan common.EntryMap) parser := mocks.NewSLIMock() stage := NewStageParsingSLI(input, "", parser, logging.NewLoggerDefault()) wg := &sync.WaitGroup{} wg.Add(1) go func() { StageInit(stage, 2) wg.Done() }() input <- common.EntryMap{} close(input) <-stage.Out() wg.Wait() assert.Equal(t, 1, parser.Parsed()) }
explode_data.jsonl/81429
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 165 }
[ 2830, 3393, 19398, 68839, 7984, 952, 477, 1155, 353, 8840, 836, 8, 341, 22427, 1669, 1281, 35190, 4185, 22330, 2227, 340, 55804, 1669, 68909, 7121, 7984, 1791, 1176, 741, 91575, 1669, 1532, 19398, 68839, 7984, 40, 5384, 11, 7342, 6729, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHashMap_SetConcurrent(t *testing.T) { blocks := &HashMap{} var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) go func(blocks *HashMap, i int) { defer wg.Done() blocks.Set(strconv.Itoa(i), struct{}{}) wg.Add(1) go func(blocks *HashMap, i int) { defer wg.Done() blocks.Get(strconv.Itoa(i)) }(blocks, i) }(blocks, i) } wg.Wait() }
explode_data.jsonl/24439
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 191 }
[ 2830, 3393, 18497, 14812, 1109, 3231, 1155, 353, 8840, 836, 8, 341, 2233, 34936, 1669, 609, 18497, 31483, 2405, 63581, 12811, 28384, 2808, 198, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 16, 15, 15, 26, 600, 1027, 1476, 197, 72079, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPeerConnection_SetConfiguration_Go(t *testing.T) { // Note: this test includes all SetConfiguration features that are supported // by Go but not the WASM bindings, namely: ICEServer.Credential, // ICEServer.CredentialType, and Certificates. api := NewAPI() secretKey1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) assert.Nil(t, err) certificate1, err := GenerateCertificate(secretKey1) assert.Nil(t, err) secretKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) assert.Nil(t, err) certificate2, err := GenerateCertificate(secretKey2) assert.Nil(t, err) for _, test := range []struct { name string init func() (*PeerConnection, error) config Configuration wantErr error }{ { name: "valid", init: func() (*PeerConnection, error) { pc, err := api.NewPeerConnection(Configuration{ PeerIdentity: "unittest", Certificates: []Certificate{*certificate1}, ICECandidatePoolSize: 5, }) if err != nil { return pc, err } err = pc.SetConfiguration(Configuration{ ICEServers: []ICEServer{ { URLs: []string{ "stun:stun.l.google.com:19302", "turns:google.de?transport=tcp", }, Username: "unittest", Credential: OAuthCredential{ MACKey: "WmtzanB3ZW9peFhtdm42NzUzNG0=", AccessToken: "AAwg3kPHWPfvk9bDFL936wYvkoctMADzQ==", }, CredentialType: ICECredentialTypeOauth, }, }, ICETransportPolicy: ICETransportPolicyAll, BundlePolicy: BundlePolicyBalanced, RTCPMuxPolicy: RTCPMuxPolicyRequire, PeerIdentity: "unittest", Certificates: []Certificate{*certificate1}, ICECandidatePoolSize: 5, }) if err != nil { return pc, err } return pc, nil }, config: Configuration{}, wantErr: nil, }, { name: "update multiple certificates", init: func() (*PeerConnection, error) { return api.NewPeerConnection(Configuration{}) }, config: Configuration{ Certificates: []Certificate{*certificate1, *certificate2}, }, wantErr: &rtcerr.InvalidModificationError{Err: ErrModifyingCertificates}, }, { name: "update certificate", init: func() (*PeerConnection, error) { return api.NewPeerConnection(Configuration{}) }, 
config: Configuration{ Certificates: []Certificate{*certificate1}, }, wantErr: &rtcerr.InvalidModificationError{Err: ErrModifyingCertificates}, }, { name: "update ICEServers, no TURN credentials", init: func() (*PeerConnection, error) { return NewPeerConnection(Configuration{}) }, config: Configuration{ ICEServers: []ICEServer{ { URLs: []string{ "stun:stun.l.google.com:19302", "turns:google.de?transport=tcp", }, Username: "unittest", }, }, }, wantErr: &rtcerr.InvalidAccessError{Err: ErrNoTurnCredencials}, }, } { pc, err := test.init() if err != nil { t.Errorf("SetConfiguration %q: init failed: %v", test.name, err) } err = pc.SetConfiguration(test.config) if got, want := err, test.wantErr; !reflect.DeepEqual(got, want) { t.Errorf("SetConfiguration %q: err = %v, want %v", test.name, got, want) } } }
explode_data.jsonl/37344
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1467 }
[ 2830, 3393, 30888, 4526, 14812, 7688, 2646, 78, 1155, 353, 8840, 836, 8, 341, 197, 322, 7036, 25, 419, 1273, 5646, 678, 2573, 7688, 4419, 429, 525, 7248, 198, 197, 322, 553, 5994, 714, 537, 279, 37776, 44, 35700, 11, 31025, 25, 1928...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestSumPerKeyWithPartitionsNegativeBoundsInt(t *testing.T) { // We have two test cases, one for public partitions as a PCollection and one for public partitions as a slice (i.e., in-memory). for _, tc := range []struct { inMemory bool }{ {true}, {false}, } { triples := testutils.ConcatenateTriplesWithIntValue( testutils.MakeTripleWithIntValue(21, 1, -1), // should be clamped down to -2 testutils.MakeTripleWithIntValue(50, 2, -4)) // should be clamped up to -3 result := []testutils.TestInt64Metric{ {1, -42}, {2, -150}, } p, s, col, want := ptest.CreateList2(triples, result) col = beam.ParDo(s, testutils.ExtractIDFromTripleWithIntValue, col) publicPartitionsSlice := []int{1, 2} var publicPartitions interface{} if tc.inMemory { publicPartitions = publicPartitionsSlice } else { publicPartitions = beam.CreateList(s, publicPartitionsSlice) } // We have ε=50, δ=0 and l1Sensitivity=6. // We have 2 partitions. So, to get an overall flakiness of 10⁻²³, // we need to have each partition pass with 1-10⁻²⁵ probability (k=25). epsilon, delta, k, l1Sensitivity := 50.0, 0.0, 25.0, 6.0 pcol := MakePrivate(s, col, NewPrivacySpec(epsilon, delta)) pcol = ParDo(s, testutils.TripleWithIntValueToKV, pcol) sumParams := SumParams{MaxPartitionsContributed: 2, MinValue: -3, MaxValue: -2, NoiseKind: LaplaceNoise{}, PublicPartitions: publicPartitions} got := SumPerKey(s, pcol, sumParams) want = beam.ParDo(s, testutils.Int64MetricToKV, want) if err := testutils.ApproxEqualsKVInt64(s, got, want, testutils.RoundedLaplaceTolerance(k, l1Sensitivity, epsilon)); err != nil { t.Fatalf("TestSumPerKeyWithPartitionsNegativeBoundsInt in-memory=%t: %v", tc.inMemory, err) } if err := ptest.Run(p); err != nil { t.Errorf("TestSumPerKeyWithPartitionsNegativeBoundsInt in-memory=%t: SumPerKey(%v) = %v, expected %v: %v", tc.inMemory, col, got, want, err) } } }
explode_data.jsonl/42957
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 761 }
[ 2830, 3393, 9190, 3889, 1592, 2354, 5800, 5930, 38489, 11394, 1072, 1155, 353, 8840, 836, 8, 341, 197, 322, 1205, 614, 1378, 1273, 5048, 11, 825, 369, 584, 46688, 438, 264, 393, 6482, 323, 825, 369, 584, 46688, 438, 264, 15983, 320, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestCache_AuthorizeCredential(t *testing.T) { u := NewSecret(10) p := NewSecret(10) a := NewCache() a.PutCredential(NewCredential(u, p)) if ok := a.AuthorizeCredential(newTestCredReq(u, p)); ok { t.Fatal(u, p) } }
explode_data.jsonl/43690
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 103 }
[ 2830, 3393, 8233, 1566, 1553, 551, 48265, 1155, 353, 8840, 836, 8, 341, 10676, 1669, 1532, 19773, 7, 16, 15, 340, 3223, 1669, 1532, 19773, 7, 16, 15, 692, 11323, 1669, 1532, 8233, 741, 11323, 39825, 48265, 35063, 48265, 8154, 11, 281,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGenerateContextKey(t *testing.T) { mSample := metrics.MetricSample{ Name: "my.metric.name", Value: 1, Mtype: metrics.GaugeType, Tags: []string{"foo", "bar"}, Host: "metric-hostname", SampleRate: 1, } contextKey := generateContextKey(&mSample) assert.Equal(t, ckey.ContextKey(0xdd892472f57d5cf1), contextKey) }
explode_data.jsonl/16669
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 167 }
[ 2830, 3393, 31115, 1972, 1592, 1155, 353, 8840, 836, 8, 341, 2109, 17571, 1669, 16734, 1321, 16340, 17571, 515, 197, 21297, 25, 981, 330, 2408, 85816, 2644, 756, 197, 47399, 25, 414, 220, 16, 345, 197, 9209, 1313, 25, 414, 16734, 1224...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateConfig(t *testing.T) { testCases := []struct { name string applyConf func(*Config) expectedErr string }{ { name: "server section must be configured", applyConf: func(c *Config) { c.Server = nil }, expectedErr: "server section must be configured", }, { name: "bind_address must be configured", applyConf: func(c *Config) { c.Server.BindAddress = "" }, expectedErr: "bind_address and bind_port must be configured", }, { name: "bind_port must be configured", applyConf: func(c *Config) { c.Server.BindPort = 0 }, expectedErr: "bind_address and bind_port must be configured", }, { name: "registration_uds_path must be configured", applyConf: func(c *Config) { c.Server.RegistrationUDSPath = "" }, expectedErr: "registration_uds_path must be configured", }, { name: "trust_domain must be configured", applyConf: func(c *Config) { c.Server.TrustDomain = "" }, expectedErr: "trust_domain must be configured", }, { name: "data_dir must be configured", applyConf: func(c *Config) { c.Server.DataDir = "" }, expectedErr: "data_dir must be configured", }, { name: "plugins section must be configured", applyConf: func(c *Config) { c.Plugins = nil }, expectedErr: "plugins section must be configured", }, { name: "if ACME is used, federation.bundle_endpoint.acme.domain_name must be configured", applyConf: func(c *Config) { c.Server.Federation = &federationConfig{ BundleEndpoint: &bundleEndpointConfig{ ACME: &bundleEndpointACMEConfig{}, }, } }, expectedErr: "federation.bundle_endpoint.acme.domain_name must be configured", }, { name: "if ACME is used, federation.bundle_endpoint.acme.email must be configured", applyConf: func(c *Config) { c.Server.Federation = &federationConfig{ BundleEndpoint: &bundleEndpointConfig{ ACME: &bundleEndpointACMEConfig{ DomainName: "domain-name", }, }, } }, expectedErr: "federation.bundle_endpoint.acme.email must be configured", }, { name: "if FederatesWith is used, federation.bundle_endpoint.address must be configured", applyConf: func(c *Config) { 
federatesWith := make(map[string]federatesWithConfig) federatesWith["domain.test"] = federatesWithConfig{} c.Server.Federation = &federationConfig{ FederatesWith: federatesWith, } }, expectedErr: "federation.federates_with[\"domain.test\"].bundle_endpoint.address must be configured", }, } for _, testCase := range testCases { testCase := testCase t.Run(testCase.name, func(t *testing.T) { conf := defaultValidConfig() testCase.applyConf(conf) err := validateConfig(conf) if testCase.expectedErr != "" { require.Error(t, err) spiretest.AssertErrorContains(t, err, testCase.expectedErr) } else { require.NoError(t, err) } }) } }
explode_data.jsonl/51904
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1219 }
[ 2830, 3393, 17926, 2648, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 286, 914, 198, 197, 197, 10280, 15578, 256, 2915, 4071, 2648, 340, 197, 42400, 7747, 914, 198, 197, 59403, 197, 197, 515, 298, 116...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSetHWAddrByIP(t *testing.T) { ip := net.ParseIP("1.2.3.4") hwAddr, err := hwaddr.GenerateHardwareAddr4(ip, hwaddr.PrivateMACPrefix) if err != nil { t.Fatalf("unexpected error: %v", err) } expectedHWAddr := net.HardwareAddr(append(hwaddr.PrivateMACPrefix, ip.To4()...)) if !reflect.DeepEqual(hwAddr, expectedHWAddr) { t.Fatalf("hwaddr.GenerateHardwareAddr4 changed behavior! (%#v != %#v)", hwAddr, expectedHWAddr) } }
explode_data.jsonl/51155
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 190 }
[ 2830, 3393, 1649, 38252, 13986, 1359, 3298, 1155, 353, 8840, 836, 8, 341, 46531, 1669, 4179, 8937, 3298, 445, 16, 13, 17, 13, 18, 13, 19, 1138, 9598, 86, 13986, 11, 1848, 1669, 31256, 6214, 57582, 66862, 13986, 19, 23443, 11, 31256, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUnusedDiag(t *testing.T) { testenv.NeedsGo1Point(t, 14) const proxy = ` -- example.com@v1.0.0/x.go -- package pkg const X = 1 ` const files = ` -- go.mod -- module mod.com go 1.14 require example.com v1.0.0 -- main.go -- package main func main() {} ` const want = `module mod.com go 1.14 ` runModfileTest(t, files, proxy, func(t *testing.T, env *Env) { env.OpenFile("go.mod") var d protocol.PublishDiagnosticsParams env.Await( OnceMet( env.DiagnosticAtRegexp("go.mod", `require example.com`), ReadDiagnostics("go.mod", &d), ), ) env.ApplyQuickFixes("go.mod", d.Diagnostics) if got := env.ReadWorkspaceFile("go.mod"); got != want { t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(want, got)) } }) }
explode_data.jsonl/3741
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 329 }
[ 2830, 3393, 94033, 21685, 351, 1155, 353, 8840, 836, 8, 341, 18185, 3160, 2067, 68, 6767, 10850, 16, 2609, 1155, 11, 220, 16, 19, 692, 4777, 13291, 284, 22074, 313, 3110, 905, 31, 85, 16, 13, 15, 13, 15, 10776, 18002, 39514, 1722, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDealStatusStreamSendReceiveRequest(t *testing.T) { ctx := context.Background() testCases := map[string]struct { senderDisabledNew bool receiverDisabledNew bool }{ "both clients current version": {}, "sender old supports old queries": { senderDisabledNew: true, }, "receiver only supports old queries": { receiverDisabledNew: true, }, } for testCase, data := range testCases { t.Run(testCase, func(t *testing.T) { td := shared_testutil.NewLibp2pTestData(ctx, t) var fromNetwork, toNetwork network.StorageMarketNetwork if data.senderDisabledNew { fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID})) } else { fromNetwork = network.NewFromLibp2pHost(td.Host1) } if data.receiverDisabledNew { toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID})) } else { toNetwork = network.NewFromLibp2pHost(td.Host2) } toHost := td.Host2.ID() // host1 gets no-op receiver tr := &testReceiver{t: t} require.NoError(t, fromNetwork.SetDelegate(tr)) // host2 gets receiver achan := make(chan network.DealStatusRequest) tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s network.DealStatusStream) { readq, err := s.ReadDealStatusRequest() require.NoError(t, err) achan <- readq }} require.NoError(t, toNetwork.SetDelegate(tr2)) // setup query stream host1 --> host 2 assertDealStatusRequestReceived(ctx, t, fromNetwork, toHost, achan) }) } }
explode_data.jsonl/19995
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 630 }
[ 2830, 3393, 72841, 2522, 3027, 11505, 14742, 1900, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 2822, 18185, 37302, 1669, 2415, 14032, 60, 1235, 341, 197, 1903, 1659, 25907, 3564, 256, 1807, 198, 197, 17200, 12862, 25907, 3564,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSpecificKind(t *testing.T) { kind := "DaemonSet" for i := 0; i < *fuzzIters; i++ { doRoundTripTest(testapi.Groups["extensions"], kind, t) if t.Failed() { break } } }
explode_data.jsonl/47651
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 47514, 10629, 1155, 353, 8840, 836, 8, 341, 197, 15314, 1669, 330, 89177, 1649, 698, 2023, 600, 1669, 220, 15, 26, 600, 366, 353, 69, 8889, 2132, 388, 26, 600, 1027, 341, 197, 19935, 27497, 56352, 2271, 8623, 2068, 59800, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGitCommandDiscardAllFileChanges(t *testing.T) { type scenario struct { testName string command func() (func(string, ...string) *exec.Cmd, *[][]string) test func(*[][]string, error) file *models.File removeFile func(string) error } scenarios := []scenario{ { "An error occurred when resetting", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("test") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.Error(t, err) assert.Len(t, *cmdsCalled, 1) assert.EqualValues(t, *cmdsCalled, [][]string{ {"reset", "--", "test"}, }) }, &models.File{ Name: "test", HasStagedChanges: true, }, func(string) error { return nil }, }, { "An error occurred when removing file", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("test") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.Error(t, err) assert.EqualError(t, err, "an error occurred when removing file") assert.Len(t, *cmdsCalled, 0) }, &models.File{ Name: "test", Tracked: false, Added: true, }, func(string) error { return fmt.Errorf("an error occurred when removing file") }, }, { "An error occurred with checkout", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("test") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.Error(t, err) assert.Len(t, *cmdsCalled, 1) assert.EqualValues(t, *cmdsCalled, [][]string{ {"checkout", "--", "test"}, }) }, &models.File{ Name: "test", Tracked: true, HasStagedChanges: false, }, func(string) error { return nil }, }, { "Checkout only", func() (func(string, ...string) *exec.Cmd, *[][]string) { 
cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("echo") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.NoError(t, err) assert.Len(t, *cmdsCalled, 1) assert.EqualValues(t, *cmdsCalled, [][]string{ {"checkout", "--", "test"}, }) }, &models.File{ Name: "test", Tracked: true, HasStagedChanges: false, }, func(string) error { return nil }, }, { "Reset and checkout staged changes", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("echo") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.NoError(t, err) assert.Len(t, *cmdsCalled, 2) assert.EqualValues(t, *cmdsCalled, [][]string{ {"reset", "--", "test"}, {"checkout", "--", "test"}, }) }, &models.File{ Name: "test", Tracked: true, HasStagedChanges: true, }, func(string) error { return nil }, }, { "Reset and checkout merge conflicts", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("echo") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.NoError(t, err) assert.Len(t, *cmdsCalled, 2) assert.EqualValues(t, *cmdsCalled, [][]string{ {"reset", "--", "test"}, {"checkout", "--", "test"}, }) }, &models.File{ Name: "test", Tracked: true, HasMergeConflicts: true, }, func(string) error { return nil }, }, { "Reset and remove", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("echo") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.NoError(t, err) assert.Len(t, *cmdsCalled, 1) assert.EqualValues(t, *cmdsCalled, [][]string{ 
{"reset", "--", "test"}, }) }, &models.File{ Name: "test", Tracked: false, Added: true, HasStagedChanges: true, }, func(filename string) error { assert.Equal(t, "test", filename) return nil }, }, { "Remove only", func() (func(string, ...string) *exec.Cmd, *[][]string) { cmdsCalled := [][]string{} return func(cmd string, args ...string) *exec.Cmd { cmdsCalled = append(cmdsCalled, args) return secureexec.Command("echo") }, &cmdsCalled }, func(cmdsCalled *[][]string, err error) { assert.NoError(t, err) assert.Len(t, *cmdsCalled, 0) }, &models.File{ Name: "test", Tracked: false, Added: true, HasStagedChanges: false, }, func(filename string) error { assert.Equal(t, "test", filename) return nil }, }, } for _, s := range scenarios { t.Run(s.testName, func(t *testing.T) { var cmdsCalled *[][]string gitCmd := NewDummyGitCommand() gitCmd.OSCommand.Command, cmdsCalled = s.command() gitCmd.OSCommand.SetRemoveFile(s.removeFile) s.test(cmdsCalled, gitCmd.DiscardAllFileChanges(s.file)) }) } }
explode_data.jsonl/6318
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2851 }
[ 2830, 3393, 46562, 4062, 23477, 567, 2403, 1703, 11317, 1155, 353, 8840, 836, 8, 341, 13158, 15048, 2036, 341, 197, 18185, 675, 256, 914, 198, 197, 45566, 262, 2915, 368, 320, 2830, 3609, 11, 2503, 917, 8, 353, 11748, 64512, 11, 353, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEnvironmentVariablesPassBetweenHooks(t *testing.T) { t.Parallel() tester, err := NewBootstrapTester() if err != nil { t.Fatal(err) } defer tester.Close() if runtime.GOOS != "windows" { var script = []string{ "#!/bin/bash", "export LLAMAS_ROCK=absolutely", } if err := ioutil.WriteFile(filepath.Join(tester.HooksDir, "environment"), []byte(strings.Join(script, "\n")), 0700); err != nil { t.Fatal(err) } } else { var script = []string{ "@echo off", "set LLAMAS_ROCK=absolutely", } if err := ioutil.WriteFile(filepath.Join(tester.HooksDir, "environment.bat"), []byte(strings.Join(script, "\r\n")), 0700); err != nil { t.Fatal(err) } } git := tester.MustMock(t, "git").PassthroughToLocalCommand().Before(func(i bintest.Invocation) error { if err := bintest.ExpectEnv(t, i.Env, `MY_CUSTOM_ENV=1`, `LLAMAS_ROCK=absolutely`); err != nil { return err } return nil }) git.Expect().AtLeastOnce().WithAnyArguments() tester.ExpectGlobalHook("command").Once().AndExitWith(0).AndCallFunc(func(c *bintest.Call) { if err := bintest.ExpectEnv(t, c.Env, `MY_CUSTOM_ENV=1`, `LLAMAS_ROCK=absolutely`); err != nil { fmt.Fprintf(c.Stderr, "%v\n", err) c.Exit(1) } c.Exit(0) }) tester.RunAndCheck(t, "MY_CUSTOM_ENV=1") }
explode_data.jsonl/8967
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 582 }
[ 2830, 3393, 12723, 22678, 12187, 25477, 67769, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 18185, 261, 11, 1848, 1669, 1532, 45511, 58699, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 16867, 37111, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCmdGenerator(t *testing.T) { err := os.RemoveAll(genTestDir3) require.NoError(t, err) cmd := exec.Command("jet", "-source=MySQL", "-dbname=dvds", "-host=localhost", "-port=3306", "-user=jet", "-password=jet", "-path="+genTestDir3) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout err = cmd.Run() require.NoError(t, err) assertGeneratedFiles(t) err = os.RemoveAll(genTestDirRoot) require.NoError(t, err) // check that generation via DSN works dsn := fmt.Sprintf("mysql://%[1]s:%[2]s@tcp(%[3]s:%[4]d)/%[5]s", dbconfig.MySQLUser, dbconfig.MySQLPassword, dbconfig.MySqLHost, dbconfig.MySQLPort, "dvds", ) cmd = exec.Command("jet", "-dsn="+dsn, "-path="+genTestDir3) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout err = cmd.Run() require.NoError(t, err) err = os.RemoveAll(genTestDirRoot) require.NoError(t, err) }
explode_data.jsonl/81302
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 382 }
[ 2830, 3393, 15613, 12561, 1155, 353, 8840, 836, 8, 341, 9859, 1669, 2643, 84427, 36884, 2271, 6184, 18, 340, 17957, 35699, 1155, 11, 1848, 692, 25920, 1669, 3883, 12714, 445, 15407, 497, 6523, 2427, 28, 59224, 497, 6523, 35265, 25405, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateDaisyInflater_File_HappyCase(t *testing.T) { source := fileSource{gcsPath: "gs://bucket/vmdk"} inflater := createDaisyInflaterSafe(t, ImageImportRequest{ Source: source, Subnet: "projects/subnet/subnet", Network: "projects/network/network", Zone: "us-west1-c", ExecutionID: "1234", NoExternalIP: false, }, imagefile.Metadata{}) daisyutils.CheckWorkflow(inflater.worker, func(wf *daisy.Workflow, err error) { assert.Equal(t, "zones/us-west1-c/disks/disk-1234", inflater.inflatedDiskURI) assert.Equal(t, "gs://bucket/vmdk", wf.Vars["source_disk_file"].Value) assert.Equal(t, "projects/subnet/subnet", wf.Vars["import_subnet"].Value) assert.Equal(t, "projects/network/network", wf.Vars["import_network"].Value) assert.Equal(t, "default", wf.Vars["compute_service_account"].Value) network := getWorkerNetwork(t, wf) assert.Nil(t, network.AccessConfigs, "AccessConfigs must be nil to allow ExternalIP to be allocated.") }) }
explode_data.jsonl/75630
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 412 }
[ 2830, 3393, 4021, 35, 49056, 12342, 34061, 2039, 11144, 4207, 1155, 353, 8840, 836, 8, 341, 47418, 1669, 1034, 3608, 90, 70, 4837, 1820, 25, 330, 5857, 1110, 30410, 5457, 2277, 74, 16707, 17430, 11729, 1669, 1855, 35, 49056, 12342, 2566...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCDPSessionSend(t *testing.T) { BeforeEach(t) defer AfterEach(t) cdpSession, err := browser.NewBrowserCDPSession() if isChromium { require.NoError(t, err) result, err := cdpSession.Send("Target.getTargets", nil) require.NoError(t, err) targetInfos := result.(map[string]interface{})["targetInfos"].([]interface{}) require.GreaterOrEqual(t, len(targetInfos), 1) } else { require.Error(t, err) } }
explode_data.jsonl/76231
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 169 }
[ 2830, 3393, 6484, 5012, 1338, 11505, 1155, 353, 8840, 836, 8, 341, 197, 87275, 1155, 340, 16867, 4636, 4854, 1155, 340, 1444, 9796, 5283, 11, 1848, 1669, 6929, 7121, 17878, 6484, 5012, 1338, 741, 743, 374, 1143, 77859, 341, 197, 17957, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestValidateGlusterfsPersistentVolumeSource checks validation rules for
// GlusterfsPersistentVolumeSource: endpoints and path are required, and a
// non-nil but empty endpointsNamespace is invalid.
func TestValidateGlusterfsPersistentVolumeSource(t *testing.T) {
	// epNs points at an empty string: "set but empty", distinct from nil.
	var epNs *string
	namespace := ""
	epNs = &namespace

	testCases := []struct {
		name     string
		gfs      *core.GlusterfsPersistentVolumeSource
		errtype  field.ErrorType
		errfield string
	}{
		{
			name:     "missing endpointname",
			gfs:      &core.GlusterfsPersistentVolumeSource{EndpointsName: "", Path: "/tmp"},
			errtype:  field.ErrorTypeRequired,
			errfield: "endpoints",
		},
		{
			name:     "missing path",
			gfs:      &core.GlusterfsPersistentVolumeSource{EndpointsName: "my-endpoint", Path: ""},
			errtype:  field.ErrorTypeRequired,
			errfield: "path",
		},
		{
			name:     "non null endpointnamespace with empty string",
			gfs:      &core.GlusterfsPersistentVolumeSource{EndpointsName: "my-endpoint", Path: "/tmp", EndpointsNamespace: epNs},
			errtype:  field.ErrorTypeInvalid,
			errfield: "endpointsNamespace",
		},
		{
			name:     "missing endpointname and path",
			gfs:      &core.GlusterfsPersistentVolumeSource{EndpointsName: "", Path: ""},
			errtype:  field.ErrorTypeRequired,
			errfield: "endpoints",
		},
	}

	for i, tc := range testCases {
		errs := validateGlusterfsPersistentVolumeSource(tc.gfs, field.NewPath("field"))
		// First two branches catch mismatches between "expected an error" and
		// "got one"; only the first returned error is checked for type and
		// field-name suffix.
		if len(errs) > 0 && tc.errtype == "" {
			t.Errorf("[%d: %q] unexpected error(s): %v", i, tc.name, errs)
		} else if len(errs) == 0 && tc.errtype != "" {
			t.Errorf("[%d: %q] expected error type %v", i, tc.name, tc.errtype)
		} else if len(errs) >= 1 {
			if errs[0].Type != tc.errtype {
				t.Errorf("[%d: %q] expected error type %v, got %v", i, tc.name, tc.errtype, errs[0].Type)
			} else if !strings.HasSuffix(errs[0].Field, "."+tc.errfield) {
				t.Errorf("[%d: %q] expected error on field %q, got %q", i, tc.name, tc.errfield, errs[0].Field)
			}
		}
	}
}
explode_data.jsonl/1003
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 761 }
[ 2830, 3393, 17926, 31541, 4993, 3848, 53194, 18902, 3608, 1155, 353, 8840, 836, 8, 341, 2405, 4155, 47360, 353, 917, 198, 56623, 1669, 8389, 96626, 47360, 284, 609, 2231, 271, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
// TestGetWhenHit verifies that a Get served from the underlying store returns
// the stored value, counts exactly one hit, and leaves every other stat
// counter at zero.
func TestGetWhenHit(t *testing.T) {
	// Given
	cacheValue := &struct {
		Hello string
	}{
		Hello: "world",
	}

	store := &mocksStore.StoreInterface{}
	store.On("Get", "my-key").Return(cacheValue, nil)

	codec := New(store)

	// When
	value, err := codec.Get("my-key")

	// Then
	assert.Nil(t, err)
	assert.Equal(t, cacheValue, value)
	assert.Equal(t, 1, codec.GetStats().Hits)
	assert.Equal(t, 0, codec.GetStats().Miss)
	assert.Equal(t, 0, codec.GetStats().SetSuccess)
	assert.Equal(t, 0, codec.GetStats().SetError)
	assert.Equal(t, 0, codec.GetStats().DeleteSuccess)
	assert.Equal(t, 0, codec.GetStats().DeleteError)
	assert.Equal(t, 0, codec.GetStats().InvalidateSuccess)
	assert.Equal(t, 0, codec.GetStats().InvalidateError)
}
explode_data.jsonl/29038
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 288 }
[ 2830, 3393, 1949, 4498, 19498, 1155, 353, 8840, 836, 8, 341, 197, 322, 16246, 198, 52680, 1130, 1669, 609, 1235, 341, 197, 197, 9707, 914, 198, 197, 59403, 197, 197, 9707, 25, 330, 14615, 756, 197, 630, 57279, 1669, 609, 16712, 82, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestAddFeedShouldShowErrorMessageWhenDataStoreIsNotSet(t *testing.T) { expected := command.DataStoreNotSet actual := Add(config.Config{"", ""}, test.NonRssMockFeed) test.AssertFailure(t, expected, actual) }
explode_data.jsonl/75093
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 2212, 28916, 14996, 7812, 21349, 4498, 1043, 6093, 3872, 2623, 1649, 1155, 353, 8840, 836, 8, 341, 42400, 1669, 3210, 3336, 6093, 2623, 1649, 198, 88814, 1669, 2691, 8754, 10753, 4913, 497, 1591, 2137, 1273, 30706, 49, 778, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestLogoutNoProviders hits /auth/logout on a service configured with no
// auth providers and expects a 400 with a JSON error body.
func TestLogoutNoProviders(t *testing.T) {
	svc := NewService(Opts{Logger: logger.Std})
	authRoute, _ := svc.Handlers()

	mux := http.NewServeMux()
	mux.Handle("/auth/", authRoute)
	ts := httptest.NewServer(mux)
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/auth/logout")
	require.NoError(t, err)
	defer resp.Body.Close()
	assert.Equal(t, 400, resp.StatusCode)

	b, err := ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	assert.Equal(t, "{\"error\":\"provides not defined\"}\n", string(b))
}
explode_data.jsonl/34046
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 209 }
[ 2830, 3393, 27958, 2753, 37351, 1155, 353, 8840, 836, 8, 341, 1903, 7362, 1669, 1532, 1860, 19238, 12754, 90, 7395, 25, 5925, 36086, 3518, 78011, 4899, 11, 716, 1669, 46154, 35308, 9254, 2822, 2109, 2200, 1669, 1758, 7121, 60421, 44, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGreetWithoutName(t *testing.T) { var name string t.Run("Morning", func(t *testing.T) { got := greet(name, 9) expected := "Good morning!" if got != expected { t.Errorf("Unexpected greeting. Got %q but expected %q", got, expected) } }) t.Run("Afternoon", func(t *testing.T) { got := greet(name, 13) expected := "Good afternoon!" if got != expected { t.Errorf("Unexpected greeting. Got %q but expected %q", got, expected) } }) t.Run("Evening", func(t *testing.T) { got := greet(name, 18) expected := "Good evening!" if got != expected { t.Errorf("Unexpected greeting. Got %q but expected %q", got, expected) } }) t.Run("Night", func(t *testing.T) { got := greet(name, 0) expected := "Good night!" if got != expected { t.Errorf("Unexpected greeting. Got %q but expected %q", got, expected) } }) }
explode_data.jsonl/64681
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 331 }
[ 2830, 3393, 38, 3744, 26040, 675, 1155, 353, 8840, 836, 8, 341, 2405, 829, 914, 271, 3244, 16708, 445, 84344, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 3174, 354, 1669, 40786, 3153, 11, 220, 24, 340, 197, 42400, 1669, 330, 15216...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestConstraintUnion checks Constraint.UnionWith across self-union,
// overlapping, adjacent, disjoint, unconstrained-producing and multi-column
// inputs, in both argument orders.
func TestConstraintUnion(t *testing.T) {
	test := func(t *testing.T, evalCtx *tree.EvalContext, left, right *Constraint, expected string) {
		t.Helper()
		// UnionWith mutates the receiver, so operate on a copy of left.
		clone := *left
		clone.UnionWith(evalCtx, right)

		if actual := clone.String(); actual != expected {
			format := "left: %s, right: %s, expected: %v, actual: %v"
			t.Errorf(format, left.String(), right.String(), expected, actual)
		}
	}

	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	data := newConstraintTestData(&evalCtx)

	// Union constraint with itself.
	test(t, &evalCtx, &data.c1to10, &data.c1to10, "/1: [/1 - /10]")

	// Merge first spans in each constraint.
	test(t, &evalCtx, &data.c1to10, &data.c5to25, "/1: [/1 - /25)")
	test(t, &evalCtx, &data.c5to25, &data.c1to10, "/1: [/1 - /25)")

	// Disjoint spans in each constraint.
	test(t, &evalCtx, &data.c1to10, &data.c40to50, "/1: [/1 - /10] [/40 - /50]")
	test(t, &evalCtx, &data.c40to50, &data.c1to10, "/1: [/1 - /10] [/40 - /50]")

	// Adjacent disjoint spans in each constraint.
	test(t, &evalCtx, &data.c20to30, &data.c30to40, "/1: [/20 - /40]")
	test(t, &evalCtx, &data.c30to40, &data.c20to30, "/1: [/20 - /40]")

	// Merge multiple spans down to single span.
	var left, right Constraint
	left = data.c1to10
	left.UnionWith(&evalCtx, &data.c20to30)
	left.UnionWith(&evalCtx, &data.c40to50)
	right = data.c5to25
	right.UnionWith(&evalCtx, &data.c30to40)
	test(t, &evalCtx, &left, &right, "/1: [/1 - /50]")
	test(t, &evalCtx, &right, &left, "/1: [/1 - /50]")

	// Multiple disjoint spans on each side.
	left = data.c1to10
	left.UnionWith(&evalCtx, &data.c20to30)
	right = data.c40to50
	right.UnionWith(&evalCtx, &data.c60to70)
	test(t, &evalCtx, &left, &right, "/1: [/1 - /10] [/20 - /30) [/40 - /50] (/60 - /70)")
	test(t, &evalCtx, &right, &left, "/1: [/1 - /10] [/20 - /30) [/40 - /50] (/60 - /70)")

	// Multiple spans that yield the unconstrained span.
	left = data.cLt10
	right = data.c5to25
	right.UnionWith(&evalCtx, &data.cGt20)
	test(t, &evalCtx, &left, &right, "/1: unconstrained")
	test(t, &evalCtx, &right, &left, "/1: unconstrained")

	// The operands themselves must be untouched by the helper's copy-based
	// union above.
	if left.String() != "/1: [ - /10)" {
		t.Errorf("tryUnionWith failed, but still modified one of the spans: %v", left.String())
	}
	if right.String() != "/1: (/5 - ]" {
		t.Errorf("tryUnionWith failed, but still modified one of the spans: %v", right.String())
	}

	// Multiple columns.
	expected := "/1/2: [/'cherry'/true - /'strawberry']"
	test(t, &evalCtx, &data.cherryRaspberry, &data.mangoStrawberry, expected)
	test(t, &evalCtx, &data.mangoStrawberry, &data.cherryRaspberry, expected)
}
explode_data.jsonl/59305
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1101 }
[ 2830, 3393, 17890, 32658, 1155, 353, 8840, 836, 8, 341, 18185, 1669, 2915, 1155, 353, 8840, 836, 11, 5603, 23684, 353, 9344, 5142, 831, 1972, 11, 2115, 11, 1290, 353, 17890, 11, 3601, 914, 8, 341, 197, 3244, 69282, 741, 197, 197, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestStorageKey_EncodedLength verifies that a StorageKey's encoded length
// equals the number of bytes in the decoded hex input.
func TestStorageKey_EncodedLength(t *testing.T) {
	assertEncodedLength(t, []encodedLengthAssert{
		{NewStorageKey(MustHexDecodeString("0x00")), 1},
		{NewStorageKey(MustHexDecodeString("0xab1234")), 3},
		{NewStorageKey(MustHexDecodeString("0x0001")), 2},
	})
}
explode_data.jsonl/3060
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 105 }
[ 2830, 3393, 5793, 1592, 93529, 6737, 4373, 1155, 353, 8840, 836, 8, 341, 6948, 46795, 4373, 1155, 11, 3056, 19329, 4373, 8534, 515, 197, 197, 90, 3564, 5793, 1592, 3189, 590, 20335, 32564, 703, 445, 15, 87, 15, 15, 35674, 220, 16, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestOperationRequest_Validate covers OperationRequest.Validate: RequestId
// is optional but must be a UUID when set, ServiceName is required, and
// Action is required and must be a recognized value ("remove" is not).
func TestOperationRequest_Validate(t *testing.T) {
	// Each case starts from the valid fixture and breaks one field.
	valid := testOperationRequest

	noReqId := testOperationRequest
	noReqId.RequestId = ""

	invalidReqId := testOperationRequest
	invalidReqId.RequestId = "abc"

	noServiceName := testOperationRequest
	noServiceName.ServiceName = ""

	noAction := testOperationRequest
	noAction.Action = ""

	invalidAction := testOperationRequest
	invalidAction.Action = "remove"

	tests := []struct {
		name        string
		request     OperationRequest
		expectedErr bool
	}{
		{"valid", valid, false},
		{"valid - no Request Id", noReqId, false},
		{"invalid - RequestId is not an uuid", invalidReqId, true},
		{"invalid - no ServiceName", noServiceName, true},
		{"invalid - no Action", noAction, true},
		{"invalid - invalid Action", invalidAction, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.request.Validate()
			if tt.expectedErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
explode_data.jsonl/6543
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 366 }
[ 2830, 3393, 8432, 1900, 62, 17926, 1155, 353, 8840, 836, 8, 341, 56322, 1669, 1273, 8432, 1900, 198, 72104, 27234, 764, 1669, 1273, 8432, 1900, 198, 72104, 27234, 764, 9659, 764, 284, 8389, 197, 11808, 27234, 764, 1669, 1273, 8432, 1900...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGet_badSchema(t *testing.T) { dst := tempDir(t) u := testModule("basic") u = strings.Replace(u, "file", "nope", -1) if err := Get(dst, u); err == nil { t.Fatal("should error") } }
explode_data.jsonl/806
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 1949, 34199, 8632, 1155, 353, 8840, 836, 8, 341, 52051, 1669, 2730, 6184, 1155, 340, 10676, 1669, 1273, 3332, 445, 22342, 1138, 10676, 284, 9069, 20858, 8154, 11, 330, 1192, 497, 330, 2152, 375, 497, 481, 16, 692, 743, 184...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
// TestBatchCommit checks tidb_batch_commit behavior with StmtCountLimit=3:
// under autocommit=1, statements inside an explicit transaction become
// visible to a second session (tk1) in committed batches once the limit is
// reached, while rollback/commit end the transaction cleanly; under
// autocommit=0, rollback discards all pending statements.
func TestBatchCommit(t *testing.T) {
	store, clean := realtikvtest.CreateMockStoreAndSetup(t)
	defer clean()

	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_batch_commit = 1")
	tk.MustExec("set tidb_disable_txn_auto_retry = 0")
	tk.MustExec("create table t (id int)")
	defer config.RestoreFunc()()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.Performance.StmtCountLimit = 3
	})

	// tk1 is a second session used to observe what has been committed.
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustExec("use test")
	tk.MustExec("SET SESSION autocommit = 1")
	tk.MustExec("begin")
	tk.MustExec("insert into t values (1)")
	tk1.MustQuery("select * from t").Check(testkit.Rows())
	tk.MustExec("insert into t values (2)")
	tk1.MustQuery("select * from t").Check(testkit.Rows())
	tk.MustExec("rollback")
	tk1.MustQuery("select * from t").Check(testkit.Rows())

	// The above rollback will not make the session in transaction.
	tk.MustExec("insert into t values (1)")
	tk1.MustQuery("select * from t").Check(testkit.Rows("1"))
	tk.MustExec("delete from t")
	tk.MustExec("begin")
	tk.MustExec("insert into t values (5)")
	tk1.MustQuery("select * from t").Check(testkit.Rows())
	tk.MustExec("insert into t values (6)")
	tk1.MustQuery("select * from t").Check(testkit.Rows())
	tk.MustExec("insert into t values (7)")
	// After the third statement the batch becomes visible to tk1.
	tk1.MustQuery("select * from t").Check(testkit.Rows("5", "6", "7"))

	// The session is still in transaction.
	tk.MustExec("insert into t values (8)")
	tk1.MustQuery("select * from t").Check(testkit.Rows("5", "6", "7"))
	tk.MustExec("insert into t values (9)")
	tk1.MustQuery("select * from t").Check(testkit.Rows("5", "6", "7"))
	tk.MustExec("insert into t values (10)")
	tk1.MustQuery("select * from t").Check(testkit.Rows("5", "6", "7"))
	tk.MustExec("commit")
	tk1.MustQuery("select * from t").Check(testkit.Rows("5", "6", "7", "8", "9", "10"))

	// The above commit will not make the session in transaction.
	tk.MustExec("insert into t values (11)")
	tk1.MustQuery("select * from t").Check(testkit.Rows("5", "6", "7", "8", "9", "10", "11"))

	tk.MustExec("delete from t")
	tk.MustExec("SET SESSION autocommit = 0")
	tk.MustExec("insert into t values (1)")
	tk.MustExec("insert into t values (2)")
	tk.MustExec("insert into t values (3)")
	tk.MustExec("rollback")
	tk1.MustExec("insert into t values (4)")
	tk1.MustExec("insert into t values (5)")
	tk.MustQuery("select * from t").Check(testkit.Rows("4", "5"))
}
explode_data.jsonl/5737
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 911 }
[ 2830, 3393, 21074, 33441, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1931, 83, 1579, 85, 1944, 7251, 11571, 6093, 3036, 21821, 1155, 340, 16867, 4240, 741, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3553, 340, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestRowsError asserts that requesting rows for a sheet that does not exist
// returns the expected error message.
func TestRowsError(t *testing.T) {
	xlsx, err := OpenFile(filepath.Join("test", "Book1.xlsx"))
	if !assert.NoError(t, err) {
		t.FailNow()
	}
	_, err = xlsx.Rows("SheetN")
	assert.EqualError(t, err, "sheet SheetN is not exist")
}
explode_data.jsonl/30487
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 103 }
[ 2830, 3393, 9024, 1454, 1155, 353, 8840, 836, 8, 341, 10225, 29017, 11, 1848, 1669, 5264, 1703, 34793, 22363, 445, 1944, 497, 330, 7134, 16, 46838, 5455, 743, 753, 2207, 35699, 1155, 11, 1848, 8, 341, 197, 3244, 57243, 7039, 741, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSnapCreateTransactionTokenWithMap(t *testing.T) { midtrans.ServerKey = sandboxServerKey //assert.Equal(t, sandboxServerKey, midtrans.ServerKey ) res, err := CreateTransactionTokenWithMap(generateReqWithMap()) if err != nil { fmt.Println("Snap Request Error", err.GetMessage()) } fmt.Println("Snap response", res) assert.Equal(t, IsValidUUID(res), true) }
explode_data.jsonl/13458
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 127 }
[ 2830, 3393, 61871, 4021, 8070, 3323, 2354, 2227, 1155, 353, 8840, 836, 8, 341, 2109, 307, 1458, 22997, 1592, 284, 42754, 5475, 1592, 198, 197, 322, 2207, 12808, 1155, 11, 42754, 5475, 1592, 11, 5099, 1458, 22997, 1592, 5125, 10202, 11, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestExpand renders each Application fixture through api.Expand and compares
// the output against golden files (goldie), keyed by the subtest name.
func TestExpand(t *testing.T) {
	testCases := []struct {
		name    string
		withApp api.Application
	}{
		{
			name: "Should produce expected output when expanding a simple app",
			withApp: api.Application{
				Name:    "dummy-app",
				Image:   "dummygres",
				Version: "0.1.0",
			},
		},
		{
			name: "Should produce expected output when expanding app with optionals",
			withApp: api.Application{
				Name:            "dummy-app",
				Namespace:       "dummyns",
				Image:           "dummygres",
				Version:         "8.0.1",
				ImagePullSecret: "so-secret",
				Replicas:        3,
				Environment: map[string]string{
					"DUMMY_VAR":  "avalue",
					"DUMMY_HOST": "somehost",
				},
			},
		},
		{
			name: "Should produce expected output when expanding app with service",
			withApp: api.Application{
				Name:    "dummy-app",
				Image:   "dummyredis",
				Version: "8.2.1",
				Port:    3000,
			},
		},
		{
			name: "Should produce expected output when expanding app with service and ingress",
			withApp: api.Application{
				Name:    "dummy-app",
				Image:   "dummyredis",
				Version: "8.2.1",
				Port:    3000,
				Url:     "http://dummy.io",
			},
		},
		{
			name: "Should produce expected output when expanding app with annotated ingress",
			withApp: api.Application{
				Name:    "dummy-app",
				Image:   "dummyredis",
				Version: "8.2.1",
				Port:    3000,
				Url:     "http://dummy.io",
				Ingress: api.IngressConfig{
					Annotations: map[string]string{
						"cert-manager.io/cluster-issuer": "letsencrypt-production",
					},
				},
			},
		},
		{
			name: "Should produce expected output when expanding app with service and tls enabled ingress",
			withApp: api.Application{
				Name:    "dummy-app",
				Image:   "dummyredis",
				Version: "8.2.1",
				Port:    3000,
				Url:     "https://dummy.io",
			},
		},
		{
			name: "Should produce expected output when expanding app with volumes",
			withApp: api.Application{
				Name:    "dummy-app",
				Image:   "dummygres",
				Version: "0.1.0",
				Volumes: []map[string]string{
					{"/etc/config": "4Gi"},
				},
			},
		},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			buf := bytes.NewBufferString("")
			err := api.Expand(buf, tc.withApp, false)
			if err != nil {
				t.Fatal(err)
			}

			g := goldie.New(t)
			g.Assert(t, t.Name(), buf.Bytes())
		})
	}
}
explode_data.jsonl/71429
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1131 }
[ 2830, 3393, 38946, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 914, 271, 197, 46948, 2164, 6330, 17521, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, 330, 14996, 8193, 3601, 2550, 979, 23175, 264, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestUnconnectableRanUpdateNodebInfoFailure verifies that when persisting
// the SHUT_DOWN state fails, ReconnectRan surfaces the error after reading
// the nodeb once and attempting exactly one update.
func TestUnconnectableRanUpdateNodebInfoFailure(t *testing.T) {
	_, _, readerMock, writerMock, ranReconnectionManager, _ := initRanLostConnectionTest(t)
	origNodebInfo := &entities.NodebInfo{RanName: ranName, GlobalNbId: &entities.GlobalNbId{PlmnId: "xxx", NbId: "yyy"}, ConnectionStatus: entities.ConnectionStatus_SHUTTING_DOWN}
	var rnibErr error
	readerMock.On("GetNodeb", ranName).Return(origNodebInfo, rnibErr)
	// The manager is expected to move SHUTTING_DOWN -> SHUT_DOWN; the write
	// of that transition is made to fail.
	updatedNodebInfo := *origNodebInfo
	updatedNodebInfo.ConnectionStatus = entities.ConnectionStatus_SHUT_DOWN
	writerMock.On("UpdateNodebInfo", &updatedNodebInfo).Return(common.NewInternalError(errors.New("Error")))
	err := ranReconnectionManager.ReconnectRan(ranName)
	assert.NotNil(t, err)
	readerMock.AssertCalled(t, "GetNodeb", ranName)
	writerMock.AssertNumberOfCalls(t, "UpdateNodebInfo", 1)
}
explode_data.jsonl/42902
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 297 }
[ 2830, 3393, 1806, 6459, 480, 49, 276, 4289, 1955, 65, 1731, 17507, 1155, 353, 8840, 836, 8, 341, 197, 6878, 8358, 6604, 11571, 11, 6916, 11571, 11, 10613, 693, 7742, 2043, 11, 716, 1669, 2930, 49, 276, 47253, 4526, 2271, 1155, 692, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDiscard(t *testing.T) { d := Discard{D: conn.Half} if s := d.String(); s != "discard" { t.Fatal(s) } if v := d.Duplex(); v != conn.Half { t.Fatal(v) } if err := d.Tx(nil, nil); err != nil { t.Fatal(err) } if err := d.Tx([]byte{0}, []byte{0}); err != nil { t.Fatal(err) } }
explode_data.jsonl/44859
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 152 }
[ 2830, 3393, 23477, 567, 1155, 353, 8840, 836, 8, 341, 2698, 1669, 11735, 567, 90, 35, 25, 4534, 3839, 3104, 532, 743, 274, 1669, 294, 6431, 2129, 274, 961, 330, 76492, 1, 341, 197, 3244, 26133, 1141, 340, 197, 532, 743, 348, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestIsInt8(t *testing.T) { var v *Value v = &Value{data: int8(1)} assert.True(t, v.IsInt8()) v = &Value{data: []int8{int8(1)}} assert.True(t, v.IsInt8Slice()) }
explode_data.jsonl/23426
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 85 }
[ 2830, 3393, 3872, 1072, 23, 1155, 353, 8840, 836, 8, 1476, 2405, 348, 353, 1130, 271, 5195, 284, 609, 1130, 90, 691, 25, 526, 23, 7, 16, 10569, 6948, 32443, 1155, 11, 348, 4506, 1072, 23, 12367, 5195, 284, 609, 1130, 90, 691, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestConfigLoadCrap ensures loading a non-existent config file reports a
// non-nil error.
func TestConfigLoadCrap(t *testing.T) {
	mk := NewMockKubeSettings()
	cfg := config.NewConfig(mk)
	assert.NotNil(t, cfg.Load("testdata/k9s_not_there.yml"))
}
explode_data.jsonl/19261
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 67 }
[ 2830, 3393, 2648, 5879, 34, 4611, 1155, 353, 8840, 836, 8, 341, 2109, 74, 1669, 1532, 11571, 42, 3760, 6086, 741, 50286, 1669, 2193, 7121, 2648, 1255, 74, 340, 6948, 93882, 1155, 11, 13286, 13969, 445, 92425, 14109, 24, 82, 7913, 5854...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestServerSourcemapElasticsearch checks how the frontend sourcemap
// Elasticsearch hosts are resolved: explicitly configured
// source_mapping.elasticsearch.hosts win; otherwise an enabled elasticsearch
// output supplies its hosts, and a disabled output yields none.
func TestServerSourcemapElasticsearch(t *testing.T) {
	cases := []struct {
		expected     []string
		config       m
		outputConfig m
	}{
		{
			expected: nil,
			config:   m{},
		},
		{
			// source_mapping.elasticsearch.hosts set
			expected: []string{"localhost:5200"},
			config: m{
				"frontend": m{
					"enabled":                            "true",
					"source_mapping.elasticsearch.hosts": []string{"localhost:5200"},
				},
			},
		},
		{
			// source_mapping.elasticsearch.hosts not set, elasticsearch.enabled = true
			expected: []string{"localhost:5201"},
			config: m{
				"frontend": m{
					"enabled": "true",
				},
			},
			outputConfig: m{
				"elasticsearch": m{
					"enabled": true,
					"hosts":   []string{"localhost:5201"},
				},
			},
		},
		{
			// source_mapping.elasticsearch.hosts not set, elasticsearch.enabled = false
			expected: nil,
			config: m{
				"frontend": m{
					"enabled": "true",
				},
			},
			outputConfig: m{
				"elasticsearch": m{
					"enabled": false,
					"hosts":   []string{"localhost:5202"},
				},
			},
		},
	}
	for _, testCase := range cases {
		ucfg, err := common.NewConfigFrom(testCase.config)
		if !assert.NoError(t, err) {
			continue
		}

		var beatConfig beat.BeatConfig
		ocfg, err := common.NewConfigFrom(testCase.outputConfig)
		if !assert.NoError(t, err) {
			continue
		}
		beatConfig.Output.Unpack(ocfg)

		apm, teardown, err := setupServer(t, ucfg, &beatConfig)
		if assert.NoError(t, err) {
			assert.Equal(t, testCase.expected, apm.smapElasticsearchHosts())
		}
		teardown()
	}
}
explode_data.jsonl/4946
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 719 }
[ 2830, 3393, 5475, 50, 413, 66, 42040, 36, 51179, 1836, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 42400, 257, 3056, 917, 198, 197, 25873, 981, 296, 198, 197, 21170, 2648, 296, 198, 197, 59403, 197, 197, 51...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestControllerApplyGameServerAddressAndPort checks that applying address
// and port info copies the pod's node address, node name and host port onto
// the GameServer status.
func TestControllerApplyGameServerAddressAndPort(t *testing.T) {
	t.Parallel()
	c, m := newFakeController()
	gsFixture := &v1alpha1.GameServer{ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, Spec: newSingleContainerSpec(), Status: v1alpha1.GameServerStatus{State: v1alpha1.RequestReady}}
	gsFixture.ApplyDefaults()
	node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: corev1.NodeStatus{Addresses: []corev1.NodeAddress{{Address: ipFixture, Type: corev1.NodeExternalIP}}}}
	pod, err := gsFixture.Pod()
	assert.Nil(t, err)
	// Pin the pod to the fake node so the controller resolves its address.
	pod.Spec.NodeName = node.ObjectMeta.Name
	m.KubeClient.AddReactor("list", "nodes", func(action k8stesting.Action) (bool, runtime.Object, error) {
		return true, &corev1.NodeList{Items: []corev1.Node{node}}, nil
	})

	_, cancel := agtesting.StartInformers(m, c.gameServerSynced)
	defer cancel()

	gs, err := c.applyGameServerAddressAndPort(gsFixture, pod)
	assert.Nil(t, err)
	assert.Equal(t, gs.Spec.Ports[0].HostPort, gs.Status.Ports[0].Port)
	assert.Equal(t, ipFixture, gs.Status.Address)
	assert.Equal(t, node.ObjectMeta.Name, gs.Status.NodeName)
}
explode_data.jsonl/25431
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 422 }
[ 2830, 3393, 2051, 28497, 4868, 5475, 4286, 3036, 7084, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 1444, 11, 296, 1669, 501, 52317, 2051, 2822, 3174, 82, 18930, 1669, 609, 85, 16, 7141, 16, 20940, 5475, 90, 1190, 12175, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDSNUnsafeCollation(t *testing.T) { _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") if err != errInvalidDSNUnsafeCollation { t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err) } _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") if err != nil { t.Errorf("expected %v, got %v", nil, err) } _, err = ParseDSN("/dbname?collation=gbk_chinese_ci") if err != nil { t.Errorf("expected %v, got %v", nil, err) } _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true") if err != nil { t.Errorf("expected %v, got %v", nil, err) } _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") if err != nil { t.Errorf("expected %v, got %v", nil, err) } _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") if err != nil { t.Errorf("expected %v, got %v", nil, err) } _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") if err != nil { t.Errorf("expected %v, got %v", nil, err) } }
explode_data.jsonl/70846
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 513 }
[ 2830, 3393, 5936, 45, 78770, 15265, 367, 1155, 353, 8840, 836, 8, 972, 197, 6878, 1848, 1669, 14775, 5936, 45, 4283, 35265, 30, 17222, 367, 28, 9511, 74, 4138, 7346, 43514, 5, 2245, 45429, 4870, 11265, 6060, 743, 1848, 961, 1848, 7928...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
// TestSqrtNegative checks that Sqrt panics with the expected message when
// given a negative input; the deferred recover captures the panic value.
func TestSqrtNegative(t *testing.T) {
	defer func() {
		checkErr(t, recover(), "Sqrt of negative number")
	}()
	x := new(big.Float).SetInt64(-2)
	var z big.Float
	pslq := New(64)
	pslq.Sqrt(x, &z)
}
explode_data.jsonl/66841
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 90 }
[ 2830, 3393, 50, 8140, 38489, 1155, 353, 8840, 836, 8, 341, 16867, 2915, 368, 314, 1779, 7747, 1155, 11, 11731, 1507, 330, 50, 8140, 315, 8225, 1372, 899, 50746, 10225, 1669, 501, 75616, 29794, 568, 1649, 1072, 21, 19, 4080, 17, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestQuickClose verifies that closing a multi-statement result set early
// leaves the transaction usable for further queries and a final commit.
func TestQuickClose(t *testing.T) {
	db := openTestConn(t)
	defer db.Close()

	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	rows, err := tx.Query("SELECT 1; SELECT 2;")
	if err != nil {
		t.Fatal(err)
	}
	// Close before draining either result set.
	if err := rows.Close(); err != nil {
		t.Fatal(err)
	}

	var id int
	if err := tx.QueryRow("SELECT 3").Scan(&id); err != nil {
		t.Fatal(err)
	}
	if id != 3 {
		t.Fatalf("unexpected %d", id)
	}
	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}
}
explode_data.jsonl/73508
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 24318, 7925, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 1787, 2271, 9701, 1155, 340, 16867, 2927, 10421, 2822, 46237, 11, 1848, 1669, 2927, 28467, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 68438,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func Test_signalfxeceiver_New(t *testing.T) { defaultConfig := (&Factory{}).CreateDefaultConfig().(*Config) type args struct { config Config nextConsumer consumer.MetricsConsumer } tests := []struct { name string args args wantErr error }{ { name: "nil_nextConsumer", args: args{ config: *defaultConfig, }, wantErr: errNilNextConsumer, }, { name: "empty_endpoint", args: args{ config: *defaultConfig, nextConsumer: new(exportertest.SinkMetricsExporter), }, wantErr: errEmptyEndpoint, }, { name: "happy_path", args: args{ config: Config{ ReceiverSettings: configmodels.ReceiverSettings{ Endpoint: "localhost:1234", }, }, nextConsumer: new(exportertest.SinkMetricsExporter), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := New(zap.NewNop(), tt.args.config, tt.args.nextConsumer) assert.Equal(t, tt.wantErr, err) if err == nil { assert.NotNil(t, got) } else { assert.Nil(t, got) } }) } }
explode_data.jsonl/3842
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 492 }
[ 2830, 3393, 11172, 3104, 8371, 12862, 39582, 1155, 353, 8840, 836, 8, 341, 11940, 2648, 1669, 15899, 4153, 6257, 568, 4021, 3675, 2648, 1005, 4071, 2648, 340, 13158, 2827, 2036, 341, 197, 25873, 981, 5532, 198, 197, 28144, 29968, 11502, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestBisectFixJob(t *testing.T) { c := NewCtx(t) defer c.Close() // Upload a crash report build := testBuild(1) c.client2.UploadBuild(build) crash := testCrashWithRepro(build, 1) c.client2.ReportCrash(crash) c.client2.pollEmailBug() // Receive the JobBisectCause resp := c.client2.pollJobs(build.Manager) c.client2.expectNE(resp.ID, "") c.client2.expectEQ(resp.Type, dashapi.JobBisectCause) done := &dashapi.JobDoneReq{ ID: resp.ID, Error: []byte("testBisectFixJob:JobBisectCause"), } c.client2.expectOK(c.client2.JobDone(done)) // Ensure no more jobs resp = c.client2.pollJobs(build.Manager) c.client2.expectEQ(resp.ID, "") // Advance time by 30 days and read out any notification emails { c.advanceTime(30 * 24 * time.Hour) msg := c.client2.pollEmailBug() c.expectEQ(msg.Subject, "title1") c.expectTrue(strings.Contains(msg.Body, "Sending this report upstream.")) msg = c.client2.pollEmailBug() c.expectEQ(msg.Subject, "title1") c.expectTrue(strings.Contains(msg.Body, "syzbot found the following crash")) } // Ensure that we get a JobBisectFix resp = c.client2.pollJobs(build.Manager) c.client2.expectNE(resp.ID, "") c.client2.expectEQ(resp.Type, dashapi.JobBisectFix) done = &dashapi.JobDoneReq{ ID: resp.ID, Error: []byte("testBisectFixJob:JobBisectFix"), } c.client2.expectOK(c.client2.JobDone(done)) }
explode_data.jsonl/51143
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 580 }
[ 2830, 3393, 33, 285, 439, 25958, 12245, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 1532, 23684, 1155, 340, 16867, 272, 10421, 2822, 197, 322, 24996, 264, 9920, 1895, 198, 69371, 1669, 1273, 11066, 7, 16, 340, 1444, 6581, 17, 86597, 110...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTakeOwnership(t *testing.T) { // This only works in limited circumstances, so it's disabled in general. t.Skip() rwc := openTPMOrSkip(t) defer rwc.Close() ownerAuth := getAuth(ownerAuthEnvVar) srkAuth := getAuth(srkAuthEnvVar) // This test assumes that the TPM has been cleared using OwnerClear. pubek, err := ReadPubEK(rwc) if err != nil { t.Fatal("Couldn't read the public endorsement key from the TPM:", err) } if err := TakeOwnership(rwc, ownerAuth, srkAuth, pubek); err != nil { t.Fatal("Couldn't take ownership of the TPM:", err) } }
explode_data.jsonl/75359
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 207 }
[ 2830, 3393, 17814, 77166, 1155, 353, 8840, 836, 8, 341, 197, 322, 1096, 1172, 4278, 304, 7199, 13161, 11, 773, 432, 594, 8386, 304, 4586, 624, 3244, 57776, 741, 7000, 24028, 1669, 1787, 4239, 44, 2195, 35134, 1155, 340, 16867, 435, 24...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestValidateDL3014(t *testing.T) { cases := []struct { dockerfileStr string expectedRst []ValidateResult expectedErr error }{ { dockerfileStr: `FROM debian RUN apt-get install python=2.7 `, expectedRst: []ValidateResult{ {line: 2}, }, expectedErr: nil, }, { dockerfileStr: `FROM debian RUN apt-get install python=2.7 && apt-get install ruby `, expectedRst: []ValidateResult{ {line: 2}, }, expectedErr: nil, }, } for i, tc := range cases { rst, err := parseDockerfile(tc.dockerfileStr) if err != nil { t.Errorf("#%d parse error %s", i, tc.dockerfileStr) } gotRst, gotErr := validateDL3014(rst.AST) if !isValidateResultEq(gotRst, tc.expectedRst) { t.Errorf("#%d results deep equal has returned: want %v, got %v", i, tc.expectedRst, gotRst) } if gotErr != tc.expectedErr { t.Errorf("#%d error has returned: want %s, got %s", i, tc.expectedErr, gotErr) } cleanup(t) } }
explode_data.jsonl/12044
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 421 }
[ 2830, 3393, 17926, 16524, 18, 15, 16, 19, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 2698, 13659, 1192, 2580, 914, 198, 197, 42400, 49, 267, 256, 3056, 17926, 2077, 198, 197, 42400, 7747, 256, 1465, 198, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestSetClaims(t *testing.T) { c := Context{} now := time.Unix(time.Now().Unix(), 0) claims := map[string]interface{}{ apiProductListClaim: time.Now(), audienceClaim: "aud", clientIDClaim: nil, applicationNameClaim: "app", scopeClaim: nil, expClaim: float64(now.Unix()), developerEmailClaim: "email", } if err := c.setClaims(claims); err == nil { t.Errorf("setClaims without client_id should get error") } claims[clientIDClaim] = "clientID" if err := c.setClaims(claims); err == nil { t.Errorf("bad product list should error") } productsWant := []string{"product 1", "product 2"} claims[apiProductListClaim] = `["product 1", "product 2"]` if err := c.setClaims(claims); err != nil { t.Errorf("valid setClaims, got: %v", err) } if !reflect.DeepEqual(c.APIProducts, productsWant) { t.Errorf("apiProducts want: %s, got: %v", productsWant, c.APIProducts) } claimsWant := []string{"scope1", "scope2"} claims[scopeClaim] = "scope1 scope2" if err := c.setClaims(claims); err != nil { t.Errorf("valid setClaims, got: %v", err) } if !reflect.DeepEqual(claimsWant, c.Scopes) { t.Errorf("claims want: %s, got: %v", claimsWant, claims[scopeClaim]) } }
explode_data.jsonl/67151
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 510 }
[ 2830, 3393, 1649, 51133, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 9608, 16094, 80922, 1669, 882, 10616, 941, 9730, 13244, 1005, 55832, 1507, 220, 15, 340, 197, 48561, 1669, 2415, 14032, 31344, 67066, 197, 54299, 4816, 852, 45544, 25, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestFileCmd(t *testing.T) { mockUpdaterFromFlags := rotor.NewMockUpdaterFromFlags(nil) cmd := FileCmd(mockUpdaterFromFlags) cmd.Flags.Parse([]string{}) runner := cmd.Runner.(*fileRunner) assert.Equal(t, runner.updaterFlags, mockUpdaterFromFlags) assert.NonNil(t, runner.codecFlags) }
explode_data.jsonl/10926
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 116 }
[ 2830, 3393, 1703, 15613, 1155, 353, 8840, 836, 8, 341, 77333, 79854, 3830, 9195, 1669, 62025, 7121, 11571, 79854, 3830, 9195, 27907, 692, 25920, 1669, 2887, 15613, 30389, 79854, 3830, 9195, 340, 25920, 51887, 8937, 10556, 917, 37790, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRowsCanHaveSingleStatPanels(t *testing.T) { req := require.New(t) board := sdk.NewBoard("") panel := New(board, "", WithSingleStat("Some stat")) req.Len(panel.builder.Panels, 1) }
explode_data.jsonl/39823
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 73 }
[ 2830, 3393, 9024, 6713, 12116, 10888, 15878, 35693, 2010, 1155, 353, 8840, 836, 8, 341, 24395, 1669, 1373, 7121, 1155, 340, 59868, 1669, 45402, 7121, 11932, 445, 5130, 44952, 1669, 1532, 20770, 11, 7342, 3085, 10888, 15878, 445, 8373, 279...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestClientRestartLeaseTime(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() clientStack, _, clientEP, _, c := setupTestEnv(ctx, t, defaultServerCfg) // Always return the same arbitrary time. c.now = func() time.Time { return time.Monotonic((1234 * time.Second.Nanoseconds()) + 5678) } acquiredDone := make(chan struct{}) c.acquiredFunc = func(lost, acquired tcpip.AddressWithPrefix, _ Config) { removeLostAddAcquired(t, clientStack, lost, acquired) acquiredDone <- struct{}{} } clientCtx, clientCancel := context.WithCancel(ctx) // Acquire address and transition to bound state. go c.Run(clientCtx) <-acquiredDone checkTimes := func(now time.Time, leaseLength, renew, rebind Seconds) { info := c.Info() if got, want := info.LeaseExpiration, now.Add(leaseLength.Duration()); got != want { t.Errorf("info.LeaseExpiration=%s, want=%s", got, want) } if got, want := info.RenewTime, now.Add(renew.Duration()); got != want { t.Errorf("info.RenewTime=%s, want=%s", got, want) } if got, want := info.RebindTime, now.Add(rebind.Duration()); got != want { t.Errorf("info.RebindTime=%s, want=%s", got, want) } if info.Config.LeaseLength != leaseLength { t.Errorf("info.Config.LeaseLength=%s, want=%s", info.Config.LeaseLength, leaseLength) } if info.Config.RenewTime != renew { t.Errorf("info.Config.RenewTime=%s, want=%s", info.Config.RenewTime, renew) } if info.Config.RebindTime != rebind { t.Errorf("info.Config.RebindTime=%s, want=%s", info.Config.RebindTime, rebind) } } renewTime, rebindTime := defaultRenewTime(defaultLeaseLength), defaultRebindTime(defaultLeaseLength) checkTimes(c.now(), defaultLeaseLength, renewTime, rebindTime) // Simulate interface going down. 
clientCancel() <-acquiredDone zero := Seconds(0) checkTimes(time.Time{}, zero, zero, zero) clientCtx, clientCancel = context.WithCancel(ctx) defer clientCancel() writeIntercept := make(chan struct{}) intercepts := 0 clientEP.onWritePacket = func(pkt *stack.PacketBuffer) *stack.PacketBuffer { ipv4Packet := header.IPv4(pkt.Data().AsRange().ToOwnedView()) udpPacket := header.UDP(ipv4Packet.Payload()) dhcpPacket := hdr(udpPacket.Payload()) opts, err := dhcpPacket.options() if err != nil { t.Fatalf("packet missing options: %s", err) } typ, err := opts.dhcpMsgType() if err != nil { t.Fatalf("packet missing message type: %s", err) } if typ == dhcpDISCOVER { // maxIntercepts is selected to cause an acquisition attempt to timeout // given the defaultAcquireTimeout and defaultRetransTime. // maxIntercepts * defaultRetransTime >= defaultAcquireTime const maxIntercepts = 3 if intercepts < maxIntercepts { intercepts++ return nil } if intercepts == maxIntercepts { writeIntercept <- struct{}{} } } return pkt } // Restart client and transition to bound. go c.Run(clientCtx) <-writeIntercept <-acquiredDone checkTimes(c.now(), defaultLeaseLength, renewTime, rebindTime) info := c.Info() if info.State != bound { t.Errorf("info.State=%s, want=%s", info.State, bound) } }
explode_data.jsonl/37444
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1197 }
[ 2830, 3393, 2959, 59354, 2304, 519, 1462, 1155, 353, 8840, 836, 8, 341, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 16867, 9121, 2822, 25291, 4336, 11, 8358, 2943, 9197, 11, 8358, 272, 1669, 6505, 2271, 14359, 7502, 11,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChainProviderIsExpired(t *testing.T) { stubProvider := &stubProvider{expired: true} p := &ChainProvider{ Providers: []Provider{ stubProvider, }, } if !p.IsExpired() { t.Errorf("Expect expired to be true before any Retrieve") } _, err := p.Retrieve() if err != nil { t.Errorf("Expect no error, got %v", err) } if p.IsExpired() { t.Errorf("Expect not expired after retrieve") } stubProvider.expired = true if !p.IsExpired() { t.Errorf("Expect return of expired provider") } _, err = p.Retrieve() if err != nil { t.Errorf("Expect no error, got %v", err) } if p.IsExpired() { t.Errorf("Expect not expired after retrieve") } }
explode_data.jsonl/34837
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 273 }
[ 2830, 3393, 18837, 5179, 3872, 54349, 1155, 353, 8840, 836, 8, 341, 18388, 392, 5179, 1669, 609, 59398, 5179, 90, 75532, 25, 830, 532, 3223, 1669, 609, 18837, 5179, 515, 197, 197, 37351, 25, 3056, 5179, 515, 298, 18388, 392, 5179, 345...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestApplicationStatusSuccess(t *testing.T) { for tn, tc := range map[string]struct { givenStatus gateway.Status expectedStatus gqlschema.ApplicationStatus }{ "serving": { givenStatus: gateway.StatusServing, expectedStatus: gqlschema.ApplicationStatusServing, }, "not serving": { givenStatus: gateway.StatusNotServing, expectedStatus: gqlschema.ApplicationStatusNotServing, }, "not configured": { givenStatus: gateway.StatusNotConfigured, expectedStatus: gqlschema.ApplicationStatusGatewayNotConfigured, }, } { t.Run(tn, func(t *testing.T) { // given statusGetterStub := automock.NewStatusGetter() statusGetterStub.On("GetStatus", "ec-prod").Return(tc.givenStatus, nil) resolver := application.NewApplicationResolver(nil, statusGetterStub) // when status, err := resolver.ApplicationStatusField(context.Background(), &gqlschema.Application{ Name: "ec-prod", }) // then require.NoError(t, err) assert.Equal(t, tc.expectedStatus, status) }) } }
explode_data.jsonl/28757
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 389 }
[ 2830, 3393, 4988, 2522, 7188, 1155, 353, 8840, 836, 8, 341, 2023, 43308, 11, 17130, 1669, 2088, 2415, 14032, 60, 1235, 341, 197, 3174, 2071, 2522, 262, 28795, 10538, 198, 197, 42400, 2522, 53045, 17349, 17521, 2522, 198, 197, 59403, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChatSrvEphemeralConvRetention(t *testing.T) { runWithMemberTypes(t, func(mt chat1.ConversationMembersType) { switch mt { case chat1.ConversationMembersType_KBFS: t.Logf("skipping kbfs stage") return default: // Fall through for other member types. } ctc := makeChatTestContext(t, "TestChatSrvRetention", 2) defer ctc.cleanup() users := ctc.users() ctx := ctc.as(t, users[0]).startCtx listener := newServerChatListener() ctc.as(t, users[1]).h.G().NotifyRouter.AddListener(listener) conv := mustCreateConversationForTest(t, ctc, users[0], chat1.TopicType_CHAT, mt, ctc.as(t, users[1]).user()) msgID := mustPostLocalForTest(t, ctc, users[0], conv, chat1.NewMessageBodyWithText(chat1.MessageText{Body: "hello!"})) consumeNewMsgRemote(t, listener, chat1.MessageType_TEXT) // set an ephemeral policy age := gregor1.ToDurationSec(time.Hour * 24) policy := chat1.NewRetentionPolicyWithEphemeral(chat1.RpEphemeral{Age: age}) mustSetConvRetentionLocal(t, ctc, users[0], conv.Id, policy) require.True(t, consumeSetConvRetention(t, listener).Eq(conv.Id)) msg := consumeNewMsgRemote(t, listener, chat1.MessageType_SYSTEM) verifyChangeRetentionSystemMessage(t, msg, chat1.MessageSystemChangeRetention{ IsTeam: false, IsInherit: false, Policy: policy, MembersType: mt, User: users[0].Username, }) // make sure we can supersede existing messages mustReactToMsg(ctx, t, ctc, users[0], conv, msgID, ":+1:") ephemeralMsgID := mustPostLocalEphemeralForTest(t, ctc, users[0], conv, chat1.NewMessageBodyWithText(chat1.MessageText{Body: "hello!"}), &age) mustReactToMsg(ctx, t, ctc, users[0], conv, ephemeralMsgID, ":+1:") }) }
explode_data.jsonl/63708
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 697 }
[ 2830, 3393, 15672, 50, 10553, 36, 59941, 3253, 34892, 86329, 1155, 353, 8840, 836, 8, 341, 56742, 2354, 9366, 4173, 1155, 11, 2915, 81618, 6236, 16, 4801, 22323, 24371, 929, 8, 341, 197, 8961, 11965, 341, 197, 2722, 6236, 16, 4801, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGCControllerHasUpdateFunc(t *testing.T) { backup := velerotest.NewTestBackup().WithName("backup").Backup expected := kube.NamespaceAndName(backup) client := fake.NewSimpleClientset(backup) fakeWatch := watch.NewFake() defer fakeWatch.Stop() client.PrependWatchReactor("backups", core.DefaultWatchReactor(fakeWatch, nil)) sharedInformers := informers.NewSharedInformerFactory(client, 0) controller := NewGCController( velerotest.NewLogger(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().DeleteBackupRequests(), client.VeleroV1(), ).(*gcController) keys := make(chan string) controller.syncHandler = func(key string) error { keys <- key return nil } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() go sharedInformers.Start(ctx.Done()) go controller.Run(ctx, 1) // wait for the AddFunc select { case <-ctx.Done(): t.Fatal("test timed out waiting for AddFunc") case key := <-keys: assert.Equal(t, expected, key) } backup.Status.Version = 1234 fakeWatch.Add(backup) // wait for the UpdateFunc select { case <-ctx.Done(): t.Fatal("test timed out waiting for UpdateFunc") case key := <-keys: assert.Equal(t, expected, key) } }
explode_data.jsonl/64498
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 450 }
[ 2830, 3393, 22863, 2051, 10281, 4289, 9626, 1155, 353, 8840, 836, 8, 341, 197, 31371, 1669, 348, 7865, 354, 477, 7121, 2271, 56245, 1005, 54523, 445, 31371, 1827, 56245, 198, 42400, 1669, 80958, 46011, 3036, 675, 7, 31371, 692, 25291, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestProjectResourceHandler_GetProjectResources(t *testing.T) { type fields struct { ProjectResourceManager *handler_mock.IResourceManagerMock } tests := []struct { name string fields fields request *http.Request wantParams *models.GetResourcesParams wantResult *models.GetResourcesResponse wantStatus int }{ { name: "get resource list", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return &testGetResourcesResponse, nil }, }, }, request: httptest.NewRequest(http.MethodGet, "/project/my-project/resource?gitCommitID=commit-id&pageSize=3&nextPageKey=2", nil), wantParams: &models.GetResourcesParams{ ResourceContext: models.ResourceContext{ Project: models.Project{ProjectName: "my-project"}, }, GetResourcesQuery: models.GetResourcesQuery{ GitCommitID: "commit-id", PageSize: 3, NextPageKey: "2", }, }, wantResult: &testGetResourcesResponse, wantStatus: http.StatusOK, }, { name: "get resource list - use default pageSize", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return &testGetResourcesResponse, nil }, }, }, request: httptest.NewRequest(http.MethodGet, "/project/my-project/resource?gitCommitID=commit-id", nil), wantParams: &models.GetResourcesParams{ ResourceContext: models.ResourceContext{ Project: models.Project{ProjectName: "my-project"}, }, GetResourcesQuery: models.GetResourcesQuery{ GitCommitID: "commit-id", PageSize: 20, }, }, wantResult: &testGetResourcesResponse, wantStatus: http.StatusOK, }, { name: "get resource list - use default pageSize and no git commit ID", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return &testGetResourcesResponse, nil }, }, }, request: 
httptest.NewRequest(http.MethodGet, "/project/my-project/resource", nil), wantParams: &models.GetResourcesParams{ ResourceContext: models.ResourceContext{ Project: models.Project{ProjectName: "my-project"}, }, GetResourcesQuery: models.GetResourcesQuery{ PageSize: 20, }, }, wantResult: &testGetResourcesResponse, wantStatus: http.StatusOK, }, { name: "get resource list - invalid value for pageSize", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return nil, errors.New("should not have been called") }, }, }, request: httptest.NewRequest(http.MethodGet, "/project/my-project/resource?pageSize=invalid", nil), wantParams: nil, wantResult: nil, wantStatus: http.StatusBadRequest, }, { name: "project not found", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return nil, errors2.ErrProjectNotFound }, }, }, request: httptest.NewRequest(http.MethodGet, "/project/my-project/resource?gitCommitID=commit-id&pageSize=3&nextPageKey=2", nil), wantParams: &models.GetResourcesParams{ ResourceContext: models.ResourceContext{ Project: models.Project{ProjectName: "my-project"}, }, GetResourcesQuery: models.GetResourcesQuery{ GitCommitID: "commit-id", PageSize: 3, NextPageKey: "2", }, }, wantResult: nil, wantStatus: http.StatusNotFound, }, { name: "random error", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return nil, errors.New("oops") }, }, }, request: httptest.NewRequest(http.MethodGet, "/project/my-project/resource?gitCommitID=commit-id&pageSize=3&nextPageKey=2", nil), wantParams: &models.GetResourcesParams{ ResourceContext: models.ResourceContext{ Project: models.Project{ProjectName: "my-project"}, }, 
GetResourcesQuery: models.GetResourcesQuery{ GitCommitID: "commit-id", PageSize: 3, NextPageKey: "2", }, }, wantResult: nil, wantStatus: http.StatusInternalServerError, }, { name: "project not set", fields: fields{ ProjectResourceManager: &handler_mock.IResourceManagerMock{ GetResourcesFunc: func(params models.GetResourcesParams) (*models.GetResourcesResponse, error) { return nil, errors.New("oops") }, }, }, request: httptest.NewRequest(http.MethodGet, "/project/%20/resource?gitCommitID=commit-id&pageSize=3&nextPageKey=2", nil), wantParams: nil, wantResult: nil, wantStatus: http.StatusBadRequest, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ph := NewProjectResourceHandler(tt.fields.ProjectResourceManager) router := gin.Default() router.GET("/project/:projectName/resource", ph.GetProjectResources) resp := performRequest(router, tt.request) if tt.wantParams != nil { require.Len(t, tt.fields.ProjectResourceManager.GetResourcesCalls(), 1) require.Equal(t, *tt.wantParams, tt.fields.ProjectResourceManager.GetResourcesCalls()[0].Params) } else { require.Empty(t, tt.fields.ProjectResourceManager.GetResourcesCalls()) } require.Equal(t, tt.wantStatus, resp.Code) if tt.wantResult != nil { result := &models.GetResourcesResponse{} err := json.Unmarshal(resp.Body.Bytes(), result) require.Nil(t, err) require.Equal(t, tt.wantResult, result) } }) } }
explode_data.jsonl/7379
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2343 }
[ 2830, 3393, 7849, 4783, 3050, 13614, 7849, 11277, 1155, 353, 8840, 836, 8, 341, 13158, 5043, 2036, 341, 197, 197, 7849, 32498, 353, 17905, 34134, 2447, 32498, 11571, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 981, 914, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestImports_ExistingModel(t *testing.T) { specDoc, err := loads.Spec("../fixtures/codegen/existing-model.yml") require.NoError(t, err) definitions := specDoc.Spec().Definitions opts := opts() k := "JsonWebKeySet" genModel, err := makeGenDefinition(k, "models", definitions[k], specDoc, opts) require.NoError(t, err) require.NotNil(t, genModel) require.NotNil(t, genModel.Imports) assert.Equal(t, "github.com/user/package", genModel.Imports["jwk"]) k = "JsonWebKey" genModel, err = makeGenDefinition(k, "models", definitions[k], specDoc, opts) require.NoError(t, err) require.NotNil(t, genModel) require.NotNil(t, genModel.Imports) assert.Equal(t, "github.com/user/package", genModel.Imports["jwk"]) }
explode_data.jsonl/2555
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 283 }
[ 2830, 3393, 31250, 62, 53067, 1712, 1155, 353, 8840, 836, 8, 341, 98100, 9550, 11, 1848, 1669, 20907, 36473, 17409, 45247, 46928, 4370, 25815, 11083, 28244, 33936, 1138, 17957, 35699, 1155, 11, 1848, 692, 7452, 4054, 82, 1669, 1398, 9550,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestEnsureMountPoint(t *testing.T) { errorTarget := "./error_is_likely_target" alreadyExistTarget := "./false_is_likely_exist_target" falseTarget := "./false_is_likely_target" azureFile := "./azure.go" tests := []struct { desc string target string expectedErr error }{ { desc: "[Error] Mocked by IsLikelyNotMountPoint", target: errorTarget, expectedErr: fmt.Errorf("fake IsLikelyNotMountPoint: fake error"), }, { desc: "[Error] Error opening file", target: falseTarget, expectedErr: &os.PathError{Op: "open", Path: "./false_is_likely_target", Err: syscall.ENOENT}, }, { desc: "[Error] Not a directory", target: azureFile, expectedErr: &os.PathError{Op: "mkdir", Path: "./azure.go", Err: syscall.ENOTDIR}, }, { desc: "[Success] Successful run", target: targetTest, expectedErr: nil, }, { desc: "[Success] Already existing mount", target: alreadyExistTarget, expectedErr: nil, }, } // Setup _ = makeDir(alreadyExistTarget) d := NewFakeDriver() fakeMounter := &fakeMounter{} fakeExec := &testingexec.FakeExec{ExactOrder: true} d.mounter = &mount.SafeFormatAndMount{ Interface: fakeMounter, Exec: fakeExec, } for _, test := range tests { _, err := d.ensureMountPoint(test.target) if !reflect.DeepEqual(err, test.expectedErr) { t.Errorf("[%s]: Unexpected Error: %v, expected error: %v", test.desc, err, test.expectedErr) } } // Clean up err := os.RemoveAll(alreadyExistTarget) assert.NoError(t, err) err = os.RemoveAll(targetTest) assert.NoError(t, err) }
explode_data.jsonl/36856
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 706 }
[ 2830, 3393, 64439, 16284, 2609, 1155, 353, 8840, 836, 8, 341, 18290, 6397, 1669, 5924, 841, 6892, 62, 14282, 11123, 698, 197, 45484, 25613, 6397, 1669, 5924, 3849, 6892, 62, 14282, 35906, 11123, 698, 36012, 6397, 1669, 5924, 3849, 6892, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestInt64(t *testing.T) { allocator := DirectAllocator(WithDirectSizer(func(capacity, newCapacity int) int { return newCapacity })) buf := allocator.Allocate(0) assert.Nil(t, buf.WriteInt64(25)) assert.Nil(t, buf.WriteInt64LE(25)) assert.Nil(t, buf.SetInt64(16, 52)) assert.Nil(t, buf.SetInt64LE(24, 52)) assertInt64(t, 25)(buf.GetInt64(0)) assertInt64(t, 25)(buf.GetInt64LE(8)) assertInt64(t, 25)(buf.ReadInt64()) assertInt64(t, 25)(buf.ReadInt64LE()) assertInt64(t, 52)(buf.ReadInt64()) assertInt64(t, 52)(buf.ReadInt64LE()) }
explode_data.jsonl/49173
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 240 }
[ 2830, 3393, 1072, 21, 19, 1155, 353, 8840, 836, 8, 341, 197, 57631, 1669, 7139, 42730, 7, 2354, 16027, 43158, 18552, 51386, 4018, 11, 501, 29392, 526, 8, 526, 341, 197, 853, 501, 29392, 198, 197, 93596, 26398, 1669, 43655, 9636, 22401...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContextRenderAttachment(t *testing.T) { w := httptest.NewRecorder() c, _ := CreateTestContext(w) newFilename := "new_filename.go" c.Request, _ = http.NewRequest("GET", "/", nil) c.FileAttachment("./gin.go", newFilename) assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "func New() *Engine {") assert.Equal(t, fmt.Sprintf("attachment; filename=\"%s\"", newFilename), w.HeaderMap.Get("Content-Disposition")) }
explode_data.jsonl/26794
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 164 }
[ 2830, 3393, 1972, 6750, 33569, 1155, 353, 8840, 836, 8, 341, 6692, 1669, 54320, 70334, 7121, 47023, 741, 1444, 11, 716, 1669, 4230, 2271, 1972, 3622, 340, 8638, 20759, 1669, 330, 931, 13323, 18002, 1837, 1444, 9659, 11, 716, 284, 1758, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestOutputOnStop(t *testing.T) { hitOutput := &HitOutput{} hitOutput2 := &HitOutput{} runner := &runner{} runner.addOutput(hitOutput) runner.addOutput(hitOutput2) runner.outputOnStop() if !hitOutput.onStop { t.Error("hitOutput's OnStop has not been called") } if !hitOutput2.onStop { t.Error("hitOutput2's OnStop has not been called") } }
explode_data.jsonl/11393
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 135 }
[ 2830, 3393, 5097, 1925, 10674, 1155, 353, 8840, 836, 8, 341, 94778, 5097, 1669, 609, 19498, 5097, 16094, 94778, 5097, 17, 1669, 609, 19498, 5097, 16094, 197, 41736, 1669, 609, 41736, 16094, 197, 41736, 1364, 5097, 66474, 5097, 340, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestJQErrorsWithInvalidQuery(t *testing.T) { t.Parallel() input := `[1, 2, 3]` _, err := script.Echo(input).JQ(".foo & .bar").String() if err == nil { t.Error("want error from invalid JQ query, got nil") } }
explode_data.jsonl/51488
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 91 }
[ 2830, 3393, 41, 48, 13877, 2354, 7928, 2859, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 22427, 1669, 77644, 16, 11, 220, 17, 11, 220, 18, 60, 3989, 197, 6878, 1848, 1669, 5316, 5142, 958, 5384, 568, 41, 48, 5680, 7975, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestLifecycleExecution(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) color.Disable(true) defer color.Disable(false) spec.Run(t, "phases", testLifecycleExecution, spec.Report(report.Terminal{}), spec.Sequential()) }
explode_data.jsonl/10188
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 62731, 20294, 1155, 353, 8840, 836, 8, 341, 7000, 437, 5732, 291, 9730, 13244, 1005, 21183, 1005, 55832, 83819, 12367, 21481, 10166, 480, 3715, 340, 16867, 1894, 10166, 480, 3576, 692, 98100, 16708, 1155, 11, 330, 759, 2264, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestUpdateNode(t *testing.T) { var enqueued bool cases := []struct { test string newNode *v1.Node oldNode *v1.Node ds *apps.DaemonSet shouldEnqueue bool }{ { test: "Nothing changed, should not enqueue", oldNode: newNode("node1", nil), newNode: newNode("node1", nil), ds: func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel return ds }(), shouldEnqueue: false, }, { test: "Node labels changed", oldNode: newNode("node1", nil), newNode: newNode("node1", simpleNodeLabel), ds: func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel return ds }(), shouldEnqueue: true, }, { test: "Node taints changed", oldNode: func() *v1.Node { node := newNode("node1", nil) setNodeTaint(node, noScheduleTaints) return node }(), newNode: newNode("node1", nil), ds: newDaemonSet("ds"), shouldEnqueue: true, }, } for _, c := range cases { for _, strategy := range updateStrategies() { manager, podControl, _, err := newTestController() if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } manager.nodeStore.Add(c.oldNode) c.ds.Spec.UpdateStrategy = *strategy manager.dsStore.Add(c.ds) syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0) manager.enqueueDaemonSet = func(ds *apps.DaemonSet) { if ds.Name == "ds" { enqueued = true } } enqueued = false manager.updateNode(c.oldNode, c.newNode) if enqueued != c.shouldEnqueue { t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) } } } }
explode_data.jsonl/50349
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 813 }
[ 2830, 3393, 4289, 1955, 1155, 353, 8840, 836, 8, 341, 2405, 662, 66547, 1807, 271, 1444, 2264, 1669, 3056, 1235, 341, 197, 18185, 688, 914, 198, 197, 8638, 1955, 981, 353, 85, 16, 21714, 198, 197, 61828, 1955, 981, 353, 85, 16, 2171...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGraphInstallWithK8sNamespace(t *testing.T) { ns := "foo" fkc := fakeKubernetesClientset(t, ns, testCharts) cfg := fakeHelmConfiguration(t) m := Manager{ LogF: t.Logf, K8c: fkc, HCfg: cfg, } ChartWaitPollInterval = 1 * time.Second rm, err := m.Install(context.Background(), testCharts, WithK8sNamespace(ns)) if err != nil { t.Fatalf("error installing: %v", err) } t.Logf("rm: %v\n", rm) for _, v := range rm { r, _ := m.HCfg.Releases.Deployed(v) if r.Namespace != ns { t.Fatalf("error incorrect namespace; expected: (%v), got: (%v)", ns, v) } } }
explode_data.jsonl/69441
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 259 }
[ 2830, 3393, 11212, 24690, 2354, 42, 23, 82, 22699, 1155, 353, 8840, 836, 8, 341, 84041, 1669, 330, 7975, 698, 1166, 31378, 1669, 12418, 42, 29827, 2959, 746, 1155, 11, 12268, 11, 1273, 64878, 340, 50286, 1669, 12418, 39, 23162, 7688, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRuntime_CustomCookieJar(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { authenticated := false for _, cookie := range req.Cookies() { if cookie.Name == "sessionid" && cookie.Value == "abc" { authenticated = true } } if !authenticated { username, password, ok := req.BasicAuth() if ok && username == "username" && password == "password" { authenticated = true http.SetCookie(rw, &http.Cookie{Name: "sessionid", Value: "abc"}) } } if authenticated { rw.Header().Add(httpkit.HeaderContentType, httpkit.JSONMime) rw.WriteHeader(http.StatusOK) jsongen := json.NewEncoder(rw) jsongen.Encode([]task{}) } else { rw.WriteHeader(http.StatusUnauthorized) } })) defer server.Close() rwrtr := client.RequestWriterFunc(func(req client.Request, _ strfmt.Registry) error { return nil }) hu, _ := url.Parse(server.URL) runtime := New(hu.Host, "/", []string{"http"}) runtime.Jar, _ = cookiejar.New(nil) submit := func(authInfo client.AuthInfoWriter) { _, err := runtime.Submit(&client.Operation{ ID: "getTasks", Method: "GET", PathPattern: "/", Params: rwrtr, AuthInfo: authInfo, Reader: client.ResponseReaderFunc(func(response client.Response, consumer httpkit.Consumer) (interface{}, error) { if response.Code() == 200 { return nil, nil } return nil, errors.New("Generic error") }), }) assert.NoError(t, err) } submit(BasicAuth("username", "password")) submit(nil) }
explode_data.jsonl/53821
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 625 }
[ 2830, 3393, 15123, 57402, 20616, 71735, 1155, 353, 8840, 836, 8, 341, 41057, 1669, 54320, 70334, 7121, 5475, 19886, 89164, 18552, 2601, 86, 1758, 37508, 11, 4232, 353, 1254, 9659, 8, 341, 197, 197, 57707, 1669, 895, 198, 197, 2023, 8358...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestAllowNonFullClusterRestoreOfFullBackup(t *testing.T) { defer leaktest.AfterTest(t)() const numAccounts = 10 _, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone) defer cleanupFn() sqlDB.Exec(t, `BACKUP TO $1`, localFoo) sqlDB.Exec(t, `CREATE DATABASE data2`) sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH into_db='data2'`, localFoo) checkResults := "SELECT * FROM data.bank" sqlDB.CheckQueryResults(t, checkResults, sqlDB.QueryStr(t, checkResults)) }
explode_data.jsonl/48481
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 195 }
[ 2830, 3393, 18605, 8121, 9432, 28678, 56284, 2124, 9432, 56245, 1155, 353, 8840, 836, 8, 341, 16867, 23352, 1944, 36892, 2271, 1155, 8, 2822, 4777, 1629, 41369, 284, 220, 16, 15, 198, 197, 6878, 8358, 5704, 3506, 11, 8358, 21290, 24911,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestWatcher(t *testing.T) { s := newStore() wh := s.WatcherHub w, err := wh.watch("/foo", true, false, 1, 1) if err != nil { t.Fatalf("%v", err) } c := w.EventChan() select { case <-c: t.Fatal("should not receive from channel before send the event") default: // do nothing } e := newEvent(Create, "/foo/bar", 1, 1) wh.notify(e) re := <-c if e != re { t.Fatal("recv != send") } w, _ = wh.watch("/foo", false, false, 2, 1) c = w.EventChan() e = newEvent(Create, "/foo/bar", 2, 2) wh.notify(e) select { case re = <-c: t.Fatal("should not receive from channel if not recursive ", re) default: // do nothing } e = newEvent(Create, "/foo", 3, 3) wh.notify(e) re = <-c if e != re { t.Fatal("recv != send") } // ensure we are doing exact matching rather than prefix matching w, _ = wh.watch("/fo", true, false, 1, 1) c = w.EventChan() select { case re = <-c: t.Fatal("should not receive from channel:", re) default: // do nothing } e = newEvent(Create, "/fo/bar", 3, 3) wh.notify(e) re = <-c if e != re { t.Fatal("recv != send") } }
explode_data.jsonl/7474
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 477 }
[ 2830, 3393, 47248, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 501, 6093, 741, 197, 1312, 1669, 274, 1175, 28058, 19316, 198, 6692, 11, 1848, 1669, 420, 46457, 4283, 7975, 497, 830, 11, 895, 11, 220, 16, 11, 220, 16, 340, 743, 1848, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestSaveToFile(t *testing.T) { hashMap := newHashMap(64 << 20) for i := 0; i < 100; i++ { key := []byte("vanakam" + string(i)) value := []byte("nanbare" + string(i)) hashMap.Set(key, value) inv, exist := hashMap.Get(key) if !exist { t.Fatal("key not found in the hashmap") } if bytes.Compare(value, inv) != 0 { t.Fatalf("expected value %s but got value %s", string(value), string(inv)) } } hashMap.toDisk("./", 1) filePath, err := filepath.Abs("./") if err != nil { panic("unable to form path for flushing the disk") } if _, err := os.Stat(fmt.Sprintf("%s/%d.table", filePath, 1)); os.IsNotExist(err) { panic("file not exist") } os.Remove(fmt.Sprintf("%s/%d.table", filePath, 1)) }
explode_data.jsonl/20362
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 309 }
[ 2830, 3393, 8784, 41550, 1155, 353, 8840, 836, 8, 341, 50333, 2227, 1669, 501, 18497, 7, 21, 19, 1115, 220, 17, 15, 340, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 16, 15, 15, 26, 600, 1027, 341, 197, 23634, 1669, 3056, 3782, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestTypeConv(t *testing.T) { gopClTest(t, ` var a = (*struct{})(nil) var b = interface{}(nil) var c = (func())(nil) var x uint32 = uint32(0) var y *uint32 = (*uint32)(nil) `, `package main var a = (*struct { })(nil) var b = interface { }(nil) var c = (func())(nil) var x uint32 = uint32(0) var y *uint32 = (*uint32)(nil) `) }
explode_data.jsonl/73620
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 929, 34892, 1155, 353, 8840, 836, 8, 341, 3174, 453, 5066, 2271, 1155, 11, 22074, 947, 264, 284, 4609, 1235, 6257, 2376, 8385, 340, 947, 293, 284, 3749, 6257, 7, 8385, 340, 947, 272, 284, 320, 2830, 2140, 7, 8385, 340, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConnective(t *testing.T) { // this test requires at least 2 vSwitches 2 nodes vsw := strings.Split(vSwitchIDs, ",") if len(vsw) < 2 { return } crossVSwitch := features.New("Connective/MultiVSwitch").WithLabel("env", "trunking"). Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { for _, args := range [][]interface{}{ { "use-vsw-1", []string{vsw[0]}, }, { "use-vsw-2", []string{vsw[1]}, }, } { pn := newPodNetworking(args[0].(string), args[1].([]string), nil, &metav1.LabelSelector{ MatchLabels: map[string]string{args[0].(string): ""}, }, nil) err := cfg.Client().Resources().Create(ctx, pn) if err != nil && !errors.IsAlreadyExists(err) { t.Fatal(err) } } return ctx }). Assess("podNetworking status ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { for _, arg := range []string{"use-vsw-1", "use-vsw-2"} { err := WaitPodNetworkingReady(arg, cfg.Client()) if err != nil { t.Fatal(err) } } return ctx }). Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { ports := []corev1.ServicePort{ { Name: "http", Protocol: corev1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80), }, } for _, args := range [][]interface{}{ { "pod-1", "connective-test", "l1b0k/echo:v0.0.1", "use-vsw-1", }, { "pod-2", "connective-test", "l1b0k/echo:v0.0.1", "use-vsw-2", }, } { pod := utils.NewPod(args[0].(string), cfg.Namespace(), args[1].(string), args[2].(string)) pod.Pod.Labels[args[3].(string)] = "" err := cfg.Client().Resources().Create(ctx, pod.Pod) if err != nil { t.Error(err) } svc := pod.Expose("") svc.Spec.Ports = ports err = cfg.Client().Resources().Create(ctx, svc) if err != nil { t.Error(err) } } return ctx }). 
Assess("wait for pod ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { for _, name := range []string{"pod-1", "pod-2"} { pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: cfg.Namespace()}, } err := wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&pod, func(object k8s.Object) bool { p := object.(*corev1.Pod) if !terwayTypes.PodUseENI(p) { return false } return p.Status.Phase == corev1.PodRunning }), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2)) if err != nil { t.Fatal(err) } } return ctx }). Assess("test connective", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { pod := utils.NewPod("client", cfg.Namespace(), "client", "l1b0k/echo:v0.0.1") pod.Pod.Spec.Containers[0].Command = []string{"/usr/bin/echo", "-mode", "client", "-cases", "dns://aliyun.com,http://pod-1,http://pod-2,tcp://100.100.100.200:80"} pod.Pod.Labels["use-vsw-1"] = "" pod.Pod.Spec.RestartPolicy = corev1.RestartPolicyNever err := cfg.Client().Resources().Create(ctx, pod.Pod) if err != nil { t.Fatal(err) } p := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: cfg.Namespace()}, } err = wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&p, func(object k8s.Object) bool { p := object.(*corev1.Pod) return p.Status.Phase == corev1.PodSucceeded }), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2)) if err != nil { t.Fatal(err) } err = cfg.Client().Resources().Delete(ctx, pod.Pod) if err != nil { t.Fatal(err) } return ctx }). Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { for _, arg := range []string{"use-vsw-1", "use-vsw-2"} { pn1 := &v1beta1.PodNetworking{ ObjectMeta: metav1.ObjectMeta{Name: arg}, } _ = config.Client().Resources().Delete(ctx, pn1) } return ctx }).Feature() testenv.Test(t, crossVSwitch) }
explode_data.jsonl/4263
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1927 }
[ 2830, 3393, 14611, 533, 1155, 353, 8840, 836, 8, 341, 197, 322, 419, 1273, 7460, 518, 3245, 220, 17, 348, 16837, 288, 220, 17, 7798, 198, 5195, 2280, 1669, 9069, 19823, 3747, 16837, 30466, 11, 3670, 1138, 743, 2422, 3747, 2280, 8, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRenderSimpleIfstatement(t *testing.T) { template, _ := ParseString("A-{% if 2 == 2 %}in if{% endif %}-Z", nil) assertRender(t, template, nil, `A-in if-Z`) }
explode_data.jsonl/42413
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 68 }
[ 2830, 3393, 6750, 16374, 2679, 24184, 1155, 353, 8840, 836, 8, 341, 22832, 11, 716, 1669, 14775, 703, 445, 32, 63347, 4, 421, 220, 17, 621, 220, 17, 1018, 92, 258, 421, 66365, 12330, 1018, 19732, 57, 497, 2092, 340, 6948, 6750, 1155...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestTokenRequestValues(t *testing.T) { tr := TokenRequest{} assert.Equal(t, url.Values{}, TokenRequestValues(tr)) tr = TokenRequest{ GrantType: "password", Scope: Scope{"foo", "bar"}, ClientID: "client-id", ClientSecret: "client-secret", Username: "username", Password: "password", RefreshToken: "refresh-token", RedirectURI: "http://redirect.uri", Code: "code", } assert.Equal(t, url.Values{ "grant_type": []string{"password"}, "scope": []string{"foo bar"}, "username": []string{"username"}, "password": []string{"password"}, "refresh_token": []string{"refresh-token"}, "redirect_uri": []string{"http%3A%2F%2Fredirect.uri"}, "code": []string{"code"}, }, TokenRequestValues(tr)) }
explode_data.jsonl/1730
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 335 }
[ 2830, 3393, 3323, 1900, 6227, 1155, 353, 8840, 836, 8, 341, 25583, 1669, 9660, 1900, 16094, 6948, 12808, 1155, 11, 2515, 35145, 22655, 9660, 1900, 6227, 7624, 4390, 25583, 284, 9660, 1900, 515, 197, 197, 67971, 929, 25, 262, 330, 3833, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTxSearch(t *testing.T) { // first we broadcast a tx c := getHTTPClient() _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(tx) require.Nil(t, err, "%+v", err) txHeight := bres.Height txHash := bres.Hash anotherTxHash := types.Tx("a different tx").Hash() for i, c := range GetClients() { t.Logf("client %d", i) // now we query for the tx. // since there's only one tx, we know index=0. results, err := c.TxSearch(fmt.Sprintf("tx.hash='%v'", txHash), true) require.Nil(t, err, "%+v", err) require.Len(t, results, 1) ptx := results[0] assert.EqualValues(t, txHeight, ptx.Height) assert.EqualValues(t, tx, ptx.Tx) assert.Zero(t, ptx.Index) assert.True(t, ptx.DeliverResult.IsOK()) assert.EqualValues(t, txHash, ptx.Hash) // time to verify the proof proof := ptx.Proof if assert.EqualValues(t, tx, proof.Data) { assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) } // we query for non existing tx results, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false) require.Nil(t, err, "%+v", err) require.Len(t, results, 0) // we query using a tag (see kvstore application) results, err = c.TxSearch("app.creator='jae'", false) require.Nil(t, err, "%+v", err) if len(results) == 0 { t.Fatal("expected a lot of transactions") } } }
explode_data.jsonl/48951
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 563 }
[ 2830, 3393, 31584, 5890, 1155, 353, 8840, 836, 8, 341, 197, 322, 1156, 582, 12899, 264, 9854, 198, 1444, 1669, 633, 9230, 2959, 741, 197, 6878, 8358, 9854, 1669, 7405, 31584, 82707, 741, 2233, 416, 11, 1848, 1669, 272, 84788, 31584, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func Test_ddosxDomainWAFAdvancedRuleUpdateCmd_Args(t *testing.T) { t.Run("ValidArgs_NoError", func(t *testing.T) { cmd := ddosxDomainWAFAdvancedRuleUpdateCmd() err := cmd.Args(nil, []string{"testdomain1.co.uk", "00000000-0000-0000-0000-000000000000"}) assert.Nil(t, err) }) t.Run("MissingDomain_Error", func(t *testing.T) { cmd := ddosxDomainWAFAdvancedRuleUpdateCmd() err := cmd.Args(nil, []string{}) assert.NotNil(t, err) assert.Equal(t, "Missing domain", err.Error()) }) t.Run("MissingAdvancedRule_Error", func(t *testing.T) { cmd := ddosxDomainWAFAdvancedRuleUpdateCmd() err := cmd.Args(nil, []string{"testdomain1.co.uk"}) assert.NotNil(t, err) assert.Equal(t, "Missing advanced rule", err.Error()) }) }
explode_data.jsonl/43035
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 292 }
[ 2830, 3393, 43174, 436, 87, 13636, 54, 8276, 35457, 11337, 4289, 15613, 87581, 82, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 4088, 4117, 36989, 1454, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 25920, 1669, 19647, 436, 87, 136...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNamespaceNeedsFlushRangeMultipleShardConflict(t *testing.T) { ctrl := xtest.NewController(t) defer ctrl.Finish() var ( shards = []uint32{0, 2, 4} ns = newNeedsFlushNamespace(t, shards) ropts = ns.Options().RetentionOptions() blockSize = ropts.BlockSize() t2 = retention.FlushTimeEnd(ropts, xtime.ToUnixNano(ns.opts.ClockOptions().NowFn()())) t1 = t2.Add(-blockSize) t0 = t1.Add(-blockSize) ) inputCases := []needsFlushTestCase{ {0, map[xtime.UnixNano]bool{t0: false, t1: true, t2: true}}, {2, map[xtime.UnixNano]bool{t0: true, t1: false, t2: true}}, {4, map[xtime.UnixNano]bool{t0: false, t1: true, t2: true}}, } setShardExpects(ns, ctrl, inputCases) assertNeedsFlush(t, ns, t0, t0, true) assertNeedsFlush(t, ns, t1, t1, true) assertNeedsFlush(t, ns, t2, t2, true) assertNeedsFlush(t, ns, t0, t1, true) assertNeedsFlush(t, ns, t0, t2, true) assertNeedsFlush(t, ns, t1, t2, true) assertNeedsFlush(t, ns, t2, t1, false) assertNeedsFlush(t, ns, t2, t0, false) }
explode_data.jsonl/35368
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 509 }
[ 2830, 3393, 22699, 65064, 46874, 6046, 32089, 2016, 567, 57974, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 856, 1944, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 2405, 2399, 197, 36196, 2347, 262, 284, 3056, 2496, 18, 17, 90,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateClient(t *testing.T) { assert := assert.New(t) type validateClientTest struct { ctx context.Context commonNames map[string]interface{} expectedError error } cache := make(map[certificate.CommonName]certificate.Certificater) certManager := tresor.NewFakeCertManager(&cache, nil) cn := certificate.CommonName(fmt.Sprintf("%s.%s.%s", uuid.New(), tests.BookstoreServiceAccountName, tests.Namespace)) certPEM, _ := certManager.IssueCertificate(cn, 1*time.Hour) cert, _ := certificate.DecodePEMCertificate(certPEM.GetCertificateChain()) goodCommonNameMapping := map[string]interface{}{string(cn): cn} badCommonNameMapping := map[string]interface{}{"apple": "pear"} validateClientTests := []validateClientTest{ {context.Background(), nil, status.Error(codes.Unauthenticated, "no peer found")}, {peer.NewContext(context.TODO(), &peer.Peer{}), nil, status.Error(codes.Unauthenticated, "unexpected peer transport credentials")}, {peer.NewContext(context.TODO(), &peer.Peer{AuthInfo: credentials.TLSInfo{}}), nil, status.Error(codes.Unauthenticated, "could not verify peer certificate")}, {peer.NewContext(context.TODO(), &peer.Peer{AuthInfo: tests.NewMockAuthInfo(cert)}), badCommonNameMapping, status.Error(codes.Unauthenticated, "disallowed subject common name")}, {peer.NewContext(context.TODO(), &peer.Peer{AuthInfo: tests.NewMockAuthInfo(cert)}), goodCommonNameMapping, nil}, } for _, vct := range validateClientTests { result, err := ValidateClient(vct.ctx, vct.commonNames) if err != nil { assert.Equal(result, certificate.CommonName("")) assert.True(errors.Is(err, vct.expectedError)) } else { assert.NotNil(result) assert.Empty(vct.expectedError) } } }
explode_data.jsonl/3391
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 605 }
[ 2830, 3393, 17926, 2959, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 692, 13158, 9593, 2959, 2271, 2036, 341, 197, 20985, 1843, 2266, 9328, 198, 197, 83825, 7980, 256, 2415, 14032, 31344, 16094, 197, 42400, 1454, 1465, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestIndexDatabase_checkSync(t *testing.T) { syncInterval = 100 ctrl := gomock.NewController(t) defer func() { syncInterval = 2 * timeutil.OneSecond _ = fileutil.RemoveDir(testPath) createSeriesWAL = wal.NewSeriesWAL ctrl.Finish() }() var count atomic.Int32 mockSeriesWAL := wal.NewMockSeriesWAL(ctrl) mockSeriesWAL.EXPECT().NeedRecovery().DoAndReturn(func() bool { count.Inc() return count.Load() != 1 }).AnyTimes() mockSeriesWAL.EXPECT().Recovery(gomock.Any(), gomock.Any()).AnyTimes() createSeriesWAL = func(path string) (wal.SeriesWAL, error) { return mockSeriesWAL, nil } meta := metadb.NewMockMetadata(ctrl) meta.EXPECT().DatabaseName().Return("test").AnyTimes() db, err := NewIndexDatabase(context.TODO(), testPath, meta, nil, nil) assert.NoError(t, err) assert.NotNil(t, db) time.Sleep(time.Second) mockSeriesWAL.EXPECT().Close().Return(nil) err = db.Close() assert.NoError(t, err) }
explode_data.jsonl/33831
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 362 }
[ 2830, 3393, 1552, 5988, 7200, 12154, 1155, 353, 8840, 836, 8, 341, 1903, 1721, 10256, 284, 220, 16, 15, 15, 198, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 2915, 368, 341, 197, 1903, 1721, 10256, 284, 220, 17, 353, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestRequestSent_ExecuteInbound(t *testing.T) { followup, action, err := (&requestSent{}).ExecuteInbound(&metaData{}) require.NoError(t, err) require.Equal(t, &noOp{}, followup) require.NoError(t, action(nil)) }
explode_data.jsonl/53019
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 84 }
[ 2830, 3393, 1900, 31358, 83453, 641, 10891, 1155, 353, 8840, 836, 8, 341, 1166, 1544, 454, 11, 1917, 11, 1848, 1669, 15899, 2035, 31358, 6257, 568, 17174, 641, 10891, 2099, 5490, 1043, 37790, 17957, 35699, 1155, 11, 1848, 340, 17957, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestGetLog(t *testing.T) { kc := fkc{ prowapi.ProwJob{ Spec: prowapi.ProwJobSpec{ Agent: prowapi.KubernetesAgent, Job: "job", }, Status: prowapi.ProwJobStatus{ PodName: "wowowow", BuildID: "123", }, }, prowapi.ProwJob{ Spec: prowapi.ProwJobSpec{ Agent: prowapi.KubernetesAgent, Job: "jib", Cluster: "trusted", }, Status: prowapi.ProwJobStatus{ PodName: "powowow", BuildID: "123", }, }, } ja := &JobAgent{ kc: kc, pkcs: map[string]PodLogClient{kube.DefaultClusterAlias: fpkc("clusterA"), "trusted": fpkc("clusterB")}, } if err := ja.update(); err != nil { t.Fatalf("Updating: %v", err) } if res, err := ja.GetJobLog("job", "123", kube.TestContainerName); err != nil { t.Fatalf("Failed to get log: %v", err) } else if got, expect := string(res), fmt.Sprintf("clusterA.%s", kube.TestContainerName); got != expect { t.Errorf("Unexpected result getting logs for job 'job'. Expected %q, but got %q.", expect, got) } if res, err := ja.GetJobLog("jib", "123", kube.TestContainerName); err != nil { t.Fatalf("Failed to get log: %v", err) } else if got, expect := string(res), fmt.Sprintf("clusterB.%s", kube.TestContainerName); got != expect { t.Errorf("Unexpected result getting logs for job 'job'. Expected %q, but got %q.", expect, got) } customContainerName := "custom-container-name" if res, err := ja.GetJobLog("jib", "123", customContainerName); err != nil { t.Fatalf("Failed to get log: %v", err) } else if got, expect := string(res), fmt.Sprintf("clusterB.%s", customContainerName); got != expect { t.Errorf("Unexpected result getting logs for job 'job'. Expected %q, but got %q.", expect, got) } }
explode_data.jsonl/11059
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 720 }
[ 2830, 3393, 1949, 2201, 1155, 353, 8840, 836, 8, 341, 16463, 66, 1669, 282, 31378, 515, 197, 3223, 651, 2068, 1069, 651, 12245, 515, 298, 7568, 992, 25, 47558, 2068, 1069, 651, 12245, 8327, 515, 571, 197, 16810, 25, 47558, 2068, 11352...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestAcquireIfWrongTimeoutRequested(t *testing.T) { // GIVEN permitFunction := func() bool { return true } lock := state.NewLock(1) handler := NewLockHandler(lock, timeout, permitFunction) req, _ := http.NewRequest("GET", "/", nil) q := req.URL.Query() q.Add("duration", "a") req.URL.RawQuery = q.Encode() prepareResponseRecorder(req, handler) // WHEN rr := prepareResponseRecorder(req, handler) // THEN assertResponseStatusCode(http.StatusLocked, rr.Code, t) }
explode_data.jsonl/66023
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 175 }
[ 2830, 3393, 11654, 984, 2679, 29185, 7636, 37630, 1155, 353, 8840, 836, 8, 341, 197, 322, 89836, 198, 197, 39681, 5152, 1669, 2915, 368, 1807, 341, 197, 853, 830, 198, 197, 630, 58871, 1669, 1584, 7121, 11989, 7, 16, 340, 53326, 1669,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewServiceConfigError(t *testing.T) { tbl := []struct { Initial Config ServiceError bool }{ {Config{}, false}, {Config{APIEncoding: "json"}, false}, {Config{APIEncoding: "JSON"}, false}, {Config{APIEncoding: "jsonFlat"}, false}, {Config{APIEncoding: "jsonflat"}, false}, {Config{APIEncoding: "test"}, true}, } for i, r := range tbl { cfg := r.Initial cfg.SetDefault() _, err := NewService(nil, cfg) if err != nil && !r.ServiceError { t.Fatalf("expected no error, but got:\n%s\nin test #%d", err, i+1) } else if err == nil && r.ServiceError { t.Fatalf("expected an error, but got none, in test #%d", i+1) } } }
explode_data.jsonl/36531
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 270 }
[ 2830, 3393, 3564, 1860, 2648, 1454, 1155, 353, 8840, 836, 8, 341, 3244, 2024, 1669, 3056, 1235, 341, 197, 197, 6341, 414, 5532, 198, 197, 91619, 1454, 1807, 198, 197, 59403, 197, 197, 90, 2648, 22655, 895, 1583, 197, 197, 90, 2648, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestStrangeName(t *testing.T) { assert.NoError(t, PrepareEngine()) err := testEngine.DropTables(new(StrangeName)) assert.NoError(t, err) err = testEngine.CreateTables(new(StrangeName)) assert.NoError(t, err) _, err = testEngine.Insert(&StrangeName{Name: "sfsfdsfds"}) assert.NoError(t, err) beans := make([]StrangeName, 0) err = testEngine.Find(&beans) assert.NoError(t, err) }
explode_data.jsonl/19204
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 150 }
[ 2830, 3393, 91334, 675, 1155, 353, 8840, 836, 8, 341, 6948, 35699, 1155, 11, 31166, 4571, 12367, 9859, 1669, 1273, 4571, 21688, 21670, 1755, 7, 91334, 675, 1171, 6948, 35699, 1155, 11, 1848, 692, 9859, 284, 1273, 4571, 7251, 21670, 1755...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1