text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
// TestChatSrvStellarMessages posts a stellar payment-request message into a
// freshly created conversation, verifies it arrives via the remote-message
// listener and the conversation source, then deletes it and verifies the
// delete supersedes the request. Runs across all member types except KBFS,
// with and without ephemeral lifetimes.
func TestChatSrvStellarMessages(t *testing.T) {
	runWithMemberTypes(t, func(mt chat1.ConversationMembersType) {
		runWithEphemeral(t, mt, func(ephemeralLifetime *gregor1.DurationSec) {
			switch mt {
			case chat1.ConversationMembersType_KBFS:
				return
			default:
				// Fall through for other member types.
			}
			ctc := makeChatTestContext(t, "SrvStellarMessages", 2)
			defer ctc.cleanup()
			users := ctc.users()
			uid := users[0].User.GetUID().ToBytes()
			tc := ctc.world.Tcs[users[0].Username]
			ctx := ctc.as(t, users[0]).startCtx
			listener := newServerChatListener()
			ctc.as(t, users[0]).h.G().NotifyRouter.AddListener(listener)
			// Pretend we are connected so the syncer does not defer work.
			tc.ChatG.Syncer.(*Syncer).isConnected = true

			created := mustCreateConversationForTest(t, ctc, users[0], chat1.TopicType_CHAT, mt, ctc.as(t, users[1]).user())

			t.Logf("send a request message")
			body := chat1.NewMessageBodyWithRequestpayment(chat1.MessageRequestPayment{
				RequestID: stellar1.KeybaseRequestID("dummy id"),
				Note:      "Test note",
			})
			_, err := postLocalEphemeralForTest(t, ctc, users[0], created, body, ephemeralLifetime)
			require.NoError(t, err)

			// The request message must show up on the remote-message channel
			// and must never be ephemeral, even when a lifetime was supplied.
			var unboxed chat1.UIMessage
			select {
			case info := <-listener.newMessageRemote:
				unboxed = info.Message
				require.True(t, unboxed.IsValid(), "invalid message")
				require.Equal(t, chat1.MessageType_REQUESTPAYMENT, unboxed.GetMessageType(), "invalid type")
				require.Equal(t, body.Requestpayment(), unboxed.Valid().MessageBody.Requestpayment())
				require.False(t, unboxed.IsEphemeral())
			case <-time.After(20 * time.Second):
				require.Fail(t, "no event received")
			}
			consumeNewMsgLocal(t, listener, chat1.MessageType_REQUESTPAYMENT)

			// The request must also be visible when pulling the thread.
			tv, err := tc.Context().ConvSource.Pull(ctx, created.Id, uid, chat1.GetThreadReason_GENERAL, nil, nil)
			require.NoError(t, err)
			require.NotZero(t, len(tv.Messages))
			require.Equal(t, chat1.MessageType_REQUESTPAYMENT, tv.Messages[0].GetMessageType())

			t.Logf("delete the message")
			darg := chat1.PostDeleteNonblockArg{
				ConversationID:   created.Id,
				TlfName:          created.TlfName,
				TlfPublic:        created.Visibility == keybase1.TLFVisibility_PUBLIC,
				Supersedes:       unboxed.GetMessageID(),
				IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
			}
			res, err := ctc.as(t, users[0]).chatLocalHandler().PostDeleteNonblock(ctx, darg)
			require.NoError(t, err)
			select {
			case info := <-listener.newMessageRemote:
				unboxed = info.Message
				require.True(t, unboxed.IsValid(), "invalid message")
				require.NotNil(t, unboxed.Valid().OutboxID, "no outbox ID")
				require.Equal(t, res.OutboxID.String(), *unboxed.Valid().OutboxID, "mismatch outbox ID")
				require.Equal(t, chat1.MessageType_DELETE, unboxed.GetMessageType(), "invalid type")
			case <-time.After(20 * time.Second):
				require.Fail(t, "no event (DELETE) received")
			}
			consumeNewMsgLocal(t, listener, chat1.MessageType_DELETE)

			// After deletion the thread must contain the DELETE and no trace
			// of the original REQUESTPAYMENT message.
			tv, err = tc.Context().ConvSource.Pull(ctx, created.Id, uid, chat1.GetThreadReason_GENERAL, nil, nil)
			require.NoError(t, err)
			require.NotZero(t, len(tv.Messages))
			require.Equal(t, chat1.MessageType_DELETE, tv.Messages[0].GetMessageType())
			for _, msg := range tv.Messages {
				require.NotEqual(t, chat1.MessageType_REQUESTPAYMENT, msg.GetMessageType())
			}
		})
	})
}
explode_data.jsonl/63724
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1401 }
[ 2830, 3393, 15672, 50, 10553, 623, 26880, 15820, 1155, 353, 8840, 836, 8, 341, 56742, 2354, 9366, 4173, 1155, 11, 2915, 81618, 6236, 16, 4801, 22323, 24371, 929, 8, 341, 197, 56742, 2354, 36, 59941, 3253, 1155, 11, 11965, 11, 2915, 7,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestWebhook_DecodeWebhook(t *testing.T) { setup() defer teardown() jsonData := strings.NewReader(webhookPayload) res, err := DecodeWebhook(jsonData) if err != nil { t.Fatal(err) } if len(res.Messages) != 1 { t.Fatal("Expect 1 message") } incidentDetails := res.Messages[0].Incident if incidentDetails.IncidentNumber != 33 { t.Fatal("Unexpected Incident Number") } if len(incidentDetails.PendingActions) != 2 { t.Fatal("Expected 2 pending actions") } if incidentDetails.Service.ID != "PN49J75" { t.Fatal("Unexpected Service ID") } if len(incidentDetails.Assignments) != 1 { t.Fatal("Expected 1 Assignment") } }
explode_data.jsonl/81948
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 251 }
[ 2830, 3393, 5981, 20873, 78668, 534, 5981, 20873, 1155, 353, 8840, 836, 8, 341, 84571, 741, 16867, 49304, 2822, 30847, 1043, 1669, 9069, 68587, 39769, 20873, 29683, 340, 10202, 11, 1848, 1669, 50194, 5981, 20873, 9304, 1043, 340, 743, 184...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func Test_bot_disconnect(t *testing.T) { cfg := fakeCfg{} discord := &FakeDiscordClientSpy{} prc := &fakeProcessor{} b := &bot{ cfg: cfg, discord: discord, prc: prc, } t.Run("it should disconnect correctly", func(t *testing.T) { b.disconnect() assertSpySuccess(t, discord, "Close()") }) t.Run("it should not fail on disconnect failure", func(t *testing.T) { discord.failOnClose = true b.disconnect() assertSpyFailure(t, discord, "Close()", fakeError) }) t.Run("it should not fail without client", func(t *testing.T) { b.discord = nil b.disconnect() }) }
explode_data.jsonl/2137
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 247 }
[ 2830, 3393, 38883, 67972, 1155, 353, 8840, 836, 8, 341, 50286, 1669, 12418, 42467, 16094, 2698, 3427, 539, 1669, 609, 52317, 23477, 539, 2959, 44027, 16094, 25653, 66, 1669, 609, 30570, 22946, 31483, 2233, 1669, 609, 6331, 515, 197, 50286...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTPDUAlphabet(t *testing.T) { patterns := []dcsAlphabetPattern{ {0x00, tpdu.Alpha7Bit, nil}, {0x04, tpdu.Alpha8Bit, nil}, {0x08, tpdu.AlphaUCS2, nil}, {0x0c, tpdu.Alpha7Bit, nil}, {0x80, tpdu.Alpha7Bit, tpdu.ErrInvalid}, } for _, p := range patterns { f := func(t *testing.T) { d := tpdu.TPDU{} d.DCS = p.in c, err := d.Alphabet() if err != p.err { t.Fatalf("error converting 0x%02x: %v", p.in, err) } if c != p.out { t.Errorf("expected result %v, got %v", p.out, c) } } t.Run(fmt.Sprintf("%02x", p.in), f) } }
explode_data.jsonl/28771
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 308 }
[ 2830, 3393, 4239, 21547, 2101, 18485, 1155, 353, 8840, 836, 8, 341, 3223, 3227, 82, 1669, 3056, 67, 4837, 2101, 18485, 15760, 515, 197, 197, 90, 15, 87, 15, 15, 11, 18101, 1054, 95872, 22, 8344, 11, 2092, 1583, 197, 197, 90, 15, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestUintStringInvalid(t *testing.T) { ti := Uint{ ValidFlag: false, uint: 123456789, } if ti.String() != "" { t.Errorf("expected empty string, actual:%s", ti.String()) } }
explode_data.jsonl/13402
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 83 }
[ 2830, 3393, 21570, 703, 7928, 1155, 353, 8840, 836, 8, 341, 72859, 1669, 27883, 515, 197, 197, 4088, 12135, 25, 895, 345, 197, 8254, 25, 414, 220, 16, 17, 18, 19, 20, 21, 22, 23, 24, 345, 197, 532, 743, 8988, 6431, 368, 961, 159...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestDashboardPermissionApiEndpoint covers GET and POST on the dashboard
// permissions API across guardian/ACL scenarios: missing dashboard (404),
// no admin rights (403), successful read and update (200), and rejected
// updates (400) for duplicate or lower-precedence permissions.
func TestDashboardPermissionApiEndpoint(t *testing.T) {
	Convey("Dashboard permissions test", t, func() {
		Convey("Given dashboard not exists", func() {
			// Bus handler simulates a lookup miss for every dashboard query.
			bus.AddHandler("test", func(query *m.GetDashboardQuery) error {
				return m.ErrDashboardNotFound
			})

			loggedInUserScenarioWithRole("When calling GET on", "GET", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", m.ROLE_EDITOR, func(sc *scenarioContext) {
				callGetDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 404)
			})

			cmd := dtos.UpdateDashboardAclCommand{
				Items: []dtos.DashboardAclUpdateItem{
					{UserId: 1000, Permission: m.PERMISSION_ADMIN},
				},
			}

			updateDashboardPermissionScenario("When calling POST on", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", cmd, func(sc *scenarioContext) {
				callUpdateDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 404)
			})
		})

		Convey("Given user has no admin permissions", func() {
			// Swap in a guardian that denies admin; restored in Reset below.
			origNewGuardian := guardian.New
			guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{CanAdminValue: false})

			getDashboardQueryResult := m.NewDashboard("Dash")
			bus.AddHandler("test", func(query *m.GetDashboardQuery) error {
				query.Result = getDashboardQueryResult
				return nil
			})

			loggedInUserScenarioWithRole("When calling GET on", "GET", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", m.ROLE_EDITOR, func(sc *scenarioContext) {
				callGetDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 403)
			})

			cmd := dtos.UpdateDashboardAclCommand{
				Items: []dtos.DashboardAclUpdateItem{
					{UserId: 1000, Permission: m.PERMISSION_ADMIN},
				},
			}

			updateDashboardPermissionScenario("When calling POST on", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", cmd, func(sc *scenarioContext) {
				callUpdateDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 403)
			})

			Reset(func() {
				guardian.New = origNewGuardian
			})
		})

		Convey("Given user has admin permissions and permissions to update", func() {
			origNewGuardian := guardian.New
			// Guardian grants admin and exposes a five-entry ACL covering
			// user and team permissions at all levels.
			guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{
				CanAdminValue:                    true,
				CheckPermissionBeforeUpdateValue: true,
				GetAclValue: []*m.DashboardAclInfoDTO{
					{OrgId: 1, DashboardId: 1, UserId: 2, Permission: m.PERMISSION_VIEW},
					{OrgId: 1, DashboardId: 1, UserId: 3, Permission: m.PERMISSION_EDIT},
					{OrgId: 1, DashboardId: 1, UserId: 4, Permission: m.PERMISSION_ADMIN},
					{OrgId: 1, DashboardId: 1, TeamId: 1, Permission: m.PERMISSION_VIEW},
					{OrgId: 1, DashboardId: 1, TeamId: 2, Permission: m.PERMISSION_ADMIN},
				},
			})

			getDashboardQueryResult := m.NewDashboard("Dash")
			bus.AddHandler("test", func(query *m.GetDashboardQuery) error {
				query.Result = getDashboardQueryResult
				return nil
			})

			loggedInUserScenarioWithRole("When calling GET on", "GET", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", m.ROLE_ADMIN, func(sc *scenarioContext) {
				callGetDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 200)
				// Response body mirrors the mocked ACL above.
				respJSON, err := simplejson.NewJson(sc.resp.Body.Bytes())
				So(err, ShouldBeNil)
				So(len(respJSON.MustArray()), ShouldEqual, 5)
				So(respJSON.GetIndex(0).Get("userId").MustInt(), ShouldEqual, 2)
				So(respJSON.GetIndex(0).Get("permission").MustInt(), ShouldEqual, m.PERMISSION_VIEW)
			})

			cmd := dtos.UpdateDashboardAclCommand{
				Items: []dtos.DashboardAclUpdateItem{
					{UserId: 1000, Permission: m.PERMISSION_ADMIN},
				},
			}

			updateDashboardPermissionScenario("When calling POST on", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", cmd, func(sc *scenarioContext) {
				callUpdateDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 200)
			})

			Reset(func() {
				guardian.New = origNewGuardian
			})
		})

		Convey("When trying to update permissions with duplicate permissions", func() {
			origNewGuardian := guardian.New
			// Permission check fails with the "exists" sentinel -> 400.
			guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{
				CanAdminValue:                    true,
				CheckPermissionBeforeUpdateValue: false,
				CheckPermissionBeforeUpdateError: guardian.ErrGuardianPermissionExists,
			})

			getDashboardQueryResult := m.NewDashboard("Dash")
			bus.AddHandler("test", func(query *m.GetDashboardQuery) error {
				query.Result = getDashboardQueryResult
				return nil
			})

			cmd := dtos.UpdateDashboardAclCommand{
				Items: []dtos.DashboardAclUpdateItem{
					{UserId: 1000, Permission: m.PERMISSION_ADMIN},
				},
			}

			updateDashboardPermissionScenario("When calling POST on", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", cmd, func(sc *scenarioContext) {
				callUpdateDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 400)
			})

			Reset(func() {
				guardian.New = origNewGuardian
			})
		})

		Convey("When trying to override inherited permissions with lower presedence", func() {
			origNewGuardian := guardian.New
			// Permission check fails with the "override" sentinel -> 400.
			guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{
				CanAdminValue:                    true,
				CheckPermissionBeforeUpdateValue: false,
				CheckPermissionBeforeUpdateError: guardian.ErrGuardianOverride},
			)

			getDashboardQueryResult := m.NewDashboard("Dash")
			bus.AddHandler("test", func(query *m.GetDashboardQuery) error {
				query.Result = getDashboardQueryResult
				return nil
			})

			cmd := dtos.UpdateDashboardAclCommand{
				Items: []dtos.DashboardAclUpdateItem{
					{UserId: 1000, Permission: m.PERMISSION_ADMIN},
				},
			}

			updateDashboardPermissionScenario("When calling POST on", "/api/dashboards/id/1/permissions", "/api/dashboards/id/:id/permissions", cmd, func(sc *scenarioContext) {
				callUpdateDashboardPermissions(sc)
				So(sc.resp.Code, ShouldEqual, 400)
			})

			Reset(func() {
				guardian.New = origNewGuardian
			})
		})
	})
}
explode_data.jsonl/76515
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2362 }
[ 2830, 3393, 26947, 14966, 6563, 27380, 1155, 353, 8840, 836, 8, 341, 93070, 5617, 445, 26947, 8541, 1273, 497, 259, 11, 2915, 368, 341, 197, 93070, 5617, 445, 22043, 26967, 537, 6724, 497, 2915, 368, 341, 298, 92530, 1904, 3050, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewPolicy(t *testing.T) { tests := []struct { name string want *Policy }{ struct { name string want *Policy }{ "blank", &Policy{ Sources: make(map[string]*orderedset.OrderedSet), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := NewPolicy(); !reflect.DeepEqual(got, tt.want) { t.Errorf("NewPolicy() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/10342
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 200 }
[ 2830, 3393, 3564, 13825, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 50780, 353, 13825, 198, 197, 59403, 197, 6472, 341, 298, 11609, 914, 198, 298, 50780, 353, 13825, 198, 197, 197, 59403, 29...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSaveRestoreViewEmpty(t *testing.T) { var toSave View var v View doSaveAndLoad(t, &toSave, &v) if got := v.pool.avail; got != nil { t.Errorf("pool is not in zero state: v.pool.avail = %v, want nil", got) } if got := v.Flatten(); len(got) != 0 { t.Errorf("v.Flatten() = %x, want []", got) } }
explode_data.jsonl/52513
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 135 }
[ 2830, 3393, 8784, 56284, 851, 3522, 1155, 353, 8840, 836, 8, 341, 2405, 311, 8784, 2738, 198, 2405, 348, 2738, 198, 19935, 8784, 3036, 5879, 1155, 11, 609, 983, 8784, 11, 609, 85, 692, 743, 2684, 1669, 348, 38963, 40506, 604, 26, 26...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestParseSignal(t *testing.T) { _, checkAtoiError := ParseSignal("0") assert.Check(t, is.Error(checkAtoiError, "Invalid signal: 0")) _, error := ParseSignal("SIG") assert.Check(t, is.Error(error, "Invalid signal: SIG")) for sigStr := range SignalMap { responseSignal, error := ParseSignal(sigStr) assert.Check(t, error) signal := SignalMap[sigStr] assert.Check(t, is.DeepEqual(signal, responseSignal)) } }
explode_data.jsonl/62497
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 168 }
[ 2830, 3393, 14463, 26810, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1779, 32, 52609, 1454, 1669, 14775, 26810, 445, 15, 1138, 6948, 10600, 1155, 11, 374, 6141, 24077, 32, 52609, 1454, 11, 330, 7928, 8286, 25, 220, 15, 28075, 197, 6878,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRoundtrip_RSACompact(t *testing.T) { payload := []byte("Hello, World!") for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} { key, err := rsa.GenerateKey(rand.Reader, 2048) if !assert.NoError(t, err, "RSA key generated") { return } buf, err := jws.Sign(payload, alg, key) if !assert.NoError(t, err, "(%s) Signature generated successfully", alg) { return } parsers := map[string]func([]byte) (*jws.Message, error){ "Parse(io.Reader)": func(b []byte) (*jws.Message, error) { return jws.Parse(bytes.NewReader(b)) }, "Parse(string)": func(b []byte) (*jws.Message, error) { return jws.ParseString(string(b)) }, } for name, f := range parsers { m, err := f(buf) if !assert.NoError(t, err, "(%s) %s is successful", alg, name) { return } if !assert.Equal(t, payload, m.Payload(), "(%s) %s: Payload is decoded", alg, name) { return } } verified, err := jws.Verify(buf, alg, &key.PublicKey) if !assert.NoError(t, err, "(%s) Verify is successful", alg) { return } if !assert.Equal(t, payload, verified, "(%s) Verified payload is the same", alg) { return } } }
explode_data.jsonl/1562
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 526 }
[ 2830, 3393, 27497, 32981, 76994, 98335, 1155, 353, 8840, 836, 8, 341, 76272, 1669, 3056, 3782, 445, 9707, 11, 4337, 22988, 2023, 8358, 17345, 1669, 2088, 3056, 73, 9991, 41152, 1568, 27847, 71043, 9991, 2013, 50, 17, 20, 21, 11, 502, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestRWFileHandleWriteNoWrite checks that closing a write handle without
// ever writing still materializes an empty file — once via Close on the
// rw handle, and once via the Flush/Release pair on a second file — and
// that both files show up in the VFS listing and on the remote.
func TestRWFileHandleWriteNoWrite(t *testing.T) {
	r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t)
	defer cleanup()

	// Close the file without writing to it
	err := fh.Close()
	if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
		// Backend cannot store zero-length files; nothing to test here.
		t.Logf("skipping test: %v", err)
		return
	}
	assert.NoError(t, err)

	// Create a different file (not in the cache)
	h, err := vfs.OpenFile("file2", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	require.NoError(t, err)

	// Close it with Flush and Release
	err = h.Flush()
	assert.NoError(t, err)
	err = h.Release()
	assert.NoError(t, err)

	// check vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string{"file1,0,false", "file2,0,false"})

	// check the underlying r.Fremote but not the modtime
	file1 := fstest.NewItem("file1", "", t1)
	file2 := fstest.NewItem("file2", "", t1)
	vfs.WaitForWriters(waitForWritersDelay)
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, []string{}, fs.ModTimeNotSupported)
}
explode_data.jsonl/7345
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 405 }
[ 2830, 3393, 56368, 1703, 6999, 7985, 2753, 7985, 1155, 353, 8840, 836, 8, 341, 7000, 11, 92941, 11, 36075, 11, 21290, 1669, 25991, 6999, 4021, 7985, 7308, 1155, 340, 16867, 21290, 2822, 197, 322, 13032, 279, 1034, 2041, 4378, 311, 432, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestJsonOutputValues(t *testing.T) { assert := assert.New(t) settings := testutil.Settings().WithSections().With(&print.Settings{ OutputValues: true, }).Build() expected, err := testutil.GetExpected("json", "json-OutputValues") assert.Nil(err) options, err := module.NewOptions().With(&module.Options{ OutputValues: true, OutputValuesPath: "output_values.json", }) assert.Nil(err) module, err := testutil.GetModule(options) assert.Nil(err) printer := NewJSON(settings) actual, err := printer.Print(module, settings) assert.Nil(err) assert.Equal(expected, actual) }
explode_data.jsonl/40858
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 212 }
[ 2830, 3393, 5014, 5097, 6227, 1155, 353, 8840, 836, 8, 341, 6948, 1669, 2060, 7121, 1155, 340, 62930, 1669, 1273, 1314, 27000, 1005, 2354, 38122, 1005, 2354, 2099, 1350, 27000, 515, 197, 80487, 6227, 25, 830, 345, 197, 16630, 11066, 282...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCloneRequest(t *testing.T) { req1, err := http.NewRequest("GET", "https://example.com", nil) assert.NoError(t, err) req2, err := http.NewRequest("GET", "https://example.com", nil) assert.NoError(t, err) req2.Header.Add("Header1", "val1") tests := []struct { name string request *http.Request headers http.Header }{ { "no_headers", req1, http.Header{}, }, { "headers", req2, http.Header{"Header1": []string{"val1"}}, }, } // run tests for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r2 := cloneRequest(tt.request) assert.EqualValues(t, tt.request.Header, r2.Header) }) } }
explode_data.jsonl/19764
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 289 }
[ 2830, 3393, 37677, 1900, 1155, 353, 8840, 836, 8, 341, 24395, 16, 11, 1848, 1669, 1758, 75274, 445, 3806, 497, 330, 2428, 1110, 8687, 905, 497, 2092, 340, 6948, 35699, 1155, 11, 1848, 692, 24395, 17, 11, 1848, 1669, 1758, 75274, 445, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestExecutorSetRollback(t *testing.T) { txe, tsv, db := newTestTxExecutor(t) defer db.Close() defer tsv.StopService() rollbackTransition := fmt.Sprintf("update _vt.dt_state set state = %d where dtid = 'aa' and state = %d", int(querypb.TransactionState_ROLLBACK), int(querypb.TransactionState_PREPARE)) db.AddQuery(rollbackTransition, &sqltypes.Result{RowsAffected: 1}) txid := newTxForPrep(tsv) err := txe.SetRollback("aa", txid) require.NoError(t, err) db.AddQuery(rollbackTransition, &sqltypes.Result{}) txid = newTxForPrep(tsv) err = txe.SetRollback("aa", txid) require.Error(t, err) require.Contains(t, err.Error(), "could not transition to ROLLBACK: aa") }
explode_data.jsonl/25173
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 264 }
[ 2830, 3393, 25255, 1649, 32355, 1419, 1155, 353, 8840, 836, 8, 341, 3244, 8371, 11, 259, 3492, 11, 2927, 1669, 501, 2271, 31584, 25255, 1155, 340, 16867, 2927, 10421, 741, 16867, 259, 3492, 30213, 1860, 2822, 197, 33559, 21768, 1669, 88...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSimulateMsgVote(t *testing.T) { app, ctx := createTestApp(false) blockTime := time.Now().UTC() ctx = ctx.WithBlockTime(blockTime) // setup 3 accounts s := rand.NewSource(1) r := rand.New(s) accounts := getTestingAccounts(t, r, app, ctx, 3) // setup a proposal content := types.NewTextProposal("Test", "description") submitTime := ctx.BlockHeader().Time depositPeriod := app.GovKeeper.GetDepositParams(ctx).MaxDepositPeriod proposal, err := types.NewProposal(content, 1, submitTime, submitTime.Add(depositPeriod)) require.NoError(t, err) app.GovKeeper.ActivateVotingPeriod(ctx, proposal) // begin a new block app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: app.LastBlockHeight() + 1, AppHash: app.LastCommitID().Hash, Time: blockTime}}) // execute operation op := simulation.SimulateMsgVote(app.AccountKeeper, app.BankKeeper, app.GovKeeper) operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "") require.NoError(t, err) var msg types.MsgVote types.ModuleCdc.UnmarshalJSON(operationMsg.Msg, &msg) require.True(t, operationMsg.OK) require.Equal(t, uint64(1), msg.ProposalId) require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Voter.String()) require.Equal(t, types.OptionYes, msg.Option) require.Equal(t, "gov", msg.Route()) require.Equal(t, types.TypeMsgVote, msg.Type()) }
explode_data.jsonl/53984
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 515 }
[ 2830, 3393, 14027, 6334, 6611, 41412, 1155, 353, 8840, 836, 8, 341, 28236, 11, 5635, 1669, 1855, 2271, 2164, 3576, 340, 47996, 1462, 1669, 882, 13244, 1005, 21183, 741, 20985, 284, 5635, 26124, 4713, 1462, 18682, 1462, 692, 197, 322, 65...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCreateInvalidRequest(t *testing.T) { api := BaseFeature{} api.SetBaseUrl("https://jsonplaceholder.typicode.com") err := api.CreatePathRequest(http.MethodPost, "/posts") require.NoError(t, err) err = api.ExecuteInvalidRequest() require.NoError(t, err) err = api.AssertResponseCode(http.StatusInternalServerError) require.NoError(t, err) }
explode_data.jsonl/81226
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 123 }
[ 2830, 3393, 4021, 7928, 1900, 1155, 353, 8840, 836, 8, 341, 54299, 1669, 5351, 13859, 16094, 54299, 4202, 71587, 445, 2428, 1110, 2236, 12384, 49286, 13634, 905, 5130, 9859, 1669, 6330, 7251, 1820, 1900, 19886, 20798, 4133, 11, 3521, 1266...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes populates the
// desired state of world with three volumes across two nodes — volume2 is
// shared by two pods on node2 — and asserts GetVolumesToAttach reports all
// three volume/node pairs.
func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	node1Name := "node1-name"
	pod1Name := "pod1-name"
	volume1Name := api.UniqueDeviceName("volume1-name")
	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
	dsw.AddNode(node1Name)
	generatedVolume1Name, podAddErr := dsw.AddPod(pod1Name, volume1Spec, node1Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod1Name,
			podAddErr)
	}
	node2Name := "node2-name"
	pod2aName := "pod2a-name"
	volume2Name := api.UniqueDeviceName("volume2-name")
	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
	dsw.AddNode(node2Name)
	generatedVolume2Name1, podAddErr := dsw.AddPod(pod2aName, volume2Spec, node2Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod2aName,
			podAddErr)
	}
	// Second pod referencing the same volume on the same node must reuse
	// the generated volume name.
	pod2bName := "pod2b-name"
	generatedVolume2Name2, podAddErr := dsw.AddPod(pod2bName, volume2Spec, node2Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod2bName,
			podAddErr)
	}
	if generatedVolume2Name1 != generatedVolume2Name2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolume2Name1,
			generatedVolume2Name2)
	}
	pod3Name := "pod3-name"
	volume3Name := api.UniqueDeviceName("volume3-name")
	volume3Spec := controllervolumetesting.GetTestVolumeSpec(string(volume3Name), volume3Name)
	generatedVolume3Name, podAddErr := dsw.AddPod(pod3Name, volume3Spec, node1Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod3Name,
			podAddErr)
	}

	// Act
	volumesToAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(volumesToAttach) != 3 {
		t.Fatalf("len(volumesToAttach) Expected: <3> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume1Name, string(volume1Name))
	verifyVolumeToAttach(t, volumesToAttach, node2Name, generatedVolume2Name1, string(volume2Name))
	verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume3Name, string(volume3Name))
}
explode_data.jsonl/40757
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 938 }
[ 2830, 3393, 13614, 96325, 1249, 30485, 44246, 3404, 82989, 12288, 19641, 96325, 1155, 353, 8840, 836, 8, 341, 197, 322, 40580, 198, 5195, 4661, 11546, 25567, 11, 716, 1669, 683, 1100, 648, 1132, 57824, 287, 2234, 2271, 18902, 11546, 25567...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
// TestIndexMultipleNodeHighConcurrency spins up a three-node m3db
// cluster and, for every read-consistency level, writes tagged series
// from many goroutines and then verifies each series becomes queryable
// through the index within a timeout.
func TestIndexMultipleNodeHighConcurrency(t *testing.T) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}
	var (
		concurrency = 10
		writeEach   = 100
		numTags     = 10
	)
	levels := []topology.ReadConsistencyLevel{
		topology.ReadConsistencyLevelOne,
		topology.ReadConsistencyLevelUnstrictMajority,
		topology.ReadConsistencyLevelMajority,
		topology.ReadConsistencyLevelUnstrictAll,
		topology.ReadConsistencyLevelAll,
	}
	for _, lvl := range levels {
		t.Run(
			fmt.Sprintf("running test for %v", lvl),
			func(t *testing.T) {
				numShards := defaultNumShards
				minShard := uint32(0)
				maxShard := uint32(numShards - 1)
				// nodes = m3db nodes
				nodes, closeFn, clientopts := makeMultiNodeSetup(t, numShards, true, true, []services.ServiceInstance{
					node(t, 0, newClusterShardsRange(minShard, maxShard, shard.Available)),
					node(t, 1, newClusterShardsRange(minShard, maxShard, shard.Available)),
					node(t, 2, newClusterShardsRange(minShard, maxShard, shard.Available)),
				})
				clientopts = clientopts.SetReadConsistencyLevel(lvl)
				defer closeFn()
				log := nodes[0].StorageOpts().InstrumentOptions().Logger()
				// Start the nodes
				for _, n := range nodes {
					require.NoError(t, n.StartServer())
				}
				c, err := client.NewClient(clientopts)
				require.NoError(t, err)
				session, err := c.NewSession()
				require.NoError(t, err)
				defer session.Close()
				var (
					insertWg       sync.WaitGroup
					numTotalErrors uint32
				)
				now := nodes[0].DB().Options().ClockOptions().NowFn()()
				start := time.Now()
				log.Info("starting data write")
				// Concurrent writers: each goroutine writes writeEach tagged
				// points; errors are accumulated atomically, not asserted
				// per-write, so all goroutines run to completion.
				for i := 0; i < concurrency; i++ {
					insertWg.Add(1)
					idx := i
					go func() {
						numErrors := uint32(0)
						for j := 0; j < writeEach; j++ {
							id, tags := genIDTags(idx, j, numTags)
							err := session.WriteTagged(testNamespaces[0], id, tags, now, float64(1.0), xtime.Second, nil)
							if err != nil {
								numErrors++
							}
						}
						atomic.AddUint32(&numTotalErrors, numErrors)
						insertWg.Done()
					}()
				}
				insertWg.Wait()
				require.Zero(t, numTotalErrors)
				log.Info("test data written", zap.Duration("took", time.Since(start)))
				log.Info("waiting to see if data is indexed")
				var (
					indexTimeout = 10 * time.Second
					fetchWg      sync.WaitGroup
				)
				// Concurrent readers: poll until each writer's last series
				// shows up in the index, or fail after indexTimeout.
				for i := 0; i < concurrency; i++ {
					fetchWg.Add(1)
					idx := i
					go func() {
						id, tags := genIDTags(idx, writeEach-1, numTags)
						indexed := xclock.WaitUntil(func() bool {
							found := isIndexed(t, session, testNamespaces[0], id, tags)
							return found
						}, indexTimeout)
						assert.True(t, indexed, "timed out waiting for index retrieval")
						fetchWg.Done()
					}()
				}
				fetchWg.Wait()
				log.Info("data is indexed", zap.Duration("took", time.Since(start)))
			})
	}
}
explode_data.jsonl/64682
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1291 }
[ 2830, 3393, 1552, 32089, 1955, 11976, 79611, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 7039, 368, 442, 4599, 10706, 421, 582, 2299, 3730, 264, 2805, 1598, 198, 197, 532, 2405, 2399, 197, 37203, 15973, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestLdapQuery(t *testing.T) { m := newMockLdap() l := &ldapBackend{l: m} _, err := l.Query(nil) if err != onedb.ErrQueryIsNil { t.Error("expected error") } m.SearchErr = errors.New("fail") r := ldap.NewSearchRequest("baseDn", ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, "filter", []string{"attributes"}, nil) _, err = l.Query(r) queries := m.MethodsCalled["Search"] if err == nil || len(m.MethodsCalled) != 1 || len(queries) != 1 { t.Error("expected Search method to be called on backend and return err") } m.SearchErr = nil entries := []*ldap.Entry{{DN: "item1"}, {DN: "item2"}} m.SearchReturn = &ldap.SearchResult{Entries: entries} s, err := l.Query(r) if rows := s.Entries; len(rows) != len(entries) || rows[0].DN != "item1" || rows[1].DN != "item2" { t.Error("expected rows that were passed in") } }
explode_data.jsonl/9457
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 347 }
[ 2830, 3393, 43, 91294, 2859, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 501, 11571, 43, 91294, 741, 8810, 1669, 609, 38665, 29699, 94617, 25, 296, 532, 197, 6878, 1848, 1669, 326, 15685, 27907, 340, 743, 1848, 961, 389, 93727, 27862, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestGraph(t *testing.T) { api_token, err := get_session_token() if err != nil { log.Fatal(err) } rc := resty.New() rc.SetHeader("Apitoken", api_token) Convey("Get endpoint list: GET /graph/endpoint", t, func() { r := []map[string]interface{}{} resp, _ := rc.R().SetQueryParam("q", ".+"). SetResult(&r). Get(fmt.Sprintf("%s/graph/endpoint", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(len(r), ShouldBeGreaterThanOrEqualTo, 0) if len(r) == 0 { return } eid := r[0]["id"] r = []map[string]interface{}{} Convey("Get counter list: GET /graph/endpoint_counter", func() { resp, _ := rc.R(). SetQueryParam("eid", fmt.Sprintf("%v", eid)). SetQueryParam("metricQuery", ".+"). SetQueryParam("limit", "1"). SetResult(&r). Get(fmt.Sprintf("%s/graph/endpoint_counter", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(r, ShouldNotBeEmpty) }) }) }
explode_data.jsonl/48356
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 407 }
[ 2830, 3393, 11212, 1155, 353, 8840, 836, 8, 341, 54299, 6458, 11, 1848, 1669, 633, 12316, 6458, 741, 743, 1848, 961, 2092, 341, 197, 6725, 26133, 3964, 340, 197, 630, 30295, 1669, 2732, 88, 7121, 741, 30295, 4202, 4047, 445, 10611, 27...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidRandom(t *testing.T) { rand.Seed(time.Now().UnixNano()) b := make([]byte, 100000) start := time.Now() for time.Since(start) < time.Second*3 { n := rand.Int() % len(b) rand.Read(b[:n]) validpayload(b[:n], 0) } start = time.Now() for time.Since(start) < time.Second*3 { n := rand.Int() % len(b) makeRandomJSONChars(b[:n]) validpayload(b[:n], 0) } }
explode_data.jsonl/43444
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 177 }
[ 2830, 3393, 4088, 13999, 1155, 353, 8840, 836, 8, 341, 7000, 437, 5732, 291, 9730, 13244, 1005, 55832, 83819, 2398, 2233, 1669, 1281, 10556, 3782, 11, 220, 16, 15, 15, 15, 15, 15, 340, 21375, 1669, 882, 13244, 741, 2023, 882, 93404, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestLauncherNotControlledByUs(t *testing.T) { f := newFixture(t) startTime := metav1.Now() completionTime := metav1.Now() mpiJob := newMPIJob("test", int32Ptr(64), 1, gpuResourceName, &startTime, &completionTime) f.setUpMPIJob(mpiJob) fmjc := f.newFakeMPIJobController() launcher := fmjc.newLauncher(mpiJob, "kubectl-delivery", isGPULauncher(mpiJob)) launcher.OwnerReferences = nil f.setUpLauncher(launcher) f.runExpectError(getKey(mpiJob, t)) }
explode_data.jsonl/29950
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 189 }
[ 2830, 3393, 91176, 2623, 3273, 832, 1359, 3558, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 501, 18930, 1155, 340, 21375, 1462, 1669, 77520, 16, 13244, 741, 32810, 14386, 1462, 1669, 77520, 16, 13244, 2822, 197, 39479, 12245, 1669, 501, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_docs_reindex_9a4d5e41c52c20635d1fd9c6e13f6c7a(t *testing.T) { es, _ := elasticsearch.NewDefaultClient() // tag:9a4d5e41c52c20635d1fd9c6e13f6c7a[] { res, err := es.Index( "metricbeat-2016.05.30", strings.NewReader(`{ "system.cpu.idle.pct": 0.908 }`), es.Index.WithDocumentID("1"), es.Index.WithRefresh("true"), es.Index.WithPretty(), ) fmt.Println(res, err) if err != nil { // SKIP t.Fatalf("Error getting the response: %s", err) // SKIP } // SKIP defer res.Body.Close() // SKIP } { res, err := es.Index( "metricbeat-2016.05.31", strings.NewReader(`{ "system.cpu.idle.pct": 0.105 }`), es.Index.WithDocumentID("1"), es.Index.WithRefresh("true"), es.Index.WithPretty(), ) fmt.Println(res, err) if err != nil { // SKIP t.Fatalf("Error getting the response: %s", err) // SKIP } // SKIP defer res.Body.Close() // SKIP } // end:9a4d5e41c52c20635d1fd9c6e13f6c7a[] }
explode_data.jsonl/2234
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 473 }
[ 2830, 3393, 49692, 1288, 1252, 62, 24, 64, 19, 67, 20, 68, 19, 16, 66, 20, 17, 66, 17, 15, 21, 18, 20, 67, 16, 6902, 24, 66, 21, 68, 16, 18, 69, 21, 66, 22, 64, 1155, 353, 8840, 836, 8, 341, 78966, 11, 716, 1669, 655, 27...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStatefulPodControlUpdatesIdentity(t *testing.T) { recorder := record.NewFakeRecorder(10) set := newStatefulSet(3) pod := newStatefulSetPod(set, 0) fakeClient := fake.NewSimpleClientset(set, pod) control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder) var updated *v1.Pod fakeClient.PrependReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) { update := action.(core.UpdateAction) updated = update.GetObject().(*v1.Pod) return true, update.GetObject(), nil }) pod.Name = "goo-0" if err := control.UpdateStatefulPod(set, pod); err != nil { t.Errorf("Successful update returned an error: %s", err) } events := collectEvents(recorder.Events) if eventCount := len(events); eventCount != 1 { t.Errorf("Pod update successful:got %d events,but want 1", eventCount) } else if !strings.Contains(events[0], v1.EventTypeNormal) { t.Errorf("Found unexpected non-normal event %s", events[0]) } if !identityMatches(set, updated) { t.Error("Name update failed identity does not match") } }
explode_data.jsonl/17903
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 367 }
[ 2830, 3393, 1397, 1262, 23527, 3273, 37091, 18558, 1155, 353, 8840, 836, 8, 341, 67904, 1358, 1669, 3255, 7121, 52317, 47023, 7, 16, 15, 340, 8196, 1669, 36848, 1262, 1649, 7, 18, 340, 3223, 347, 1669, 36848, 1262, 1649, 23527, 14171, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} err := fx.Initialize(&vm) if err != nil { t.Fatal(err) } }
explode_data.jsonl/56448
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 99 }
[ 2830, 3393, 81856, 9928, 1155, 353, 8840, 836, 8, 341, 54879, 1669, 511, 4672, 17, 20, 21, 74, 16, 8298, 8787, 11187, 515, 197, 197, 36913, 25, 13482, 34607, 7121, 3675, 3148, 197, 24201, 25, 256, 8392, 16766, 2201, 38837, 197, 532, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestAssociationEdge(t *testing.T) { edgeInfo := getTestEdgeInfo(t, "account") edge := edgeInfo.GetAssociationEdgeByName("Folders") expectedAssocEdge := &AssociationEdge{ EdgeConst: "AccountToFoldersEdge", commonEdgeInfo: getCommonEdgeInfo( "Folders", schemaparser.GetEntConfigFromName("folder"), ), TableName: "account_folders_edges", EdgeActions: []*EdgeAction{ { Action: "ent.AddEdgeAction", CustomActionName: "AccountAddFolderAction", CustomGraphQLName: "accountFolderAdd", ExposeToGraphQL: true, }, { Action: "ent.RemoveEdgeAction", ExposeToGraphQL: true, }, }, } testAssocEdge(t, edge, expectedAssocEdge) // singular version of edge assert.Equal(t, "Folder", edge.Singular()) }
explode_data.jsonl/73723
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 317 }
[ 2830, 3393, 63461, 11656, 1155, 353, 8840, 836, 8, 341, 197, 7186, 1731, 1669, 633, 2271, 11656, 1731, 1155, 11, 330, 4608, 1138, 197, 7186, 1669, 6821, 1731, 2234, 63461, 11656, 16898, 445, 92779, 5130, 42400, 98628, 11656, 1669, 609, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestArchiveReaderPreReadShort(t *testing.T) { dataStr := "short data" ar, err := NewArchiveReader(bytes.NewReader([]byte(dataStr))) if err != nil { t.Errorf("newArchiveReader(bytes.NewReader([]byte(%s))) returned err: %v, want nil", dataStr, err) } got, err := io.ReadAll(ar) if err != nil { t.Errorf("got error reading archive reader: %v, want nil", err) } if string(got) != dataStr { t.Errorf("got %s, want %s", string(got), dataStr) } // Pre-read nothing. dataStr = "" ar, err = NewArchiveReader(bytes.NewReader([]byte(dataStr))) if err != ErrPreReadError { t.Errorf("newArchiveReader(bytes.NewReader([]byte(%s))) returned err: %v, want %v", dataStr, err, ErrPreReadError) } got, err = io.ReadAll(ar) if err != nil { t.Errorf("got error reading archive reader: %v, want nil", err) } if string(got) != dataStr { t.Errorf("got %s, want %s", string(got), dataStr) } }
explode_data.jsonl/56414
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 354 }
[ 2830, 3393, 42502, 5062, 4703, 4418, 12472, 1155, 353, 8840, 836, 8, 341, 8924, 2580, 1669, 330, 8676, 821, 698, 69340, 11, 1848, 1669, 1532, 42502, 5062, 23158, 68587, 10556, 3782, 2592, 2580, 5929, 743, 1848, 961, 2092, 341, 197, 3244...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestLong2IP(t *testing.T) { for _, v := range []struct { ip string long uint }{ {"127.0.0.1", 2130706433}, {"0.0.0.0", 0}, {"255.255.255.255", 4294967295}, {"192.168.1.1", 3232235777}, } { expected, err := Long2IP(v.long) if err != nil { t.Errorf("ip:%s long:%d err:%v", v.ip, v.long, err) } if expected.String() != v.ip { t.Errorf(" long:%d ip:%s != expected:%s", v.long, v.ip, expected.String()) } } // 在64位机器上运行,否者输入值将超过限制 if 32<<(^uint(0)>>63) == 64 { _, err := Long2IP(1<<64 - 1) if err == nil { t.Errorf("long:%d out of range", uint64(1<<64-1)) } } }
explode_data.jsonl/68041
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 343 }
[ 2830, 3393, 6583, 17, 3298, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 348, 1669, 2088, 3056, 1235, 341, 197, 46531, 256, 914, 198, 197, 17514, 2622, 198, 197, 59403, 197, 197, 4913, 16, 17, 22, 13, 15, 13, 15, 13, 16, 497, 220, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestScaleTo(t *testing.T) { t.Parallel() s := []float64{3, 4, 1, 7, 5} sCopy := make([]float64, len(s)) copy(sCopy, s) c := 5.0 truth := []float64{15, 20, 5, 35, 25} dst := make([]float64, len(s)) ScaleTo(dst, c, s) if !Same(dst, truth) { t.Errorf("Scale to does not match. Got %v, want %v", dst, truth) } if !Same(s, sCopy) { t.Errorf("Source modified during call. Got %v, want %v", s, sCopy) } if !Panics(func() { ScaleTo(dst, 0, []float64{1}) }) { t.Errorf("Expected panic with different slice lengths") } }
explode_data.jsonl/1239
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 242 }
[ 2830, 3393, 6947, 1249, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 1903, 1669, 3056, 3649, 21, 19, 90, 18, 11, 220, 19, 11, 220, 16, 11, 220, 22, 11, 220, 20, 532, 1903, 12106, 1669, 1281, 10556, 3649, 21, 19, 11, 242...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBodyStructure_Format(t *testing.T) { for i, test := range bodyStructureTests { fields := test.bodyStructure.Format() got, err := formatFields(fields) if err != nil { t.Error(err) continue } expected, _ := formatFields(test.fields) if got != expected { t.Errorf("Invalid body structure fields for #%v: has \n%v\n but expected \n%v", i, got, expected) } } }
explode_data.jsonl/43055
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 152 }
[ 2830, 3393, 5444, 22952, 72999, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 1273, 1669, 2088, 2487, 22952, 18200, 341, 197, 55276, 1669, 1273, 5079, 22952, 9978, 741, 197, 3174, 354, 11, 1848, 1669, 3561, 8941, 37701, 340, 197, 743, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestCreateSparseCerts(t *testing.T) { for _, test := range certstestutil.GetSparseCertTestCases(t) { t.Run(test.Name, func(t *testing.T) { tmpdir := testutil.SetupTempDir(t) defer os.RemoveAll(tmpdir) certstestutil.WritePKIFiles(t, tmpdir, test.Files) r := workflow.NewRunner() r.AppendPhase(NewCertsPhase()) r.SetDataInitializer(func(*cobra.Command) (workflow.RunData, error) { certsData := &testCertsData{ cfg: testutil.GetDefaultInternalConfig(t), } certsData.cfg.CertificatesDir = tmpdir return certsData, nil }) if err := r.Run(); (err != nil) != test.ExpectError { t.Fatalf("expected error to be %t, got %t (%v)", test.ExpectError, (err != nil), err) } }) } }
explode_data.jsonl/4933
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 312 }
[ 2830, 3393, 4021, 98491, 34, 15546, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 1273, 1669, 2088, 2777, 267, 477, 1314, 2234, 98491, 36934, 2271, 37302, 1155, 8, 341, 197, 3244, 16708, 8623, 2967, 11, 2915, 1155, 353, 8840, 836, 8, 341,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMul(t *testing.T) { m1 := Money{123, "EUR"} m2 := Money{200, "EUR"} m3 := m1.Mul(m2) if m3.Get() != 2.46 { t.Errorf("expected money amount to be %v, got %v", 2.46, m3.Get()) } }
explode_data.jsonl/61369
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 99 }
[ 2830, 3393, 59155, 1155, 353, 8840, 836, 8, 341, 2109, 16, 1669, 17633, 90, 16, 17, 18, 11, 330, 54738, 16707, 2109, 17, 1669, 17633, 90, 17, 15, 15, 11, 330, 54738, 16707, 2109, 18, 1669, 296, 16, 1321, 360, 1255, 17, 340, 743, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestWithRequestContext(t *testing.T) { w := httptest.NewRecorder() r, err := http.NewRequest(http.MethodGet, "/random", nil) require.NoError(t, err) th := func(resp http.ResponseWriter, req *http.Request) { assert.Equal(t, "val", req.Context().Value("key")) } middleware.WithRequestContext(th)(w, r) }
explode_data.jsonl/57377
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 117 }
[ 2830, 3393, 2354, 1900, 1972, 1155, 353, 8840, 836, 8, 341, 6692, 1669, 54320, 70334, 7121, 47023, 741, 7000, 11, 1848, 1669, 1758, 75274, 19886, 20798, 1949, 11, 3521, 11463, 497, 2092, 340, 17957, 35699, 1155, 11, 1848, 692, 70479, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestIntegration_DirectRequest is an end-to-end direct-request test:
// it mocks the eth client and a price HTTP endpoint, creates a job from
// a TOML spec via the web API, injects an on-chain run log, advances
// heads, and asserts the 3-task pipeline completes without errors.
func TestIntegration_DirectRequest(t *testing.T) {
	config := cltest.NewTestEVMConfig(t)
	// HTTP adapter target: canned price response, plus an awaiter to
	// confirm the pipeline actually hit it.
	httpAwaiter := cltest.NewAwaiter()
	httpServer, assertCalled := cltest.NewHTTPMockServer(
		t,
		http.StatusOK,
		"GET",
		`{"USD": "31982"}`,
		func(header http.Header, _ string) {
			httpAwaiter.ItHappened()
		},
	)
	defer assertCalled()
	ethClient, sub, assertMockCalls := cltest.NewEthMocks(t)
	defer assertMockCalls()
	app, cleanup := cltest.NewApplication(t,
		ethClient,
	)
	defer cleanup()
	blocks := cltest.NewBlocks(t, 12)
	sub.On("Err").Return(nil).Maybe()
	sub.On("Unsubscribe").Return(nil).Maybe()
	ethClient.On("HeadByNumber", mock.Anything, mock.AnythingOfType("*big.Int")).Return(blocks.Head(10), nil)
	// Capture the head channel the app subscribes with, so heads can be
	// pushed into it later in the test.
	var headCh chan<- *models.Head
	ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().
		Run(func(args mock.Arguments) {
			headCh = args.Get(1).(chan<- *models.Head)
		}).
		Return(sub, nil)
	ethClient.On("Dial", mock.Anything).Return(nil)
	ethClient.On("ChainID", mock.Anything).Maybe().Return(app.Store.Config.ChainID(), nil)
	ethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]types.Log{}, nil)
	ethClient.On("HeadByNumber", mock.Anything, mock.AnythingOfType("*big.Int")).Return(blocks.Head(0), nil)
	logsCh := cltest.MockSubscribeToLogsCh(ethClient, sub)
	require.NoError(t, app.Start())
	store := app.Store
	eventBroadcaster := postgres.NewEventBroadcaster(config.DatabaseURL(), 0, 0)
	eventBroadcaster.Start()
	defer eventBroadcaster.Close()
	pipelineORM := pipeline.NewORM(store.DB)
	jobORM := job.NewORM(store.ORM.DB, config, pipelineORM, eventBroadcaster, &postgres.NullAdvisoryLocker{})
	// Create the job from the TOML spec, retargeting its URL at the
	// local mock server.
	directRequestSpec := string(cltest.MustReadFile(t, "../testdata/tomlspecs/direct-request-spec.toml"))
	directRequestSpec = strings.Replace(directRequestSpec, "http://example.com", httpServer.URL, 1)
	request := web.CreateJobRequest{TOML: directRequestSpec}
	output, err := json.Marshal(request)
	require.NoError(t, err)
	job := cltest.CreateJobViaWeb(t, app, output)
	eventBroadcaster.Notify(postgres.ChannelJobCreated, "")
	// Fabricate the on-chain request log and deliver it through the
	// captured log subscription channel.
	runLog := cltest.NewRunLog(t, job.ExternalIDEncodeStringToTopic(), job.DirectRequestSpec.ContractAddress.Address(), cltest.NewAddress(), 1, `{}`)
	runLog.BlockHash = blocks.Head(1).Hash
	var logs chan<- types.Log
	cltest.CallbackOrTimeout(t, "obtain log channel", func() {
		logs = <-logsCh
	}, 5*time.Second)
	cltest.CallbackOrTimeout(t, "send run log", func() {
		logs <- runLog
	}, 30*time.Second)
	eventBroadcaster.Notify(postgres.ChannelRunStarted, "")
	// Advance heads so confirmations accumulate past the run log block.
	for i := 0; i < 12; i++ {
		headCh <- blocks.Head(uint64(i))
	}
	httpAwaiter.AwaitOrFail(t)
	runs := cltest.WaitForPipelineComplete(t, 0, job.ID, 1, 3, jobORM, 5*time.Second, 300*time.Millisecond)
	require.Len(t, runs, 1)
	run := runs[0]
	require.Len(t, run.PipelineTaskRuns, 3)
	require.Empty(t, run.PipelineTaskRuns[0].Error)
	require.Empty(t, run.PipelineTaskRuns[1].Error)
	require.Empty(t, run.PipelineTaskRuns[2].Error)
}
explode_data.jsonl/29845
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1126 }
[ 2830, 3393, 52464, 1557, 1226, 1900, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 1185, 1944, 7121, 2271, 36, 11187, 2648, 1155, 692, 28080, 37352, 261, 1669, 1185, 1944, 7121, 37352, 261, 741, 28080, 5475, 11, 2060, 20960, 1669, 1185, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPodExists(t *testing.T) { cancel, controller := newController() defer cancel() wf := wfv1.MustUnmarshalWorkflow(helloWorldWf) ctx := context.Background() woc := newWorkflowOperationCtx(wf, controller) err := woc.setExecWorkflow(ctx) assert.NoError(t, err) mainCtr := woc.execWf.Spec.Templates[0].Container pod, err := woc.createWorkflowPod(ctx, wf.Name, []apiv1.Container{*mainCtr}, &wf.Spec.Templates[0], &createWorkflowPodOpts{}) assert.NoError(t, err) assert.NotNil(t, pod) pods, err := listPods(woc) assert.NoError(t, err) assert.Len(t, pods.Items, 1) // Sleep 1 second to wait for informer getting pod info time.Sleep(time.Second) existingPod, doesExist, err := woc.podExists(pod.ObjectMeta.Name) assert.NoError(t, err) assert.NotNil(t, existingPod) assert.True(t, doesExist) assert.EqualValues(t, pod, existingPod) }
explode_data.jsonl/75404
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 340 }
[ 2830, 3393, 23527, 15575, 1155, 353, 8840, 836, 8, 341, 84441, 11, 6461, 1669, 501, 2051, 741, 16867, 9121, 2822, 6692, 69, 1669, 289, 27890, 16, 50463, 1806, 27121, 62768, 3203, 4791, 10134, 54, 69, 340, 20985, 1669, 2266, 19047, 741, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDeviceService_PinRelease_UUID(t *testing.T) { // Given client, mux, cleanup := newFixture() defer cleanup() uuid := "123456789123456789" releaseID := int64(14332) mux.HandleFunc( "/"+deviceBasePath, func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, http.MethodPatch) expected := "%24filter=uuid+eq+%27" + uuid + "%27" if r.URL.RawQuery != expected { http.Error(w, fmt.Sprintf("query = %s ; expected %s", r.URL.RawQuery, expected), 500) return } b, err := ioutil.ReadAll(r.Body) assert.NilError(t, err) assert.Equal(t, `{"should_be_running__release":"14332"}`+"\n", string(b)) fmt.Fprint(w, "OK") }, ) // When resp, err := client.Device.PinRelease(context.Background(), DeviceUUID(uuid), releaseID) // Then assert.NilError(t, err) assert.Equal(t, "OK", string(resp)) }
explode_data.jsonl/46820
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 360 }
[ 2830, 3393, 6985, 1860, 35453, 16077, 57499, 1155, 353, 8840, 836, 8, 341, 197, 322, 16246, 198, 25291, 11, 59807, 11, 21290, 1669, 501, 18930, 741, 16867, 21290, 741, 10676, 2423, 1669, 330, 16, 17, 18, 19, 20, 21, 22, 23, 24, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestMarkIndexComplete(t *testing.T) { if testing.Short() { t.Skip() } dbtesting.SetupGlobalTestDB(t) db := &dbImpl{db: dbconn.Global} insertIndexes(t, dbconn.Global, Index{ID: 1, State: "queued"}) if err := db.MarkIndexComplete(context.Background(), 1); err != nil { t.Fatalf("unexpected error marking index as complete: %s", err) } if index, exists, err := db.GetIndexByID(context.Background(), 1); err != nil { t.Fatalf("unexpected error getting index: %s", err) } else if !exists { t.Fatal("expected record to exist") } else if index.State != "completed" { t.Errorf("unexpected state. want=%q have=%q", "completed", index.State) } }
explode_data.jsonl/11070
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 244 }
[ 2830, 3393, 8949, 1552, 12548, 1155, 353, 8840, 836, 8, 341, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 741, 197, 532, 20939, 8840, 39820, 11646, 2271, 3506, 1155, 340, 20939, 1669, 609, 1999, 9673, 90, 1999, 25, 2927, 5148, 27381, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestRespondentClosePipeRecv(t *testing.T) { s := GetSocket(t, NewSocket) p := GetSocket(t, surveyor.NewSocket) ConnectPair(t, s, p) MustSucceed(t, s.SetOption(mangos.OptionReadQLen, 1)) MustSucceed(t, p.Send([]byte(""))) m, e := s.RecvMsg() MustSucceed(t, e) // Fill the pipe for i := 0; i < 10; i++ { // These all will work, but the back-pressure will go all the // way to the sender. MustSucceed(t, p.Send([]byte(""))) } MustSucceed(t, m.Pipe.Close()) time.Sleep(time.Millisecond * 10) MustSucceed(t, s.Close()) MustSucceed(t, p.Close()) }
explode_data.jsonl/57402
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 250 }
[ 2830, 3393, 65354, 306, 7925, 34077, 63483, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 2126, 10286, 1155, 11, 1532, 10286, 340, 3223, 1669, 2126, 10286, 1155, 11, 10572, 269, 7121, 10286, 340, 197, 14611, 12443, 1155, 11, 274, 11, 281, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGenerateNormalizedName(t *testing.T) { pLabels := map[string]string{"user": "dev"} req1 := metav1.LabelSelectorRequirement{ Key: "role", Operator: metav1.LabelSelectorOpIn, Values: []string{"db", "app"}, } pExprs := []metav1.LabelSelectorRequirement{req1} normalizedPodSelector := "role In [db app] And user In [dev]" nLabels := map[string]string{"scope": "test"} req2 := metav1.LabelSelectorRequirement{ Key: "env", Operator: metav1.LabelSelectorOpNotIn, Values: []string{"staging", "prod"}, } nExprs := []metav1.LabelSelectorRequirement{req2} pSelector := metav1.LabelSelector{ MatchLabels: pLabels, MatchExpressions: pExprs, } nSelector := metav1.LabelSelector{ MatchLabels: nLabels, MatchExpressions: nExprs, } normalizedNSSelector := "env NotIn [staging prod] And scope In [test]" tables := []struct { namespace string pSelector *metav1.LabelSelector nSelector *metav1.LabelSelector expName string }{ { "nsName", &pSelector, nil, fmt.Sprintf("namespace=nsName And podSelector=%s", normalizedPodSelector), }, { "nsName", nil, nil, "namespace=nsName", }, { "nsName", nil, &nSelector, fmt.Sprintf("namespaceSelector=%s", normalizedNSSelector), }, { "nsName", &pSelector, &nSelector, fmt.Sprintf("namespaceSelector=%s And podSelector=%s", normalizedNSSelector, normalizedPodSelector), }, } for _, table := range tables { name := generateNormalizedName(table.namespace, table.pSelector, table.nSelector) if table.expName != name { t.Errorf("Unexpected normalized name. Expected %s, got %s", table.expName, name) } } }
explode_data.jsonl/82438
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 716 }
[ 2830, 3393, 31115, 12206, 20167, 1155, 353, 8840, 836, 8, 341, 3223, 23674, 1669, 2415, 14032, 30953, 4913, 872, 788, 330, 3583, 16707, 24395, 16, 1669, 77520, 16, 4679, 5877, 75802, 515, 197, 55242, 25, 414, 330, 5778, 756, 197, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPaymentsService_List(t *testing.T) { setup() defer teardown() _ = tClient.WithAuthenticationValue("test_token") tMux.HandleFunc("/v2/payments", func(w http.ResponseWriter, r *http.Request) { testHeader(t, r, AuthHeader, "Bearer test_token") testMethod(t, r, "GET") if _, ok := r.Header[AuthHeader]; !ok { w.WriteHeader(http.StatusUnauthorized) } w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(testdata.ListPaymentsResponse)) }) res, err := tClient.Payments.List(nil) if err != nil { t.Fatal(err) } if res.Count == 0 { t.Errorf("mismatching info. want %v, got %v", 0, res.Count) } }
explode_data.jsonl/16985
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 262 }
[ 2830, 3393, 87646, 1860, 27104, 1155, 353, 8840, 836, 8, 341, 84571, 741, 16867, 49304, 741, 197, 62, 284, 259, 2959, 26124, 19297, 1130, 445, 1944, 6458, 1138, 3244, 44, 2200, 63623, 4283, 85, 17, 72423, 1368, 497, 2915, 3622, 1758, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func Test_newEmptyTags(t *testing.T) { type args struct { regexp string globalTags map[string]string } tests := []struct { name string args args want *dkTags }{ { name: "case", args: args{regexp: "", globalTags: map[string]string{}}, want: &dkTags{ regexpString: "", globalTags: map[string]string{}, tags: make(map[string]string), replaceTags: make(map[string]string), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := newEmptyTags(tt.args.regexp, tt.args.globalTags); !reflect.DeepEqual(got, tt.want) { t.Errorf("newEmptyTags() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/14405
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 316 }
[ 2830, 3393, 5921, 3522, 15930, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 37013, 4580, 257, 914, 198, 197, 18842, 15930, 2415, 14032, 30953, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 31215, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUnmarshalBinary(t *testing.T) { tests := map[string]struct { input []byte output []byte expectedError error }{ "[]byte: \\x01\\x02\\x03": { []byte("s:3:\"\x01\x02\x03\";"), []byte{1, 2, 3}, nil, }, "not a string": {[]byte("N;"), []byte{}, errors.New("not a string")}, } for testName, test := range tests { t.Run(testName, func(t *testing.T) { var result []byte err := phpserialize.Unmarshal(test.input, &result) if test.expectedError == nil { expectErrorToNotHaveOccurred(t, err) if string(result) != string(test.output) { t.Errorf("Expected '%v', got '%v'", result, test.output) } } else { expectErrorToEqual(t, err, test.expectedError) } }) } }
explode_data.jsonl/27020
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 344 }
[ 2830, 3393, 1806, 27121, 21338, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 2415, 14032, 60, 1235, 341, 197, 22427, 260, 3056, 3782, 198, 197, 21170, 286, 3056, 3782, 198, 197, 42400, 1454, 1465, 198, 197, 59403, 197, 197, 1, 1294, 378...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetJobsError(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() ctx := context.Background() httpmock.RegisterResponder("GET", fakeJobsURL, nil) client := getTestJobManagerClient() resp, err := client.GetJobs(ctx, testURL) assert.Nil(t, resp) assert.NotNil(t, err) assert.True(t, strings.HasPrefix(err.Error(), "GetJobs call failed with status FAILED")) }
explode_data.jsonl/32349
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 146 }
[ 2830, 3393, 1949, 40667, 1454, 1155, 353, 8840, 836, 8, 341, 28080, 16712, 14140, 731, 741, 16867, 1758, 16712, 8934, 16856, 3036, 14828, 741, 20985, 1669, 2266, 19047, 741, 28080, 16712, 19983, 30884, 445, 3806, 497, 12418, 40667, 3144, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGocloak_RestyClient(t *testing.T) { t.Parallel() client := NewClientWithDebug(t) restyClient := client.RestyClient() FailIf( t, restyClient == resty.New(), "Resty client of the GoCloak client and the Default resty client are equal", ) }
explode_data.jsonl/79502
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 97 }
[ 2830, 3393, 38, 509, 385, 585, 2568, 40239, 2959, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 25291, 1669, 1532, 2959, 2354, 7939, 1155, 340, 197, 3927, 88, 2959, 1669, 2943, 31129, 88, 2959, 741, 12727, 604, 2679, 1006, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestSetLabel exercises the device's SetLabel flow end to end: it asks the
// operator to confirm on the device, then reads the device features back and
// checks that the stored label matches what was set.
func TestSetLabel(t *testing.T) {
	var expectedLabel = "test.LABEL"
	t.Log("We need to test the SetLabel.")
	{
		// Interactive step: a human must confirm the label change on the device.
		fmt.Println("[WHAT TO DO] Click on \"Confirm\"")
		str, msgType := common.Call(client, client.SetLabel(expectedLabel))
		if msgType != 2 {
			// NOTE(review): 2 presumably is the success/ack message type —
			// confirm against the protocol's message-type constants.
			t.Errorf("\t\tExpected msgType=2, received %d", msgType)
		} else {
			t.Log("\tChecking SetLabel")
			{
				// Re-read the device features to verify the label stuck.
				str, msgType = common.Call(client, client.GetFeatures())
				if msgType != 17 {
					// NOTE(review): 17 appears to be the Features message type.
					t.Error("\t\tError initializing the device")
				} else {
					var features messages.Features
					err := json.Unmarshal([]byte(str), &features)
					if err == nil {
						if features.GetLabel() != expectedLabel {
							t.Errorf("\t\tExpected label=%s, received %s", expectedLabel, features.GetLabel())
						} else {
							t.Log("\t\tEverything went fine, \\ʕ◔ϖ◔ʔ/ YAY!")
						}
					}
				}
			}
		}
	}
}
explode_data.jsonl/46202
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 385 }
[ 2830, 3393, 1649, 2476, 1155, 353, 8840, 836, 8, 1476, 2405, 3601, 2476, 284, 330, 1944, 1214, 17114, 698, 3244, 5247, 445, 1654, 1184, 311, 1273, 279, 2573, 2476, 13053, 197, 515, 197, 11009, 12419, 10937, 59860, 5146, 9319, 60, 9189, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestServerHandshakeContextCancellation checks that a server-side TLS
// handshake blocked waiting for a client hello aborts when its context is
// canceled, reports context.Canceled, and leaves the connection closed.
func TestServerHandshakeContextCancellation(t *testing.T) {
	c, s := localPipe(t)
	ctx, cancel := context.WithCancel(context.Background())
	unblockClient := make(chan struct{})
	defer close(unblockClient)
	go func() {
		// Cancel immediately; hold off closing the client end of the pipe
		// until the test returns (the deferred close of unblockClient lets
		// this goroutine proceed), so the handshake fails due to the
		// cancellation and not due to the pipe closing.
		cancel()
		<-unblockClient
		_ = c.Close()
	}()
	conn := Server(s, testConfig)
	// Initiates server side handshake, which will block until a client hello is read
	// unless the cancellation works.
	err := conn.HandshakeContext(ctx)
	if err == nil {
		t.Fatal("Server handshake did not error when the context was canceled")
	}
	if err != context.Canceled {
		t.Errorf("Unexpected server handshake error: %v", err)
	}
	if runtime.GOARCH == "wasm" {
		t.Skip("conn.Close does not error as expected when called multiple times on WASM")
	}
	// The canceled handshake should already have closed the connection, so a
	// second Close is expected to report an error.
	err = conn.Close()
	if err == nil {
		t.Error("Server connection was not closed when the context was canceled")
	}
}
explode_data.jsonl/36361
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 289 }
[ 2830, 3393, 5475, 2314, 29661, 1972, 82298, 1155, 353, 8840, 836, 8, 341, 1444, 11, 274, 1669, 2205, 34077, 1155, 340, 20985, 11, 9121, 1669, 2266, 26124, 9269, 5378, 19047, 2398, 20479, 4574, 2959, 1669, 1281, 35190, 2036, 37790, 16867, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestAccAWSCloudwatchLogDestination_basic is a Terraform acceptance test:
// it applies a configuration containing a CloudWatch Logs destination,
// verifies the destination exists in AWS, then checks the resource can be
// imported with state matching the applied configuration.
func TestAccAWSCloudwatchLogDestination_basic(t *testing.T) {
	var destination cloudwatchlogs.Destination
	resourceName := "aws_cloudwatch_log_destination.test"
	// Random suffix keeps AWS resource names unique across concurrent runs.
	rstring := acctest.RandString(5)
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSCloudwatchLogDestinationDestroy,
		Steps: []resource.TestStep{
			{
				// Step 1: apply and confirm the destination was created.
				Config: testAccAWSCloudwatchLogDestinationConfig(rstring),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSCloudwatchLogDestinationExists(resourceName, &destination),
				),
			},
			{
				// Step 2: round-trip through `terraform import`.
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
explode_data.jsonl/74152
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 285 }
[ 2830, 3393, 14603, 14419, 3540, 52178, 14321, 2201, 33605, 34729, 1155, 353, 8840, 836, 8, 341, 2405, 9106, 9437, 14321, 22081, 909, 20646, 198, 50346, 675, 1669, 330, 8635, 37356, 14321, 5224, 56344, 5958, 698, 7000, 917, 1669, 1613, 678...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// Test_singleFileRotate simulates log rotation for a single-file reader: it
// writes a file, reads it through the reader, renames it away (rotation)
// while creating a fresh file under the original name, and verifies the
// reader follows the new inode and records the rotated file as done.
func Test_singleFileRotate(t *testing.T) {
	fileName := filepath.Join(os.TempDir(), "test.singleFile")
	fileNameRotated := filepath.Join(os.TempDir(), "test.singleFile.rotated")
	metaDir := filepath.Join(os.TempDir(), "rotates")
	//create file & write file
	CreateFile(fileName, "12345")
	//create sf
	meta, err := NewMeta(metaDir, metaDir, testlogpath, ModeFile, "", DefautFileRetention)
	if err != nil {
		t.Error(err)
	}
	sf, err := NewSingleFile(meta, fileName, WhenceOldest, false)
	if err != nil {
		t.Error(err)
	}
	absPath, err := filepath.Abs(fileName)
	assert.NoError(t, err)
	assert.Equal(t, absPath, sf.Source())
	// Remember the inode of the original file so we can later prove the
	// reader switched to the newly created one.
	oldInode, err := utilsos.GetIdentifyIDByPath(absPath)
	assert.NoError(t, err)
	//rotate file(rename old file + create new file)
	renameTestFile(fileName, fileNameRotated)
	CreateFile(fileName, "67890")
	// Read the file: a normal read still returns the pre-rotation content,
	// since the reader holds the old (renamed) file open.
	p := make([]byte, 5)
	n, err := sf.Read(p)
	if err != nil {
		t.Error(err)
	}
	assert.Equal(t, 5, n)
	assert.Equal(t, "12345", string(p))
	// The next read should hit EOF on the rotated file, after which the
	// reader reopens the path and its file info (pfi) is updated.
	n, err = sf.Read(p)
	if err != nil {
		t.Error(err)
	}
	newInode, err := utilsos.GetIdentifyIDByPath(fileName)
	assert.NoError(t, err)
	assert.NotEqual(t, newInode, oldInode)
	assert.Equal(t, 5, n)
	assert.Equal(t, "67890", string(p))
	// The rotated-away file must have been recorded in the done file.
	filedone, err := ioutil.ReadFile(sf.meta.DoneFile())
	assert.NoError(t, err)
	assert.True(t, strings.Contains(string(filedone), fileNameRotated))
}
explode_data.jsonl/6869
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 597 }
[ 2830, 3393, 19487, 1703, 34540, 1155, 353, 8840, 836, 8, 341, 17661, 675, 1669, 26054, 22363, 9638, 65009, 6184, 1507, 330, 1944, 32301, 1703, 1138, 17661, 675, 36936, 657, 1669, 26054, 22363, 9638, 65009, 6184, 1507, 330, 1944, 32301, 17...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestGenerateTemplateCommands checks the exact command sequence produced
// for rendering a release's Helm chart: create a scratch dir, fetch and
// untar the chart into it, run `helm template` against the environment's
// kube context, then remove the scratch dir.
func TestGenerateTemplateCommands(t *testing.T) {
	chartRef := "stable/cert-manager"
	chartVer := "v0.5.1"
	valuesFile := "values-certmanager.yaml"
	releaseFile := path.Join(os.TempDir(), "releases.yaml")
	// The values file ends up alongside the release file (both in TempDir
	// here) — presumably resolved relative to the release file's directory.
	expectedValuesFile := path.Join(os.TempDir(), valuesFile)
	releaseName := "cert-manager"
	envName := "kube-system"
	envNamespace := "kube-system"
	release := &model.Release{
		Name: releaseName,
		Chart: &model.Chart{
			Reference: &chartRef,
			Version:   &chartVer,
		},
		ValuesFile: &valuesFile,
		Triggers: []model.ReleaseUpdateTrigger{
			{Chart: &model.HelmTrigger{Track: semver.TrackMinorVersion}},
		},
		FromFile: releaseFile,
	}
	env := &model.Environment{
		Name:          envName,
		KubeNamespace: envNamespace,
	}
	// The implementation names its scratch directory after the current pid.
	tmpDir := path.Join(os.TempDir(), fmt.Sprintf("kcd-template.%d", os.Getpid()))
	t.Run("generate template commands", func(t *testing.T) {
		cmds, err := GenerateTemplateCommands(release, env)
		assert.NoError(t, err)
		assert.Equal(t, [][]string{
			{"mkdir", "-m", "700", "-p", tmpDir},
			{"helm", "fetch", chartRef, "--version", chartVer, "--untar", "--untardir", tmpDir},
			{"helm", "--kube-context", "env:" + envName, "template", tmpDir + "/" + releaseName, "-n", releaseName, "--namespace", envNamespace, "--values", expectedValuesFile},
			{"rm", "-rf", tmpDir},
		}, cmds)
	})
}
explode_data.jsonl/3668
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 499 }
[ 2830, 3393, 31115, 7275, 30479, 1155, 353, 8840, 836, 8, 341, 197, 15941, 3945, 1669, 330, 27992, 2899, 529, 44896, 698, 197, 15941, 10141, 1669, 330, 85, 15, 13, 20, 13, 16, 698, 45939, 1703, 1669, 330, 3661, 61034, 13297, 33406, 698...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestValidate_ScalarLeafs_ScalarSelectionNotAllowedWithDirectives asserts
// that the ScalarLeafs validation rule rejects a sub-selection on the
// scalar field "name" even when the field carries a directive.
func TestValidate_ScalarLeafs_ScalarSelectionNotAllowedWithDirectives(t *testing.T) {
	testutil.ExpectFailsRule(t, graphql.ScalarLeafsRule, ` fragment scalarSelectionsNotAllowedWithDirectives on Dog { name @include(if: true) { isAlsoHumanName } } `, []gqlerrors.FormattedError{
		// The expected position (line 3, column 33) refers to the query's
		// original multi-line layout. NOTE(review): the literal's line breaks
		// may have been lost in reformatting — verify against upstream.
		testutil.RuleError(`Field "name" of type "String" must not have a sub selection.`, 3, 33),
	})
}
explode_data.jsonl/37575
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 157 }
[ 2830, 3393, 17926, 1098, 59153, 31461, 82, 1098, 59153, 11177, 97634, 2354, 16027, 1886, 1155, 353, 8840, 836, 8, 341, 18185, 1314, 81893, 37, 6209, 11337, 1155, 11, 48865, 808, 59153, 31461, 82, 11337, 11, 22074, 414, 12289, 17274, 11177...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestRegistryNew verifies that Registry.New clones its ExecutorInfo
// prototype and attaches the supplied resources: nil resources leave the
// prototype unchanged, while a non-nil (even empty) slice is set on the
// resulting ExecutorInfo.
func TestRegistryNew(t *testing.T) {
	for i, tt := range []struct {
		prototype *mesosproto.ExecutorInfo
		resources []*mesosproto.Resource
		want      *mesosproto.ExecutorInfo
	}{
		{
			// nil resources: result matches the prototype exactly.
			prototype: &mesosproto.ExecutorInfo{
				ExecutorId: mesosutil.NewExecutorID("exec-id"),
			},
			resources: nil,
			want: &mesosproto.ExecutorInfo{
				ExecutorId: mesosutil.NewExecutorID("exec-id"),
			},
		},
		{
			// empty (non-nil) resources: the empty slice is carried over.
			prototype: &mesosproto.ExecutorInfo{
				ExecutorId: mesosutil.NewExecutorID("exec-id"),
			},
			resources: []*mesosproto.Resource{},
			want: &mesosproto.ExecutorInfo{
				ExecutorId: mesosutil.NewExecutorID("exec-id"),
				Resources:  []*mesosproto.Resource{},
			},
		},
		{
			// populated resources plus extra prototype fields survive intact.
			prototype: &mesosproto.ExecutorInfo{
				ExecutorId: mesosutil.NewExecutorID("exec-id"),
				Name:       proto.String("foo"),
			},
			resources: []*mesosproto.Resource{
				scalar("foo", 1.0, "role1"),
				scalar("bar", 2.0, "role2"),
			},
			want: &mesosproto.ExecutorInfo{
				ExecutorId: mesosutil.NewExecutorID("exec-id"),
				Name:       proto.String("foo"),
				Resources: []*mesosproto.Resource{
					scalar("foo", 1.0, "role1"),
					scalar("bar", 2.0, "role2"),
				},
			},
		},
	} {
		// Node lookup always misses; the registry must tolerate that.
		lookupNode := node.LookupFunc(func(string) *api.Node { return nil })
		c, err := NewCache(1000)
		if err != nil {
			t.Error(err)
			continue
		}
		r, err := NewRegistry(lookupNode, tt.prototype, c)
		if err != nil {
			t.Error(err)
			continue
		}
		got := r.New("", tt.resources)
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("test #%d\ngot %v\nwant %v", i, got, tt.want)
		}
	}
}
explode_data.jsonl/74855
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 749 }
[ 2830, 3393, 15603, 3564, 1155, 353, 8840, 836, 8, 341, 2023, 600, 11, 17853, 1669, 2088, 3056, 1235, 341, 197, 197, 45654, 353, 8828, 436, 15110, 94500, 1731, 198, 197, 10202, 2360, 29838, 8828, 436, 15110, 20766, 198, 197, 50780, 414, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestQueuePush(t *testing.T) { elements := []struct { n int }{ {10}, {50}, {100}, {250}, {500}, {1000}, {2000}, {5000}, } for _, e := range elements { q := &queue{} for i := 0; i < e.n; i++ { q.push(index{}) } t.Logf("indices-number (%d) blocks-number (%d) actual-blocks-number (%d) tail-size (%d)", q.size(), q.bn, q._bn(), q._tailSize()) assert.Equal(t, q.size(), e.n) assert.Equal(t, q._bn(), (e.n+blockCapacity-1)/blockCapacity) assert.Equal(t, q.bn, q._bn()) assert.Equal(t, q._tailSize(), e.n%blockCapacity) } }
explode_data.jsonl/5392
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 279 }
[ 2830, 3393, 7554, 16644, 1155, 353, 8840, 836, 8, 341, 197, 21423, 1669, 3056, 1235, 341, 197, 9038, 526, 198, 197, 59403, 197, 197, 90, 16, 15, 1583, 197, 197, 90, 20, 15, 1583, 197, 197, 90, 16, 15, 15, 1583, 197, 197, 90, 17,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRetryWebsocket(t *testing.T) { testCases := []struct { desc string maxRequestAttempts int expectedRetryAttempts int expectedResponseStatus int expectedError bool amountFaultyEndpoints int }{ { desc: "Switching ok after 2 retries", maxRequestAttempts: 3, expectedRetryAttempts: 2, amountFaultyEndpoints: 2, expectedResponseStatus: http.StatusSwitchingProtocols, }, { desc: "Switching failed", maxRequestAttempts: 2, expectedRetryAttempts: 1, amountFaultyEndpoints: 2, expectedResponseStatus: http.StatusBadGateway, expectedError: true, }, } forwarder, err := forward.New() if err != nil { t.Fatalf("Error creating forwarder: %s", err) } backendServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { upgrader := websocket.Upgrader{} upgrader.Upgrade(rw, req, nil) })) for _, test := range testCases { test := test t.Run(test.desc, func(t *testing.T) { t.Parallel() loadBalancer, err := roundrobin.New(forwarder) if err != nil { t.Fatalf("Error creating load balancer: %s", err) } basePort := 33444 for i := 0; i < test.amountFaultyEndpoints; i++ { // 192.0.2.0 is a non-routable IP for testing purposes. // See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928 // We only use the port specification here because the URL is used as identifier // in the load balancer and using the exact same URL would not add a new server. 
loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i))) } // add the functioning server to the end of the load balancer list loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL)) retryListener := &countingRetryListener{} retry := NewRetry(test.maxRequestAttempts, loadBalancer, retryListener) retryServer := httptest.NewServer(retry) url := strings.Replace(retryServer.URL, "http", "ws", 1) _, response, err := websocket.DefaultDialer.Dial(url, nil) if !test.expectedError { require.NoError(t, err) } assert.Equal(t, test.expectedResponseStatus, response.StatusCode) assert.Equal(t, test.expectedRetryAttempts, retryListener.timesCalled) }) } }
explode_data.jsonl/44113
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 949 }
[ 2830, 3393, 51560, 5981, 9556, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 41653, 4293, 914, 198, 197, 22543, 1900, 81517, 257, 526, 198, 197, 42400, 51560, 81517, 220, 526, 198, 197, 42400, 92663, 526, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestDeleteJob(t *testing.T) { tc := testutil.SystemTest(t) buf := new(bytes.Buffer) listJobs(buf, tc.ProjectID, "", "RISK_ANALYSIS_JOB") s := buf.String() if len(s) == 0 { // Create job. riskNumerical(tc.ProjectID, "bigquery-public-data", "risk-topic", "risk-sub", "nhtsa_traffic_fatalities", "accident_2015", "state_number") buf.Reset() listJobs(buf, tc.ProjectID, "", "RISK_ANALYSIS_JOB") s = buf.String() } jobName := string(jobIDRegexp.FindSubmatch([]byte(s))[1]) buf.Reset() deleteJob(buf, jobName) if got := buf.String(); got != "Successfully deleted job" { t.Errorf("unable to delete job: %s", s) } }
explode_data.jsonl/45870
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 275 }
[ 2830, 3393, 6435, 12245, 1155, 353, 8840, 836, 8, 341, 78255, 1669, 1273, 1314, 16620, 2271, 1155, 340, 26398, 1669, 501, 23158, 22622, 340, 14440, 40667, 10731, 11, 17130, 30944, 915, 11, 7342, 330, 49, 67699, 59753, 74464, 59690, 1138, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGenesisValidate(t *testing.T) { testCases := []struct { name string genesis GenesisState expError bool }{ { "empty genesis", GenesisState{}, false, }, { "default genesis", *DefaultGenesisState(), false, }, { "custom genesis", NewGenesisState(NewParams(true, time.Hour)), false, }, } for _, tc := range testCases { err := tc.genesis.Validate() if tc.expError { require.Error(t, err, tc.name) } else { require.NoError(t, err, tc.name) } } }
explode_data.jsonl/9762
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 238 }
[ 2830, 3393, 84652, 17926, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 257, 914, 198, 197, 82281, 13774, 220, 40788, 1397, 198, 197, 48558, 1454, 1807, 198, 197, 59403, 197, 197, 515, 298, 197, 1, 319...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestReacjiStore exercises the reacji (frequently-used reaction) store: a
// fresh user gets the default reacji list; posting reactions replaces the
// defaults one by one (newest first, ties broken by the defaults' order);
// repeated use bumps frequency counts; and the skin-tone preference
// round-trips through the local handler.
func TestReacjiStore(t *testing.T) {
	runWithMemberTypes(t, func(mt chat1.ConversationMembersType) {
		switch mt {
		case chat1.ConversationMembersType_IMPTEAMNATIVE:
		default:
			// Only native impteams are exercised; skip other member types.
			return
		}
		ctc := makeChatTestContext(t, "ReacjiStore", 1)
		defer ctc.cleanup()
		user := ctc.users()[0]
		uid := user.User.GetUID().ToBytes()
		tc := ctc.world.Tcs[user.Username]
		ctx := ctc.as(t, user).startCtx
		listener := newServerChatListener()
		ctc.as(t, user).h.G().NotifyRouter.AddListener(listener)
		tc.ChatG.Syncer.(*Syncer).isConnected = true
		reacjiStore := storage.NewReacjiStore(ctc.as(t, user).h.G())
		// Helper: compare both the user-visible reacji list and the store's
		// internal frequency data against expectations.
		assertReacjiStore := func(actual, expected keybase1.UserReacjis, expectedData *storage.ReacjiInternalStorage) {
			require.Equal(t, expected, actual)
			data := reacjiStore.GetInternalStore(ctx, uid)
			require.Equal(t, expectedData, data)
		}
		expectedData := storage.NewReacjiInternalStorage()
		for _, el := range storage.DefaultTopReacjis {
			expectedData.FrequencyMap[el] = 0
		}
		conv := mustCreateConversationForTest(t, ctc, user, chat1.TopicType_CHAT, mt)
		// if the user has no history we return the default list
		userReacjis := tc.G.ChatHelper.UserReacjis(ctx, uid)
		assertReacjiStore(userReacjis, keybase1.UserReacjis{TopReacjis: storage.DefaultTopReacjis}, expectedData)
		// post a bunch of reactions, we should end up with these reactions
		// replacing the defaults sorted alphabetically (since they tie on
		// being used once each)
		reactionKeys := []string{
			":a:",
			":8ball:",
			":3rd_place_medal:",
			":2nd_place_medal:",
			":1st_place_medal:",
			":1234:",
			":100:",
		}
		msg := chat1.NewMessageBodyWithText(chat1.MessageText{Body: "hi"})
		textID := mustPostLocalForTest(t, ctc, user, conv, msg)
		consumeNewMsgRemote(t, listener, chat1.MessageType_TEXT)
		expected := keybase1.UserReacjis{}
		for i, reaction := range reactionKeys {
			expectedData.FrequencyMap[reaction]++
			mustReactToMsg(ctx, t, ctc, user, conv, textID, reaction)
			consumeNewMsgRemote(t, listener, chat1.MessageType_REACTION)
			info := consumeReactionUpdate(t, listener)
			// The newest reaction moves to the front of the expected list.
			expected.TopReacjis = append([]string{reaction}, expected.TopReacjis...)
			if i < 5 {
				// remove defaults as user values are added
				delete(expectedData.FrequencyMap, storage.DefaultTopReacjis[len(storage.DefaultTopReacjis)-i-1])
				expected.TopReacjis = append(expected.TopReacjis, storage.DefaultTopReacjis...)[:len(storage.DefaultTopReacjis)]
			}
			assertReacjiStore(info.UserReacjis, expected, expectedData)
		}
		// bump "a" to the most frequent
		// NOTE(review): the code below actually reacts with ":100:", not
		// ":a:" — the comment above appears stale; verify intent upstream.
		msg = chat1.NewMessageBodyWithText(chat1.MessageText{Body: "hi"})
		textID2 := mustPostLocalForTest(t, ctc, user, conv, msg)
		consumeNewMsgRemote(t, listener, chat1.MessageType_TEXT)
		mustReactToMsg(ctx, t, ctc, user, conv, textID2, ":100:")
		consumeNewMsgRemote(t, listener, chat1.MessageType_REACTION)
		expectedData.FrequencyMap[":100:"]++
		info := consumeReactionUpdate(t, listener)
		assertReacjiStore(info.UserReacjis, expected, expectedData)
		// putSkinTone
		expectedSkinTone := keybase1.ReacjiSkinTone(4)
		userReacjis, err := ctc.as(t, user).chatLocalHandler().PutReacjiSkinTone(ctx, expectedSkinTone)
		require.NoError(t, err)
		expected.SkinTone = expectedSkinTone
		expectedData.SkinTone = expectedSkinTone
		assertReacjiStore(userReacjis, expected, expectedData)
	})
}
explode_data.jsonl/63726
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1306 }
[ 2830, 3393, 693, 33378, 6093, 1155, 353, 8840, 836, 8, 341, 56742, 2354, 9366, 4173, 1155, 11, 2915, 81618, 6236, 16, 4801, 22323, 24371, 929, 8, 341, 197, 8961, 11965, 341, 197, 2722, 6236, 16, 4801, 22323, 24371, 929, 97415, 97052, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewStructuredIdentifierEmptyBranchAndStandardUrl(t *testing.T) { // given source := test.NewGitSource() endpoint, err := gittransport.NewEndpoint("https://github.com/fabric8-services/fabric8-tenant") require.NoError(t, err) // when identifier, err := repository.NewStructuredIdentifier(source, endpoint) // then require.NoError(t, err) assert.Equal(t, "fabric8-services", identifier.Owner) assert.Equal(t, "fabric8-tenant", identifier.Name) assert.Equal(t, "master", identifier.Branch) }
explode_data.jsonl/63411
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 180 }
[ 2830, 3393, 3564, 97457, 8714, 3522, 18197, 3036, 19781, 2864, 1155, 353, 8840, 836, 8, 341, 197, 322, 2661, 198, 47418, 1669, 1273, 7121, 46562, 3608, 741, 6246, 2768, 11, 1848, 1669, 16345, 26445, 7121, 27380, 445, 2428, 1110, 5204, 9...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestNewLoggerText(t *testing.T) { logger := NewLogger("logfmt", "info") err := level.Error(logger).Log("msg", "TestNewLogger-INFO") assert.Nil(t, err) }
explode_data.jsonl/46811
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 66 }
[ 2830, 3393, 3564, 7395, 1178, 1155, 353, 8840, 836, 8, 341, 17060, 1669, 1532, 7395, 445, 839, 12501, 497, 330, 2733, 5130, 9859, 1669, 2188, 6141, 37833, 568, 2201, 445, 3236, 497, 330, 2271, 3564, 7395, 12, 6637, 1138, 6948, 59678, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestDelete covers the GKE cluster external client's Delete path against a
// mock container API: a normal delete, a skipped delete when the cluster is
// already stopping, a 404 treated as success, and a failed delete that
// surfaces a wrapped error. In every case the managed resource must end up
// with the Deleting condition set.
func TestDelete(t *testing.T) {
	type args struct {
		mg resource.Managed
	}
	type want struct {
		mg  resource.Managed
		err error
	}
	cases := map[string]struct {
		handler http.Handler
		kube    client.Client
		args    args
		want    want
	}{
		"Successful": {
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				_ = r.Body.Close()
				// The external client must issue an HTTP DELETE.
				if diff := cmp.Diff(http.MethodDelete, r.Method); diff != "" {
					t.Errorf("r: -want, +got:\n%s", diff)
				}
				w.WriteHeader(http.StatusOK)
				_ = json.NewEncoder(w).Encode(&container.Operation{})
			}),
			args: args{
				mg: cluster(),
			},
			want: want{
				mg:  cluster(withConditions(xpv1.Deleting())),
				err: nil,
			},
		},
		"SuccessfulSkipDelete": {
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				_ = r.Body.Close()
				if diff := cmp.Diff(http.MethodDelete, r.Method); diff != "" {
					t.Errorf("r: -want, +got:\n%s", diff)
				}
				// Return bad request for delete to demonstrate that
				// http call is never made.
				w.WriteHeader(http.StatusBadRequest)
				_ = json.NewEncoder(w).Encode(&container.Operation{})
			}),
			args: args{
				// Cluster already stopping: delete should be skipped.
				mg: cluster(withProviderStatus(v1beta2.ClusterStateStopping)),
			},
			want: want{
				mg: cluster(
					withConditions(xpv1.Deleting()),
					withProviderStatus(v1beta2.ClusterStateStopping),
				),
				err: nil,
			},
		},
		"AlreadyGone": {
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				_ = r.Body.Close()
				if diff := cmp.Diff(http.MethodDelete, r.Method); diff != "" {
					t.Errorf("r: -want, +got:\n%s", diff)
				}
				// A 404 on delete is not an error: the cluster is gone.
				w.WriteHeader(http.StatusNotFound)
				_ = json.NewEncoder(w).Encode(&container.Operation{})
			}),
			args: args{
				mg: cluster(),
			},
			want: want{
				mg:  cluster(withConditions(xpv1.Deleting())),
				err: nil,
			},
		},
		"Failed": {
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				_ = r.Body.Close()
				if diff := cmp.Diff(http.MethodDelete, r.Method); diff != "" {
					t.Errorf("r: -want, +got:\n%s", diff)
				}
				w.WriteHeader(http.StatusBadRequest)
				_ = json.NewEncoder(w).Encode(&container.Operation{})
			}),
			args: args{
				mg: cluster(),
			},
			want: want{
				mg:  cluster(withConditions(xpv1.Deleting())),
				err: errors.Wrap(gError(http.StatusBadRequest, ""), errDeleteCluster),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			server := httptest.NewServer(tc.handler)
			defer server.Close()
			s, _ := container.NewService(context.Background(), option.WithEndpoint(server.URL), option.WithoutAuthentication())
			e := clusterExternal{
				kube:      tc.kube,
				projectID: projectID,
				cluster:   s,
			}
			err := e.Delete(context.Background(), tc.args.mg)
			if tc.want.err != nil && err != nil {
				// the case where our mock server returns error.
				if diff := cmp.Diff(tc.want.err.Error(), err.Error()); diff != "" {
					t.Errorf("Delete(...): -want, +got:\n%s", diff)
				}
			} else {
				if diff := cmp.Diff(tc.want.err, err); diff != "" {
					t.Errorf("Delete(...): -want, +got:\n%s", diff)
				}
			}
			// The managed resource must have been mutated as expected.
			if diff := cmp.Diff(tc.want.mg, tc.args.mg); diff != "" {
				t.Errorf("Delete(...): -want, +got:\n%s", diff)
			}
		})
	}
}
explode_data.jsonl/69615
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1474 }
[ 2830, 3393, 6435, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 2109, 70, 5101, 29902, 3279, 198, 197, 532, 13158, 1366, 2036, 341, 197, 2109, 70, 220, 5101, 29902, 3279, 198, 197, 9859, 1465, 198, 197, 630, 1444, 2264, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestUpdateProjectUnknownParmeter verifies that an unknown flag
// (--git-userr) on "update project" is rejected with a clear error.
// NOTE(review): "Parmeter" in the test name is a typo for "Parameter";
// renaming would change the test's public identifier, so it is kept.
func TestUpdateProjectUnknownParmeter(t *testing.T) {
	cmd := "update project sockshop --git-userr=GIT_USER --git-token=GIT_TOKEN --git-remote-url=GIT_REMOTE_URL"
	testInvalidInputHelper(cmd, "unknown flag: --git-userr", t)
}
explode_data.jsonl/53461
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 76 }
[ 2830, 3393, 4289, 7849, 13790, 40111, 1404, 1155, 353, 8840, 836, 8, 341, 18185, 7928, 2505, 5511, 445, 2386, 2390, 11087, 8675, 1177, 12882, 25682, 615, 28, 90559, 9107, 1177, 12882, 34841, 28, 90559, 18681, 1177, 12882, 12, 18147, 25443...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestMarshalUnmarshalPiece(t *testing.T) { // Create random piece. piece := randomPiece() pieceIndex := uint32(fastrand.Intn(100)) // Marshal the piece. pieceBytes := make([]byte, marshaledPieceSize) putPiece(pieceBytes, pieceIndex, piece) // Unmarshal the piece. unmarshaledPieceIndex, unmarshaledPiece, err := unmarshalPiece(pieceBytes) if err != nil { t.Fatal(err) } // Compare the pieceIndex. if unmarshaledPieceIndex != pieceIndex { t.Fatalf("Piece index should be %v but was %v", pieceIndex, unmarshaledPieceIndex) } // Compare the piece to the original. if !reflect.DeepEqual(unmarshaledPiece, piece) { t.Fatal("Piece doesn't equal unmarshaled piece") } }
explode_data.jsonl/25111
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 250 }
[ 2830, 3393, 55438, 1806, 27121, 31209, 1155, 353, 8840, 836, 8, 341, 197, 322, 4230, 4194, 6573, 624, 3223, 9108, 1669, 4194, 31209, 741, 3223, 9108, 1552, 1669, 2622, 18, 17, 955, 20467, 437, 7371, 77, 7, 16, 15, 15, 4390, 197, 322...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRebalance(t *testing.T) { t.Parallel() mockCaptureInfos := map[model.CaptureID]*model.CaptureInfo{ "capture-1": { ID: "capture-1", AdvertiseAddr: "fakeip:1", }, "capture-2": { ID: "capture-2", AdvertiseAddr: "fakeip:2", }, "capture-3": { ID: "capture-3", AdvertiseAddr: "fakeip:3", }, } ctx := cdcContext.NewBackendContext4Test(false) communicator := NewMockScheduleDispatcherCommunicator() dispatcher := NewBaseScheduleDispatcher("cf-1", communicator, 1000) dispatcher.captureStatus = map[model.CaptureID]*captureStatus{ "capture-1": { SyncStatus: captureSyncFinished, CheckpointTs: 1300, ResolvedTs: 1600, }, "capture-2": { SyncStatus: captureSyncFinished, CheckpointTs: 1500, ResolvedTs: 1550, }, "capture-3": { SyncStatus: captureSyncFinished, CheckpointTs: 1400, ResolvedTs: 1650, }, } for i := 1; i <= 6; i++ { dispatcher.tables.AddTableRecord(&util.TableRecord{ TableID: model.TableID(i), CaptureID: fmt.Sprintf("capture-%d", (i+1)%2+1), Status: util.RunningTable, }) } dispatcher.Rebalance() communicator.On("DispatchTable", mock.Anything, "cf-1", mock.Anything, mock.Anything, true). Return(false, nil) checkpointTs, resolvedTs, err := dispatcher.Tick(ctx, 1300, []model.TableID{1, 2, 3, 4, 5, 6}, mockCaptureInfos) require.NoError(t, err) require.Equal(t, CheckpointCannotProceed, checkpointTs) require.Equal(t, CheckpointCannotProceed, resolvedTs) communicator.AssertExpectations(t) communicator.AssertNumberOfCalls(t, "DispatchTable", 1) communicator.Reset() communicator.On("DispatchTable", mock.Anything, "cf-1", mock.Anything, mock.Anything, true). Return(true, nil) checkpointTs, resolvedTs, err = dispatcher.Tick(ctx, 1300, []model.TableID{1, 2, 3, 4, 5, 6}, mockCaptureInfos) require.NoError(t, err) require.Equal(t, CheckpointCannotProceed, checkpointTs) require.Equal(t, CheckpointCannotProceed, resolvedTs) communicator.AssertNumberOfCalls(t, "DispatchTable", 2) communicator.AssertExpectations(t) }
explode_data.jsonl/28506
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 880 }
[ 2830, 3393, 693, 21571, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 77333, 27429, 38059, 1669, 2415, 79938, 727, 11850, 915, 8465, 2528, 727, 11850, 1731, 515, 197, 197, 1, 45070, 12, 16, 788, 341, 298, 29580, 25, 310, 330,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestNoneInitialize(t *testing.T) { config.L2Connect = "none://" err := cache.Initialize() if err != nil { t.Errorf("Failed to initalize none cacher - %v", err) } }
explode_data.jsonl/60811
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 69 }
[ 2830, 3393, 4064, 9928, 1155, 353, 8840, 836, 8, 341, 25873, 1214, 17, 14611, 284, 330, 6697, 1110, 698, 9859, 1669, 6500, 45829, 741, 743, 1848, 961, 2092, 341, 197, 3244, 13080, 445, 9408, 311, 304, 34313, 6857, 272, 11007, 481, 101...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestPolyPackT1(t *testing.T) { var p1, p2 common.Poly var seed [32]byte var buf [common.PolyT1Size]byte for i := uint16(0); i < 100; i++ { PolyDeriveUniform(&p1, &seed, i) p1.Normalize() for j := 0; j < common.N; j++ { p1[j] &= 0x1ff } p1.PackT1(buf[:]) p2.UnpackT1(buf[:]) if p1 != p2 { t.Fatalf("%v != %v", p1, p2) } } }
explode_data.jsonl/11871
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 194 }
[ 2830, 3393, 38164, 30684, 51, 16, 1155, 353, 8840, 836, 8, 341, 2405, 281, 16, 11, 281, 17, 4185, 1069, 5730, 198, 2405, 10320, 508, 18, 17, 90184, 198, 2405, 6607, 508, 5464, 1069, 5730, 51, 16, 1695, 90184, 271, 2023, 600, 1669, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestSscal(t *testing.T) { ctx, handle, buffers := setupTest(t, []float32{1, 2, 3, 4, -2, -3, 5}, []float32{0.25}) <-ctx.Run(func() error { actions := []func() error{ func() error { return handle.Sscal(4, float32(2), buffers[0], 2) }, func() error { scaler := float32(2) return handle.Sscal(3, &scaler, buffers[0], 1) }, func() error { if err := handle.SetPointerMode(Device); err != nil { t.Error(err) return nil } defer handle.SetPointerMode(Host) return handle.Sscal(7, buffers[1], buffers[0], 1) }, } expected := [][]float32{ {2, 2, 6, 4, -4, -3, 10}, {4, 4, 12, 4, -4, -3, 10}, {1, 1, 3, 1, -1, -0.75, 2.5}, } runTestActions32(t, actions, expected, buffers[0]) return nil }) }
explode_data.jsonl/70349
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 379 }
[ 2830, 3393, 50, 93207, 1155, 353, 8840, 836, 8, 341, 20985, 11, 3705, 11, 27389, 1669, 6505, 2271, 1155, 11, 3056, 3649, 18, 17, 90, 16, 11, 220, 17, 11, 220, 18, 11, 220, 19, 11, 481, 17, 11, 481, 18, 11, 220, 20, 2137, 3056,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLinuxConnConfig(t *testing.T) { tests := []struct { name string config *Config groups uint32 }{ { name: "Default Config", config: &Config{}, groups: 0x0, }, { name: "Config with Groups RTMGRP_IPV4_IFADDR", config: &Config{Groups: 0x10}, groups: 0x10, }, { name: "Config with Groups RTMGRP_IPV4_IFADDR | RTMGRP_IPV4_ROUTE", config: &Config{Groups: 0x10 | 0x40}, groups: 0x50, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c, _ := testLinuxConn(t, tt.config) if want, got := tt.groups, c.sa.Groups; want != got { t.Fatalf("unexpected error:\n- want: %v\n- got: %v", want, got) } }) } }
explode_data.jsonl/33499
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 348 }
[ 2830, 3393, 46324, 9701, 2648, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 256, 914, 198, 197, 25873, 353, 2648, 198, 197, 44260, 82, 2622, 18, 17, 198, 197, 59403, 197, 197, 515, 298, 11609, 25, 256, 330...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestCloudPubSub(t *testing.T) { a := assertions.New(t) events.IncludeCaller = true eventCh := make(chan events.Event) handler := events.HandlerFunc(func(e events.Event) { t.Logf("Received event %v", e) a.So(e.Time().IsZero(), should.BeFalse) a.So(e.Context(), should.NotBeNil) eventCh <- e }) pubsub, err := cloud.NewPubSub(test.Context(), "mem://events_test", "mem://events_test") a.So(err, should.BeNil) defer pubsub.Close() pubsub.Subscribe("cloud.**", handler) ctx := events.ContextWithCorrelationID(test.Context(), t.Name()) eui := types.EUI64{1, 2, 3, 4, 5, 6, 7, 8} devAddr := types.DevAddr{1, 2, 3, 4} appID := ttnpb.ApplicationIdentifiers{ ApplicationID: "test-app", } devID := ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: "test-dev", DevEUI: &eui, JoinEUI: &eui, DevAddr: &devAddr, } gtwID := ttnpb.GatewayIdentifiers{ GatewayID: "test-gtw", EUI: &eui, } cloud.SetContentType(pubsub, "application/json") pubsub.Publish(events.New(ctx, "cloud.test.evt0", &appID, nil)) select { case e := <-eventCh: a.So(e.Name(), should.Equal, "cloud.test.evt0") if a.So(e.Identifiers(), should.NotBeNil) && a.So(e.Identifiers(), should.HaveLength, 1) { a.So(e.Identifiers()[0].GetApplicationIDs(), should.Resemble, &appID) } case <-time.After(time.Second): t.Error("Did not receive expected event") t.FailNow() } cloud.SetContentType(pubsub, "application/protobuf") pubsub.Publish(events.New(ctx, "cloud.test.evt1", ttnpb.CombineIdentifiers(&devID, &gtwID), nil)) select { case e := <-eventCh: a.So(e.Name(), should.Equal, "cloud.test.evt1") if a.So(e.Identifiers(), should.NotBeNil) && a.So(e.Identifiers(), should.HaveLength, 2) { a.So(e.Identifiers()[0].GetDeviceIDs(), should.Resemble, &devID) a.So(e.Identifiers()[1].GetGatewayIDs(), should.Resemble, &gtwID) } case <-time.After(time.Second): t.Error("Did not receive expected event") t.FailNow() } }
explode_data.jsonl/21440
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 897 }
[ 2830, 3393, 16055, 29162, 3136, 1155, 353, 8840, 836, 8, 341, 11323, 1669, 54836, 7121, 1155, 692, 90873, 55528, 58735, 284, 830, 271, 28302, 1143, 1669, 1281, 35190, 4357, 6904, 340, 53326, 1669, 4357, 89164, 18552, 2026, 4357, 6904, 8, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestLargeSendSystemError(t *testing.T) { largeStr := strings.Repeat("0123456789", 10000) tests := []struct { msg string err error wantErr string }{ { msg: "error message too long", err: errors.New(largeStr), wantErr: "too long", }, { msg: "max allowed error message", err: errors.New(largeStr[:math.MaxUint16-1]), wantErr: typed.ErrBufferFull.Error(), // error message is within length, but it overflows the frame. }, } for _, tt := range tests { t.Run(tt.msg, func(t *testing.T) { testutils.WithTestServer(t, nil, func(t testing.TB, ts *testutils.TestServer) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() opts := testutils.NewOpts().AddLogFilter("Couldn't create outbound frame.", 1) client := ts.NewClient(opts) conn, err := client.Connect(ctx, ts.HostPort()) require.NoError(t, err, "Connect failed") err = conn.SendSystemError(1, Span{}, tt.err) require.Error(t, err, "Expect err") assert.Contains(t, err.Error(), tt.wantErr, "unexpected error") }) }) } }
explode_data.jsonl/78192
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 459 }
[ 2830, 3393, 34253, 11505, 2320, 1454, 1155, 353, 8840, 836, 8, 341, 8810, 2744, 2580, 1669, 9069, 2817, 10979, 445, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 497, 220, 16, 15, 15, 15, 15, 692, 78216, 1669, 3056, 1235, 341, 197, 21...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStats(t *testing.T) { opts := nsqdNs.NewOptions() opts.Logger = newTestLogger(t) tcpAddr, _, nsqd, nsqdServer := mustStartNSQD(opts) defer os.RemoveAll(opts.DataPath) defer nsqdServer.Exit() topicName := "test_stats" + strconv.Itoa(int(time.Now().Unix())) topic := nsqd.GetTopicIgnPart(topicName) msg := nsqdNs.NewMessage(0, []byte("test body")) topic.PutMessage(msg) conn, err := mustConnectNSQD(tcpAddr) test.Equal(t, err, nil) defer conn.Close() identify(t, conn, nil, frameTypeResponse) sub(t, conn, topicName, "ch") stats := nsqd.GetStats(false, false) t.Logf("stats: %+v", stats) test.Equal(t, len(stats), 1) test.Equal(t, len(stats[0].Channels), 1) test.Equal(t, len(stats[0].Channels[0].Clients), 1) stats = nsqd.GetStats(false, true) t.Logf("stats: %+v", stats) test.Equal(t, len(stats), 1) test.Equal(t, len(stats[0].Channels), 1) test.Equal(t, len(stats[0].Channels[0].Clients), 0) test.Equal(t, stats[0].Channels[0].ClientNum, int64(1)) }
explode_data.jsonl/59838
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 438 }
[ 2830, 3393, 16635, 1155, 353, 8840, 836, 8, 341, 64734, 1669, 12268, 76727, 47360, 7121, 3798, 741, 64734, 12750, 284, 501, 2271, 7395, 1155, 340, 3244, 4672, 13986, 11, 8358, 12268, 76727, 11, 12268, 76727, 5475, 1669, 1969, 3479, 2448, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSafeWriteAsConfig(t *testing.T) { v := New() fs := afero.NewMemMapFs() v.SetFs(fs) err := v.ReadConfig(bytes.NewBuffer(yamlExample)) if err != nil { t.Fatal(err) } require.NoError(t, v.SafeWriteConfigAs("/test/c.yaml")) if _, err = afero.ReadFile(fs, "/test/c.yaml"); err != nil { t.Fatal(err) } }
explode_data.jsonl/9913
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 147 }
[ 2830, 3393, 25663, 7985, 2121, 2648, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 1532, 741, 53584, 1669, 264, 802, 78, 7121, 18816, 2227, 48300, 741, 5195, 4202, 48300, 31856, 340, 9859, 1669, 348, 6503, 2648, 23158, 7121, 4095, 7021, 946...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNewSCClientAggregate(t *testing.T) { registry.Configuration().ClusterAddresses = "sc-1=127.0.0.1:2379,127.0.0.2:2379" registry.Configuration().InitClusterInfo() c := GetOrCreateSCClient() if len(*c) == 0 { t.Fatalf("TestNewSCClientAggregate failed") } }
explode_data.jsonl/45340
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 107 }
[ 2830, 3393, 3564, 3540, 2959, 64580, 1155, 353, 8840, 836, 8, 341, 197, 29172, 17334, 1005, 28678, 52290, 284, 330, 2388, 12, 16, 28, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, 17, 18, 22, 24, 11, 16, 17, 22, 13, 15, 13, 15, 13...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestParseCiphers(t *testing.T) { testOk := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305" v, err := ParseCiphers(testOk) if err != nil { t.Fatal(err) } if len(v) != 17 { t.Fatal("missed ciphers after parse") } testBad := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,cipherX" if _, err := ParseCiphers(testBad); err == nil { t.Fatal("should fail on unsupported cipherX") } testOrder := "TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" v, _ = ParseCiphers(testOrder) expected := []uint16{tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_RSA_WITH_AES_128_GCM_SHA256} if !reflect.DeepEqual(expected, v) { t.Fatal("cipher order is not preserved") } }
explode_data.jsonl/60937
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 663 }
[ 2830, 3393, 14463, 34, 82077, 1155, 353, 8840, 836, 8, 341, 18185, 11578, 1669, 330, 45439, 2089, 6484, 1799, 69510, 72638, 23929, 69381, 62, 16, 17, 23, 90764, 38096, 19997, 7268, 2089, 6484, 1799, 69510, 72638, 23929, 69381, 62, 16, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestTaskUpdate(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( ctx, cancel = testContext() id = t.Name() ) defer cancel() image, err := client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } limit := int64(32 * 1024 * 1024) memory := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { s.Linux.Resources.Memory = &specs.LinuxMemory{ Limit: &limit, } return nil } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"), memory)) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } // check that the task has a limit of 32mb cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid()))) if err != nil { t.Fatal(err) } stat, err := cgroup.Stat(cgroups.IgnoreNotExist) if err != nil { t.Fatal(err) } if int64(stat.Memory.Usage.Limit) != limit { t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) } limit = 64 * 1024 * 1024 if err := task.Update(ctx, WithResources(&specs.LinuxResources{ Memory: &specs.LinuxMemory{ Limit: &limit, }, })); err != nil { t.Error(err) } // check that the task has a limit of 64mb if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil { t.Fatal(err) } if int64(stat.Memory.Usage.Limit) != limit { t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) } if err := task.Kill(ctx, unix.SIGKILL); err != nil { t.Fatal(err) } <-statusC }
explode_data.jsonl/39630
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 764 }
[ 2830, 3393, 6262, 4289, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 25291, 11, 1848, 1669, 501, 2959, 1155, 11, 2621, 340, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 16867, 2943, 10421, 2822, 2405, 23...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestResourcePermissionsCreate_invalid(t *testing.T) { _, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{}, Resource: ResourcePermissions(), Create: true, }.Apply(t) qa.AssertErrorStartsWith(t, err, "At least one type of resource identifiers must be set") }
explode_data.jsonl/50875
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 98 }
[ 2830, 3393, 4783, 23851, 4021, 31433, 1155, 353, 8840, 836, 8, 341, 197, 6878, 1848, 1669, 88496, 20766, 18930, 515, 197, 12727, 941, 18513, 25, 3056, 15445, 27358, 18930, 38837, 197, 79487, 25, 11765, 23851, 3148, 197, 75569, 25, 256, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContainerSpecReadonlyRootfs(t *testing.T) { testID := "test-id" testSandboxID := "sandbox-id" testPid := uint32(1234) containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData() ociRuntime := config.Runtime{} c := newTestCRIService() for _, readonly := range []bool{true, false} { containerConfig.Linux.SecurityContext.ReadonlyRootfs = readonly spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime) require.NoError(t, err) specCheck(t, testID, testSandboxID, testPid, spec) assert.Equal(t, readonly, spec.Root.Readonly) } }
explode_data.jsonl/6407
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 227 }
[ 2830, 3393, 4502, 8327, 4418, 3243, 8439, 3848, 1155, 353, 8840, 836, 8, 341, 18185, 915, 1669, 330, 1944, 12897, 698, 18185, 50, 31536, 915, 1669, 330, 76756, 12897, 698, 18185, 32339, 1669, 2622, 18, 17, 7, 16, 17, 18, 19, 340, 53...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) { t.Parallel() // We'll create the arbitrator and its backing log in a default state. log := &mockArbitratorLog{ state: StateDefault, newStates: make(chan ArbitratorState, 5), resolvers: make(map[ContractResolver]struct{}), } chanArbCtx, err := createTestChannelArbitrator(t, log) if err != nil { t.Fatalf("unable to create ChannelArbitrator: %v", err) } chanArb := chanArbCtx.chanArb // We'll inject a test clock implementation so we can control the uptime. startTime := time.Date(2020, time.February, 3, 13, 0, 0, 0, time.UTC) testClock := clock.NewTestClock(startTime) chanArb.cfg.Clock = testClock // We also configure the grace period and the IsForwardedHTLC to identify // the htlc as our initiated payment. chanArb.cfg.PaymentsExpirationGracePeriod = time.Second * 15 chanArb.cfg.IsForwardedHTLC = func(chanID lnwire.ShortChannelID, htlcIndex uint64) bool { return false } if err := chanArb.Start(); err != nil { t.Fatalf("unable to start ChannelArbitrator: %v", err) } defer func() { if err := chanArb.Stop(); err != nil { t.Fatalf("unable to stop chan arb: %v", err) } }() // Now that our channel arb has started, we'll set up // its contract signals channel so we can send it // various HTLC updates for this test. htlcUpdates := make(chan *ContractUpdate) signals := &ContractSignals{ HtlcUpdates: htlcUpdates, ShortChanID: lnwire.ShortChannelID{}, } chanArb.UpdateContractSignals(signals) // Next, we'll send it a new HTLC that is set to expire // in 10 blocks. htlcIndex := uint64(99) htlcExpiry := uint32(10) pendingHTLC := channeldb.HTLC{ Incoming: false, Amt: 10000, HtlcIndex: htlcIndex, RefundTimeout: htlcExpiry, } htlcUpdates <- &ContractUpdate{ HtlcKey: RemoteHtlcSet, Htlcs: []channeldb.HTLC{pendingHTLC}, } // We will advance the uptime to 10 seconds which should be still within // the grace period and should not trigger going to chain. 
testClock.SetTime(startTime.Add(time.Second * 10)) chanArbCtx.blockEpochs <- &chainntnfs.BlockEpoch{Height: 5} chanArbCtx.AssertState(StateDefault) // We will advance the uptime to 16 seconds which should trigger going // to chain. testClock.SetTime(startTime.Add(time.Second * 16)) chanArbCtx.blockEpochs <- &chainntnfs.BlockEpoch{Height: 6} chanArbCtx.AssertStateTransitions( StateBroadcastCommit, StateCommitmentBroadcasted, ) }
explode_data.jsonl/3703
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 931 }
[ 2830, 3393, 9629, 6953, 4489, 81, 850, 32027, 54349, 2545, 8556, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 322, 1205, 3278, 1855, 279, 57957, 850, 323, 1181, 24668, 1487, 304, 264, 1638, 1584, 624, 6725, 1669, 609, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestValidateConnectionPool(t *testing.T) { cases := []struct { name string in networking.ConnectionPoolSettings valid bool }{ {name: "valid connection pool, tcp and http", in: networking.ConnectionPoolSettings{ Tcp: &networking.ConnectionPoolSettings_TCPSettings{ MaxConnections: 7, ConnectTimeout: &types.Duration{Seconds: 2}, }, Http: &networking.ConnectionPoolSettings_HTTPSettings{ Http1MaxPendingRequests: 2, Http2MaxRequests: 11, MaxRequestsPerConnection: 5, MaxRetries: 4, }, }, valid: true}, {name: "valid connection pool, tcp only", in: networking.ConnectionPoolSettings{ Tcp: &networking.ConnectionPoolSettings_TCPSettings{ MaxConnections: 7, ConnectTimeout: &types.Duration{Seconds: 2}, }, }, valid: true}, {name: "valid connection pool, http only", in: networking.ConnectionPoolSettings{ Http: &networking.ConnectionPoolSettings_HTTPSettings{ Http1MaxPendingRequests: 2, Http2MaxRequests: 11, MaxRequestsPerConnection: 5, MaxRetries: 4, }, }, valid: true}, {name: "invalid connection pool, empty", in: networking.ConnectionPoolSettings{}, valid: false}, {name: "invalid connection pool, bad max connections", in: networking.ConnectionPoolSettings{ Tcp: &networking.ConnectionPoolSettings_TCPSettings{MaxConnections: -1}}, valid: false}, {name: "invalid connection pool, bad connect timeout", in: networking.ConnectionPoolSettings{ Tcp: &networking.ConnectionPoolSettings_TCPSettings{ ConnectTimeout: &types.Duration{Seconds: 2, Nanos: 5}}}, valid: false}, {name: "invalid connection pool, bad max pending requests", in: networking.ConnectionPoolSettings{ Http: &networking.ConnectionPoolSettings_HTTPSettings{Http1MaxPendingRequests: -1}}, valid: false}, {name: "invalid connection pool, bad max requests", in: networking.ConnectionPoolSettings{ Http: &networking.ConnectionPoolSettings_HTTPSettings{Http2MaxRequests: -1}}, valid: false}, {name: "invalid connection pool, bad max requests per connection", in: networking.ConnectionPoolSettings{ Http: 
&networking.ConnectionPoolSettings_HTTPSettings{MaxRequestsPerConnection: -1}}, valid: false}, {name: "invalid connection pool, bad max retries", in: networking.ConnectionPoolSettings{ Http: &networking.ConnectionPoolSettings_HTTPSettings{MaxRetries: -1}}, valid: false}, } for _, c := range cases { if got := validateConnectionPool(&c.in); (got == nil) != c.valid { t.Errorf("ValidateConnectionSettings failed on %v: got valid=%v but wanted valid=%v: %v", c.name, got == nil, c.valid, got) } } }
explode_data.jsonl/56924
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 955 }
[ 2830, 3393, 17926, 4526, 10551, 1155, 353, 8840, 836, 8, 341, 1444, 2264, 1669, 3056, 1235, 341, 197, 11609, 220, 914, 198, 197, 17430, 262, 28030, 17463, 10551, 6086, 198, 197, 56322, 1807, 198, 197, 59403, 197, 197, 47006, 25, 330, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestForBoolDefault(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) b := prompt.ForBool("", false, prompt.WithInput(&in), prompt.WithOutput(&out)) if b != false { t.Fatal("expected default of false to be returned when user accepts.") } out.Reset() in.Reset() in.Write([]byte("\n")) b = prompt.ForBool("", true, prompt.WithInput(&in), prompt.WithOutput(&out)) if b != true { t.Fatal("expected default of true to be returned when user accepts.") } }
explode_data.jsonl/6617
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 188 }
[ 2830, 3393, 2461, 11233, 3675, 1155, 353, 8840, 836, 8, 341, 2405, 700, 5820, 22622, 198, 2405, 304, 5820, 22622, 198, 17430, 4073, 10556, 3782, 4921, 77, 28075, 2233, 1669, 9934, 26676, 11233, 19814, 895, 345, 197, 3223, 14749, 26124, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestGetBoardById_as_html(t *testing.T) { board := testData.EmptyBoard req := httptest.NewRequest("GET", fmt.Sprintf("/boards/%d", board.ID), nil) req.Header.Set("Accept", "text/html") w := httptest.NewRecorder() router.ServeHTTP(w, req) httpassert.Success(t, w) httpassert.HtmlContentType(t, w) }
explode_data.jsonl/12541
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 123 }
[ 2830, 3393, 1949, 11932, 2720, 11898, 9564, 1155, 353, 8840, 836, 8, 341, 59868, 1669, 67348, 11180, 11932, 271, 24395, 1669, 54320, 70334, 75274, 445, 3806, 497, 8879, 17305, 4283, 19270, 12627, 67, 497, 4479, 9910, 701, 2092, 340, 24395...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReactorBroadcastTxMessage(t *testing.T) { config := cfg.TestConfig() const N = 4 reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { r.Stop() } }() for _, r := range reactors { for _, peer := range r.Switch.Peers().List() { peer.Set(types.PeerStateKey, peerState{1}) } } // send a bunch of txs to the first reactor's mempool // and wait for them all to be received in the others txs := checkTxs(t, reactors[0].mempool, NUM_TXS, UnknownPeerID) waitForTxsOnReactors(t, txs, reactors) }
explode_data.jsonl/7251
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 216 }
[ 2830, 3393, 693, 5621, 43362, 31584, 2052, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 13286, 8787, 2648, 741, 4777, 451, 284, 220, 19, 198, 197, 2934, 1087, 1669, 1281, 3036, 14611, 14799, 1087, 8754, 11, 451, 340, 16867, 2915, 368, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestVarIntWire round-trips Bitcoin-style variable-length integers through
// the wire format: each case is encoded with WriteVarInt, the bytes compared
// against the expected encoding, then decoded back with ReadVarInt and
// compared against the expected value. Cases cover the boundary values of the
// 1-, 2-, 4- and 8-byte encodings at the latest protocol version.
func TestVarIntWire(t *testing.T) {
	pver := ProtocolVersion

	tests := []struct {
		in   uint64 // Value to encode
		out  uint64 // Expected decoded value
		buf  []byte // Wire encoding
		pver uint32 // Protocol version for wire encoding
	}{
		// Latest protocol version.
		// Single byte
		{0, 0, []byte{0x00}, pver},
		// Max single byte
		{0xfc, 0xfc, []byte{0xfc}, pver},
		// Min 2-byte
		// NOTE(review): 0x0fd is an unusual spelling but is numerically
		// identical to 0xfd; the bytes are the 0xfd discriminant followed by
		// the little-endian uint16 0x00fd.
		{0xfd, 0xfd, []byte{0xfd, 0x0fd, 0x00}, pver},
		// Max 2-byte
		{0xffff, 0xffff, []byte{0xfd, 0xff, 0xff}, pver},
		// Min 4-byte
		{0x10000, 0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}, pver},
		// Max 4-byte
		{0xffffffff, 0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}, pver},
		// Min 8-byte
		{
			0x100000000, 0x100000000,
			[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
			pver,
		},
		// Max 8-byte
		{
			0xffffffffffffffff, 0xffffffffffffffff,
			[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
			pver,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode to wire format.
		var buf bytes.Buffer
		err := WriteVarInt(&buf, test.pver, test.in)
		if err != nil {
			t.Errorf("WriteVarInt #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("WriteVarInt #%d\n got: %s want: %s", i, spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Decode from wire format.
		rbuf := bytes.NewReader(test.buf)
		val, err := ReadVarInt(rbuf, test.pver)
		if err != nil {
			t.Errorf("ReadVarInt #%d error %v", i, err)
			continue
		}
		if val != test.out {
			t.Errorf("ReadVarInt #%d\n got: %d want: %d", i, val, test.out)
			continue
		}
	}
}
explode_data.jsonl/15296
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 793 }
[ 2830, 3393, 3962, 1072, 37845, 1155, 353, 8840, 836, 8, 341, 3223, 423, 1669, 24572, 5637, 271, 78216, 1669, 3056, 1235, 341, 197, 17430, 256, 2622, 21, 19, 442, 5162, 311, 16164, 198, 197, 13967, 220, 2622, 21, 19, 442, 31021, 29213,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestGet(t *testing.T) { storage := map[string]RESTStorage{} simpleStorage := SimpleRESTStorage{ item: Simple{ Name: "foo", }, } storage["simple"] = &simpleStorage handler := New(storage, "/prefix/version") server := httptest.NewServer(handler) resp, err := http.Get(server.URL + "/prefix/version/simple/id") var itemOut Simple body, err := extractBody(resp, &itemOut) expectNoError(t, err) if itemOut.Name != simpleStorage.item.Name { t.Errorf("Unexpected data: %#v, expected %#v (%s)", itemOut, simpleStorage.item, string(body)) } }
explode_data.jsonl/30458
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 203 }
[ 2830, 3393, 1949, 1155, 353, 8840, 836, 8, 341, 197, 16172, 1669, 2415, 14032, 60, 38307, 5793, 16094, 1903, 6456, 5793, 1669, 8993, 38307, 5793, 515, 197, 22339, 25, 8993, 515, 298, 21297, 25, 330, 7975, 756, 197, 197, 1583, 197, 532...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestServiceAccountNotControlledByUs verifies the controller surfaces an
// error when the launcher ServiceAccount for an MPIJob already exists but
// carries no owner references — i.e. it is not controlled by this operator,
// so adopting it would be unsafe.
func TestServiceAccountNotControlledByUs(t *testing.T) {
	f := newFixture(t)
	startTime := metav1.Now()
	completionTime := metav1.Now()

	mpiJob := newMPIJob("test", int32Ptr(64), &startTime, &completionTime)
	f.setUpMPIJob(mpiJob)
	f.setUpConfigMap(newConfigMap(mpiJob, 8, 8))
	serviceAccount := newLauncherServiceAccount(mpiJob)
	// Strip ownership so the ServiceAccount appears foreign to the controller.
	serviceAccount.OwnerReferences = nil
	f.setUpServiceAccount(serviceAccount)

	f.runExpectError(getKey(mpiJob, t), gpuResourceName)
}
explode_data.jsonl/75011
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 174 }
[ 2830, 3393, 1860, 7365, 2623, 3273, 832, 1359, 3558, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 501, 18930, 1155, 340, 21375, 1462, 1669, 77520, 16, 13244, 741, 32810, 14386, 1462, 1669, 77520, 16, 13244, 2822, 197, 39479, 12245, 1669, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestIP(t *testing.T) { var args struct { Host net.IP } err := parse("--host 192.168.0.1", &args) require.NoError(t, err) assert.Equal(t, "192.168.0.1", args.Host.String()) }
explode_data.jsonl/13054
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 83 }
[ 2830, 3393, 3298, 1155, 353, 8840, 836, 8, 341, 2405, 2827, 2036, 341, 197, 197, 9296, 4179, 46917, 198, 197, 532, 9859, 1669, 4715, 21549, 3790, 220, 16, 24, 17, 13, 16, 21, 23, 13, 15, 13, 16, 497, 609, 2116, 340, 17957, 35699, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestVtctlServer starts an in-process gRPC vtctl server on a random local
// port, connects a gRPC vtctl client to it, and runs the shared client test
// suite against the pair.
func TestVtctlServer(t *testing.T) {
	ts := vtctlclienttest.CreateTopoServer(t)

	// Listen on a random port
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	port := listener.Addr().(*net.TCPAddr).Port

	// Create a gRPC server and listen on the port
	server := grpc.NewServer()
	vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts))
	go server.Serve(listener)

	// Create a VtctlClient gRPC client to talk to the fake server
	client, err := gRPCVtctlClientFactory(fmt.Sprintf("localhost:%v", port))
	if err != nil {
		t.Fatalf("Cannot create client: %v", err)
	}
	defer client.Close()

	vtctlclienttest.TestSuite(t, ts, client)
}
explode_data.jsonl/31887
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 268 }
[ 2830, 3393, 53, 83, 12373, 5475, 1155, 353, 8840, 836, 8, 341, 57441, 1669, 39105, 12373, 2972, 1944, 7251, 5366, 78, 5475, 1155, 692, 197, 322, 32149, 389, 264, 4194, 2635, 198, 14440, 798, 11, 1848, 1669, 4179, 68334, 445, 27161, 49...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// Test_IsInternetGatewayNotFoundErr table-tests the error classifier: a nil
// error and an unrelated error are not "internet gateway not found", while a
// smithy GenericAPIError carrying the InternetGatewayIDNotFound code is.
func Test_IsInternetGatewayNotFoundErr(t *testing.T) {
	testCases := []struct {
		name string
		got  error
		want bool
	}{
		{
			"nil error is not",
			nil,
			false,
		},
		{
			"other error is not",
			errors.New("some error"),
			false,
		},
		{
			"InternetGatewayNotFoundErr is",
			&smithy.GenericAPIError{Code: InternetGatewayIDNotFound},
			true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if diff := cmp.Diff(tc.want, IsInternetGatewayNotFoundErr(tc.got), test.EquateConditions()); diff != "" {
				t.Errorf("r: -want, +got:\n%s", diff)
			}
		})
	}
}
explode_data.jsonl/29894
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 274 }
[ 2830, 3393, 31879, 35679, 40709, 10372, 7747, 1155, 353, 8840, 836, 8, 1476, 18185, 37302, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 3174, 354, 220, 1465, 198, 197, 50780, 1807, 198, 197, 59403, 197, 197, 515, 298, 197, 79925, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestIntegration_ExternalInitiatorV2 is an end-to-end test of the v2
// external-initiator flow: it starts an application with external initiators
// enabled, stands up a mock EI HTTP server and a mock bridge server, registers
// the EI and bridge on the Core node, creates a webhook job referencing them,
// then verifies (a) a run request with the wrong EI credentials is rejected
// with 401, (b) a run with the correct credentials executes the pipeline and
// calls the bridge, and (c) deleting the job notifies the EI.
// NOTE(review): the code below is kept byte-identical to the source record —
// it contains a multi-line raw TOML string whose original line layout cannot
// be recovered from this collapsed dump, so no reformatting is applied.
func TestIntegration_ExternalInitiatorV2(t *testing.T) { t.Parallel() ethClient, _, assertMockCalls := cltest.NewEthMocksWithStartupAssertions(t) defer assertMockCalls() cfg := cltest.NewTestEVMConfig(t) cfg.GeneralConfig.Overrides.FeatureExternalInitiators = null.BoolFrom(true) cfg.GeneralConfig.Overrides.SetTriggerFallbackDBPollInterval(10 * time.Millisecond) app, cleanup := cltest.NewApplicationWithConfig(t, cfg, ethClient, cltest.UseRealExternalInitiatorManager) defer cleanup() require.NoError(t, app.Start()) var ( eiName = "substrate-ei" eiSpec = map[string]interface{}{"foo": "bar"} eiRequest = map[string]interface{}{"result": 42} jobUUID = uuid.FromStringOrNil("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46") expectedCreateJobRequest = map[string]interface{}{ "jobId": jobUUID.String(), "type": eiName, "params": eiSpec, } ) // Setup EI var eiURL string var eiNotifiedOfCreate bool var eiNotifiedOfDelete bool { mockEI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if !eiNotifiedOfCreate { require.Equal(t, http.MethodPost, r.Method) eiNotifiedOfCreate = true defer r.Body.Close() var gotCreateJobRequest map[string]interface{} err := json.NewDecoder(r.Body).Decode(&gotCreateJobRequest) require.NoError(t, err) require.Equal(t, expectedCreateJobRequest, gotCreateJobRequest) w.WriteHeader(http.StatusOK) } else { require.Equal(t, http.MethodDelete, r.Method) eiNotifiedOfDelete = true defer r.Body.Close() require.Equal(t, fmt.Sprintf("/%v", jobUUID.String()), r.URL.Path) } })) defer mockEI.Close() eiURL = mockEI.URL } // Create the EI record on the Core node var eia *auth.Token { eiCreate := map[string]string{ "name": eiName, "url": eiURL, } eiCreateJSON, err := json.Marshal(eiCreate) require.NoError(t, err) eip := cltest.CreateExternalInitiatorViaWeb(t, app, string(eiCreateJSON)) eia = &auth.Token{ AccessKey: eip.AccessKey, Secret: eip.Secret, } } // Create the bridge on the Core node var bridgeCalled bool { bridgeServer := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { bridgeCalled = true defer r.Body.Close() var gotBridgeRequest map[string]interface{} err := json.NewDecoder(r.Body).Decode(&gotBridgeRequest) require.NoError(t, err) expectedBridgeRequest := map[string]interface{}{ "value": float64(42), } require.Equal(t, expectedBridgeRequest, gotBridgeRequest) w.WriteHeader(http.StatusOK) require.NoError(t, err) io.WriteString(w, `{}`) })) u, _ := url.Parse(bridgeServer.URL) app.Store.CreateBridgeType(&models.BridgeType{ Name: models.TaskType("substrate-adapter1"), URL: models.WebURL(*u), }) defer bridgeServer.Close() } // Create the job spec on the Core node var jobID int32 { tomlSpec := fmt.Sprintf(` type = "webhook" schemaVersion = 1 externalJobID = "%v" externalInitiators = [ { name = "%s", spec = """ %s """ } ] observationSource = """ parse [type=jsonparse path="result" data="$(jobRun.requestBody)"] submit [type=bridge name="substrate-adapter1" requestData=<{ "value": $(parse) }>] parse -> submit """ `, jobUUID, eiName, cltest.MustJSONMarshal(t, eiSpec)) _, err := webhook.ValidatedWebhookSpec(tomlSpec, app.GetExternalInitiatorManager()) require.NoError(t, err) job := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) jobID = job.ID t.Log("JOB created", job.WebhookSpecID) require.Eventually(t, func() bool { return eiNotifiedOfCreate }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of new job") } t.Run("calling webhook_spec with non-matching external_initiator_id returns unauthorized", func(t *testing.T) { eiaWrong := auth.NewToken() body := cltest.MustJSONMarshal(t, eiRequest) headers := make(map[string]string) headers[static.ExternalInitiatorAccessKeyHeader] = eiaWrong.AccessKey headers[static.ExternalInitiatorSecretHeader] = eiaWrong.Secret url := app.Config.ClientNodeURL() + "/v2/jobs/" + jobUUID.String() + "/runs" bodyBuf := bytes.NewBufferString(body) 
resp, cleanup := cltest.UnauthenticatedPost(t, url, bodyBuf, headers) defer cleanup() cltest.AssertServerResponse(t, resp, 401) cltest.AssertCountStays(t, app.Store, &pipeline.Run{}, 0) }) t.Run("calling webhook_spec with matching external_initiator_id works", func(t *testing.T) { // Simulate request from EI -> Core node cltest.AwaitJobActive(t, app.JobSpawner(), jobID, 3*time.Second) _ = cltest.CreateJobRunViaExternalInitiatorV2(t, app, jobUUID, *eia, cltest.MustJSONMarshal(t, eiRequest)) pipelineORM := pipeline.NewORM(app.Store.DB) jobORM := job.NewORM(app.Store.ORM.DB, cfg, pipelineORM, &postgres.NullEventBroadcaster{}, &postgres.NullAdvisoryLocker{}) runs := cltest.WaitForPipelineComplete(t, 0, jobID, 1, 2, jobORM, 5*time.Second, 300*time.Millisecond) require.Len(t, runs, 1) run := runs[0] require.Len(t, run.PipelineTaskRuns, 2) require.Empty(t, run.PipelineTaskRuns[0].Error) require.Empty(t, run.PipelineTaskRuns[1].Error) assert.True(t, bridgeCalled, "expected bridge server to be called") }) // Delete the job { cltest.DeleteJobViaWeb(t, app, jobID) require.Eventually(t, func() bool { return eiNotifiedOfDelete }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of deleted job") } }
explode_data.jsonl/29841
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2217 }
[ 2830, 3393, 52464, 62, 25913, 3803, 36122, 53, 17, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 769, 2959, 11, 8358, 2060, 11571, 55292, 1669, 1185, 1944, 7121, 65390, 11571, 16056, 39076, 90206, 1155, 340, 16867, 2060, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestFullValidatorSetPowerChange creates five validators with different
// powers under a MaxValidators=2 cap, checks their resulting bonded /
// unbonding / unbonded statuses and the bonded-by-power ordering, then raises
// the weakest validator's power and verifies it swaps into the bonded set.
func TestFullValidatorSetPowerChange(t *testing.T) {
	app, ctx, addrs, _ := bootstrapValidatorTest(t, 1000, 20)
	params := app.StakingKeeper.GetParams(ctx)
	max := 2
	params.MaxValidators = uint32(2)
	app.StakingKeeper.SetParams(ctx, params)

	// initialize some validators into the state
	powers := []int64{0, 100, 400, 400, 200}
	var validators [5]types.Validator
	for i, power := range powers {
		validators[i] = types.NewValidator(sdk.ValAddress(addrs[i]), PKs[i], types.Description{})
		tokens := sdk.TokensFromConsensusPower(power)
		validators[i], _ = validators[i].AddTokensFromDel(tokens)
		keeper.TestingUpdateValidator(app.StakingKeeper, ctx, validators[i], true)
	}
	// Re-read each validator from state so later assertions see stored status.
	for i := range powers {
		var found bool
		validators[i], found = app.StakingKeeper.GetValidator(ctx, validators[i].OperatorAddress)
		require.True(t, found)
	}

	assert.Equal(t, sdk.Unbonded, validators[0].Status)
	assert.Equal(t, sdk.Unbonding, validators[1].Status)
	assert.Equal(t, sdk.Bonded, validators[2].Status)
	assert.Equal(t, sdk.Bonded, validators[3].Status)
	assert.Equal(t, sdk.Unbonded, validators[4].Status)

	resValidators := app.StakingKeeper.GetBondedValidatorsByPower(ctx)
	assert.Equal(t, max, len(resValidators))
	assert.True(ValEq(t, validators[2], resValidators[0])) // in the order of txs
	assert.True(ValEq(t, validators[3], resValidators[1]))

	// test a swap in voting power
	tokens := sdk.TokensFromConsensusPower(600)
	validators[0], _ = validators[0].AddTokensFromDel(tokens)
	validators[0] = keeper.TestingUpdateValidator(app.StakingKeeper, ctx, validators[0], true)
	resValidators = app.StakingKeeper.GetBondedValidatorsByPower(ctx)
	assert.Equal(t, max, len(resValidators))
	assert.True(ValEq(t, validators[0], resValidators[0]))
	assert.True(ValEq(t, validators[2], resValidators[1]))
}
explode_data.jsonl/6101
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 695 }
[ 2830, 3393, 9432, 14256, 1649, 14986, 4072, 1155, 353, 8840, 836, 8, 341, 28236, 11, 5635, 11, 912, 5428, 11, 716, 1669, 26925, 14256, 2271, 1155, 11, 220, 16, 15, 15, 15, 11, 220, 17, 15, 340, 25856, 1669, 906, 7758, 1765, 77233, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestStringOnStack(t *testing.T) { s := "" for i := 0; i < 3; i++ { s = "a" + s + "b" + s + "c" } if want := "aaabcbabccbaabcbabccc"; s != want { t.Fatalf("want: '%v', got '%v'", want, s) } }
explode_data.jsonl/79320
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 703, 1925, 4336, 1155, 353, 8840, 836, 8, 341, 1903, 1669, 8389, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 18, 26, 600, 1027, 341, 197, 1903, 284, 330, 64, 1, 488, 274, 488, 330, 65, 1, 488, 274, 488, 330, 66, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestFindID exercises Element.FindID against a fixture SVG tree: finding a
// group element by id (compared structurally, children included), finding a
// nested rect by id, and returning nil for a missing id.
func TestFindID(t *testing.T) {
	svgElement := testElement()
	equals(t, "Find", &svgparser.Element{
		Name:       "g",
		Attributes: map[string]string{"id": "second"},
		Children: []*svgparser.Element{
			element("path", map[string]string{"d": "M50 50 Q50 100 100 100"}),
			element("rect", map[string]string{"width": "5", "height": "1"}),
		},
	}, svgElement.FindID("second"))
	equals(t, "Find",
		element("rect", map[string]string{"width": "5", "height": "3", "id": "inFirst"}),
		svgElement.FindID("inFirst"),
	)
	// Unknown ids yield nil.
	equals(t, "Find", nil, svgElement.FindID("missing"))
}
explode_data.jsonl/74166
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 231 }
[ 2830, 3393, 9885, 915, 1155, 353, 8840, 836, 8, 341, 1903, 7239, 1691, 1669, 1273, 1691, 2822, 197, 7176, 1155, 11, 330, 9885, 497, 609, 15196, 9657, 20139, 515, 197, 21297, 25, 981, 330, 70, 756, 197, 197, 10516, 25, 2415, 14032, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestEC2MetadataRetryFailure runs an EC2 metadata client against a stub
// server whose token endpoint always fails (503 for the PUT token request,
// 400 otherwise) while the meta-data endpoint succeeds, and verifies that
// GetMetadata still returns the expected value on two consecutive calls.
// NOTE(review): presumably this exercises the client's fallback from the
// IMDSv2 token flow to IMDSv1 after retries — confirm against the SDK docs.
func TestEC2MetadataRetryFailure(t *testing.T) {
	mux := http.NewServeMux()
	mux.HandleFunc("/latest/api/token", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "PUT" && r.Header.Get(ttlHeader) != "" {
			w.Header().Set(ttlHeader, "200")
			http.Error(w, "service unavailable", http.StatusServiceUnavailable)
			return
		}
		http.Error(w, "bad request", http.StatusBadRequest)
	})

	// meta-data endpoint for this test, just returns the token
	mux.HandleFunc("/latest/meta-data/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("profile_name"))
	})
	server := httptest.NewServer(mux)
	defer server.Close()

	c := ec2metadata.New(unit.Session, &aws.Config{
		Endpoint: aws.String(server.URL),
	})
	// Log retry and completion activity for debugging.
	c.Handlers.AfterRetry.PushBack(func(i *request.Request) {
		t.Logf("%v received, retrying operation %v", i.HTTPResponse.StatusCode, i.Operation.Name)
	})
	c.Handlers.Complete.PushBack(func(i *request.Request) {
		t.Logf("%v operation exited with status %v", i.Operation.Name, i.HTTPResponse.StatusCode)
	})

	resp, err := c.GetMetadata("some/path")
	if err != nil {
		t.Fatalf("Expected none, got error %v", err)
	}
	if resp != "profile_name" {
		t.Fatalf("Expected response to be profile_name, got %v", resp)
	}

	// A second call must behave identically.
	resp, err = c.GetMetadata("some/path")
	if err != nil {
		t.Fatalf("Expected none, got error %v", err)
	}
	if resp != "profile_name" {
		t.Fatalf("Expected response to be profile_name, got %v", resp)
	}
}
explode_data.jsonl/17844
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 563 }
[ 2830, 3393, 7498, 17, 14610, 51560, 17507, 1155, 353, 8840, 836, 8, 341, 2109, 2200, 1669, 1758, 7121, 60421, 44, 2200, 2822, 2109, 2200, 63623, 4283, 19350, 10508, 54386, 497, 2915, 3622, 1758, 37508, 11, 435, 353, 1254, 9659, 8, 341, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestVolumeRemoveErrors table-tests error paths of the volume remove CLI
// command: invocation with no arguments, and a client-side removal failure
// surfaced from the fake client.
func TestVolumeRemoveErrors(t *testing.T) {
	testCases := []struct {
		args             []string
		volumeRemoveFunc func(volumeID string, force bool) error
		expectedError    string
	}{
		{
			expectedError: "requires at least 1 argument",
		},
		{
			args: []string{"nodeID"},
			volumeRemoveFunc: func(volumeID string, force bool) error {
				return errors.Errorf("error removing the volume")
			},
			expectedError: "error removing the volume",
		},
	}
	for _, tc := range testCases {
		cmd := newRemoveCommand(
			test.NewFakeCli(&fakeClient{
				volumeRemoveFunc: tc.volumeRemoveFunc,
			}))
		cmd.SetArgs(tc.args)
		// Suppress usage output during the expected failures.
		cmd.SetOutput(ioutil.Discard)
		assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
	}
}
explode_data.jsonl/16375
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 280 }
[ 2830, 3393, 18902, 13021, 13877, 1155, 353, 8840, 836, 8, 341, 18185, 37302, 1669, 3056, 1235, 341, 197, 31215, 1797, 3056, 917, 198, 197, 5195, 4661, 13021, 9626, 2915, 74706, 915, 914, 11, 5344, 1807, 8, 1465, 198, 197, 42400, 1454, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetBlock(t *testing.T) { bc := newTestChain(t) blocks, err := bc.genBlocks(100) require.NoError(t, err) // Test unpersisted and persisted access for j := 0; j < 2; j++ { for i := 0; i < len(blocks); i++ { block, err := bc.GetBlock(blocks[i].Hash()) require.NoErrorf(t, err, "can't get block %d: %s, attempt %d", i, err, j) assert.Equal(t, blocks[i].Index, block.Index) assert.Equal(t, blocks[i].Hash(), block.Hash()) } assert.NoError(t, bc.persist()) } }
explode_data.jsonl/74540
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 207 }
[ 2830, 3393, 1949, 4713, 1155, 353, 8840, 836, 8, 341, 2233, 66, 1669, 501, 2271, 18837, 1155, 340, 2233, 34936, 11, 1848, 1669, 17916, 22822, 29804, 7, 16, 15, 15, 340, 17957, 35699, 1155, 11, 1848, 692, 197, 322, 3393, 21624, 4975, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestStackAvailableCongestionControl queries the TCP stack for its set of
// available congestion-control algorithms and expects exactly "reno cubic".
func TestStackAvailableCongestionControl(t *testing.T) {
	c := context.New(t, 1500)
	defer c.Cleanup()

	s := c.Stack()

	// Query permitted congestion control algorithms.
	var aCC tcpip.AvailableCongestionControlOption
	if err := s.TransportProtocolOption(tcp.ProtocolNumber, &aCC); err != nil {
		t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &aCC, err)
	}
	if got, want := aCC, tcpip.AvailableCongestionControlOption("reno cubic"); got != want {
		t.Fatalf("got tcpip.AvailableCongestionControlOption: %v, want: %v", got, want)
	}
}
explode_data.jsonl/22330
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 201 }
[ 2830, 3393, 4336, 16485, 30421, 42498, 3273, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 2266, 7121, 1155, 11, 220, 16, 20, 15, 15, 340, 16867, 272, 727, 60639, 2822, 1903, 1669, 272, 58646, 2822, 197, 322, 11361, 15129, 54046, 2524, 25...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func Test_CreateDummyTemplateEngine_CheckReturn(t *testing.T) { api := CreateDummyTemplateEngine() switch tp := api.(type) { case *dummy.Engine: return default: t.Errorf("Invalid returned type: %T", tp) } }
explode_data.jsonl/23673
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 80 }
[ 2830, 3393, 34325, 43344, 7275, 4571, 28188, 5598, 1155, 353, 8840, 836, 8, 341, 54299, 1669, 4230, 43344, 7275, 4571, 2822, 8961, 18101, 1669, 6330, 12832, 1313, 8, 341, 2722, 353, 31390, 54424, 510, 197, 853, 198, 11940, 510, 197, 324...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
// TestBlockPullerHeavyBlocks checks that the block puller respects its
// buffer limit when blocks are heavy.
func TestBlockPullerHeavyBlocks(t *testing.T) {
	// Scenario: a single ordering node.
	// The puller fetches 50 blocks, each padded to about 1K,
	// but the buffer can only hold 10K, so the 50 blocks should be
	// pulled in batches of 10, verifying 5 batches of sequences in turn.
	osn := newClusterNode(t)
	defer osn.stop()

	osn.addExpectProbeAssert()
	osn.addExpectPullAssert(1)
	osn.enqueueResponse(100) // the last sequence is 100

	// Enqueue DeliverResponses for sequences [start, end], each carrying a
	// block padded with 1024 bytes of data.
	enqueueBlockBatch := func(start, end uint64) {
		for seq := start; seq <= end; seq++ {
			resp := &orderer.DeliverResponse{
				Type: &orderer.DeliverResponse_Block{
					Block: common.NewBlock(seq, nil),
				},
			}
			data := resp.GetBlock().Data.Data
			resp.GetBlock().Data.Data = append(data, make([]byte, 1024))
			osn.blockResponses <- resp
		}
	}

	dialer := newCountingDialer()
	bp := newBlockPuller(dialer, osn.srv.Address())
	// Count "Got block" log lines to know how many blocks were fetched.
	var gotBlockMessageCount int
	bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
		if strings.Contains(entry.Message, "Got block") {
			gotBlockMessageCount++
		}
		return nil
	}))
	bp.MaxTotalBufferBytes = 1024 * 10 // 10K

	// Enqueue only the next batch into the ordering node.
	// This ensures that only 10 blocks at a time are pulled into the
	// buffer, and not more.
	for i := uint64(0); i < 5; i++ {
		enqueueBlockBatch(i*10+uint64(1), i*10+uint64(10))
		for seq := i*10 + uint64(1); seq <= i*10+uint64(10); seq++ {
			assert.Equal(t, seq, bp.PullBlock(seq).Header.Number)
		}
	}

	assert.Equal(t, 50, gotBlockMessageCount)
	bp.Close()
	dialer.assertAllConnectionsClosed(t)
}
explode_data.jsonl/47270
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 727 }
[ 2830, 3393, 4713, 36068, 261, 64469, 29804, 1155, 353, 8840, 836, 8, 341, 322, 102122, 5122, 23990, 99975, 81668, 92374, 41453, 322, 72225, 99922, 31548, 73157, 72225, 20, 15, 99922, 198, 322, 29258, 16, 42, 3837, 77288, 118098, 31548, 10...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSignBytes(t *testing.T) { account := GenerateAccount() message := make([]byte, 15) rand.Read(message) signature, err := SignBytes(account.PrivateKey, message) require.NoError(t, err) require.True(t, VerifyBytes(account.PublicKey, message, signature)) if message[0] == 255 { message[0] = 0 } else { message[0] = message[0] + 1 } require.False(t, VerifyBytes(account.PublicKey, message, signature)) }
explode_data.jsonl/2157
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 150 }
[ 2830, 3393, 7264, 7078, 1155, 353, 8840, 836, 8, 341, 86866, 1669, 19813, 7365, 741, 24753, 1669, 1281, 10556, 3782, 11, 220, 16, 20, 340, 7000, 437, 6503, 7333, 340, 69054, 1568, 11, 1848, 1669, 7075, 7078, 23758, 87738, 1592, 11, 19...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestHas(t *testing.T) { t.Run("exists", func(t *testing.T) { mockStore := new(store.MockStore) mockStore.On("Has", "test key").Return(true) testServer := server.NewServer(mockStore, newLogger()) response, err := testServer.Has(context.Background(), &api.HasRequest{ Key: "test key", }) require.NotNil(t, response) require.True(t, response.Exists) require.Nil(t, err) }) t.Run("does not exist", func(t *testing.T) { mockStore := new(store.MockStore) mockStore.On("Has", "test key").Return(false) testServer := server.NewServer(mockStore, newLogger()) response, err := testServer.Has(context.Background(), &api.HasRequest{ Key: "test key", }) require.NotNil(t, response) require.False(t, response.Exists) require.Nil(t, err) }) }
explode_data.jsonl/28526
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 312 }
[ 2830, 3393, 10281, 1155, 353, 8840, 836, 8, 341, 3244, 16708, 445, 16304, 497, 2915, 1155, 353, 8840, 836, 8, 341, 197, 77333, 6093, 1669, 501, 31200, 24664, 6093, 340, 197, 77333, 6093, 8071, 445, 10281, 497, 330, 1944, 1376, 1827, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestInsertProject(t *testing.T) { db, cache, end := test.SetupPG(t, bootstrap.InitiliazeDB) defer end() project.Delete(db, cache, "key") u, _ := assets.InsertAdminUser(db) proj := sdk.Project{ Name: "test proj", Key: "key", } assert.NoError(t, project.Insert(db, cache, &proj, u)) }
explode_data.jsonl/76250
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 125 }
[ 2830, 3393, 13780, 7849, 1155, 353, 8840, 836, 8, 341, 20939, 11, 6500, 11, 835, 1669, 1273, 39820, 11383, 1155, 11, 26925, 26849, 24078, 2986, 3506, 340, 16867, 835, 741, 72470, 18872, 9791, 11, 6500, 11, 330, 792, 5130, 10676, 11, 7...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestAccounts_Export builds an accounts state containing a base-coin
// balance, a custom-coin ("AAA") balance and a multisig account, exports it
// to an AppState, and compares the JSON serialization of the exported
// accounts against the expected literal.
// NOTE(review): statement order matters here — the coin must be created and
// committed before balances referencing its ID are set; do not reorder.
func TestAccounts_Export(t *testing.T) {
	mutableTree, _ := tree.NewMutableTree(0, db.NewMemDB(), 1024)
	b := bus.NewBus()

	busCoins, err := coins.NewCoins(b, mutableTree)
	if err != nil {
		t.Fatal(err)
	}
	b.SetCoins(coins.NewBus(busCoins))
	b.SetChecker(checker.NewChecker(b))
	accounts, err := NewAccounts(b, mutableTree)
	if err != nil {
		t.Fatal(err)
	}
	// Base-coin (id 0) balance for address 0x04...
	accounts.SetBalance([20]byte{4}, 0, big.NewInt(1000))

	coinsState, err := coins.NewCoins(b, mutableTree)
	if err != nil {
		t.Fatal(err)
	}
	// Create the custom coin "AAA" with id 1 and commit it so it is
	// queryable by symbol below.
	coinsState.Create(1, types.StrToCoinSymbol("AAA"), "AAACOIN", helpers.BipToPip(big.NewInt(10)), 10, helpers.BipToPip(big.NewInt(10000)), big.NewInt(0).Exp(big.NewInt(10), big.NewInt(10+18), nil), nil)
	err = coinsState.Commit()
	if err != nil {
		t.Fatal(err)
	}
	symbol := coinsState.GetCoinBySymbol(types.StrToCoinSymbol("AAA"), 0)
	if symbol == nil {
		t.Fatal("coin not found")
	}
	// Custom-coin balance for the same address, plus a 2-of-3 multisig
	// owned by addresses 0x01, 0x02, 0x03 at multisig address 0x04...
	accounts.SetBalance([20]byte{4}, symbol.ID(), big.NewInt(1001))
	_ = accounts.CreateMultisig([]uint32{1, 1, 2}, []types.Address{[20]byte{1}, [20]byte{2}, [20]byte{3}}, 2, [20]byte{4})

	err = accounts.Commit()
	if err != nil {
		t.Fatal(err)
	}

	state := new(types.AppState)
	accounts.Export(state)

	bytes, err := json.Marshal(state.Accounts)
	if err != nil {
		t.Fatal(err)
	}

	if string(bytes) != "[{\"address\":\"Mx0400000000000000000000000000000000000000\",\"balance\":[{\"coin\":1,\"value\":\"1001\"},{\"coin\":0,\"value\":\"1000\"}],\"nonce\":0,\"multisig_data\":{\"weights\":[1,1,2],\"threshold\":2,\"addresses\":[\"Mx0100000000000000000000000000000000000000\",\"Mx0200000000000000000000000000000000000000\",\"Mx0300000000000000000000000000000000000000\"]}}]" {
		t.Fatal("not equal JSON")
	}
}
explode_data.jsonl/23605
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 707 }
[ 2830, 3393, 41369, 62, 16894, 1155, 353, 8840, 836, 8, 341, 2109, 5922, 6533, 11, 716, 1669, 4916, 7121, 11217, 6533, 7, 15, 11, 2927, 7121, 18816, 3506, 1507, 220, 16, 15, 17, 19, 340, 2233, 1669, 5828, 7121, 15073, 741, 92530, 696...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9