text
stringlengths
93
16.4k
id
stringlengths
20
40
metadata
dict
input_ids
listlengths
45
2.05k
attention_mask
listlengths
45
2.05k
complexity
int64
1
9
func TestRPMRelease(t *testing.T) { accept(t, acceptParms{ Name: "release_rpm", Conf: "release.rpm.yaml", Format: "rpm", Dockerfile: "release.rpm.dockerfile", }) }
explode_data.jsonl/15463
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 90 }
[ 2830, 3393, 49, 8795, 16077, 1155, 353, 8840, 836, 8, 341, 197, 10330, 1155, 11, 4193, 85440, 515, 197, 21297, 25, 981, 330, 22998, 1710, 5187, 756, 197, 197, 15578, 25, 981, 330, 22998, 97702, 33406, 756, 197, 197, 4061, 25, 257, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestReset(t *testing.T) { FakeNow := time.Now() var testcases = []struct { name string resources []common.Resource rtype string state string dest string expire time.Duration hasContent bool }{ { name: "empty - has no owner", resources: []common.Resource{ { Name: "res", Type: "t", State: "s", Owner: "", LastUpdate: FakeNow.Add(-time.Minute * 20), }, }, rtype: "t", state: "s", expire: time.Minute * 10, dest: "d", }, { name: "empty - not expire", resources: []common.Resource{ { Name: "res", Type: "t", State: "s", Owner: "", LastUpdate: FakeNow, }, }, rtype: "t", state: "s", expire: time.Minute * 10, dest: "d", }, { name: "empty - no match type", resources: []common.Resource{ { Name: "res", Type: "wrong", State: "s", Owner: "", LastUpdate: FakeNow.Add(-time.Minute * 20), }, }, rtype: "t", state: "s", expire: time.Minute * 10, dest: "d", }, { name: "empty - no match state", resources: []common.Resource{ { Name: "res", Type: "t", State: "wrong", Owner: "", LastUpdate: FakeNow.Add(-time.Minute * 20), }, }, rtype: "t", state: "s", expire: time.Minute * 10, dest: "d", }, { name: "ok", resources: []common.Resource{ { Name: "res", Type: "t", State: "s", Owner: "user", LastUpdate: FakeNow.Add(-time.Minute * 20), }, }, rtype: "t", state: "s", expire: time.Minute * 10, dest: "d", hasContent: true, }, } for _, tc := range testcases { c := MakeTestRanch(tc.resources) rmap := c.Reset(tc.rtype, tc.state, tc.expire, tc.dest) if !tc.hasContent { if len(rmap) != 0 { t.Errorf("%s - Expect empty map. Got %v", tc.name, rmap) } } else { if owner, ok := rmap["res"]; !ok || owner != "user" { t.Errorf("%s - Expect res - user. Got %v", tc.name, rmap) } if !c.Resources[0].LastUpdate.After(FakeNow) { t.Errorf("%s - LastUpdate did not update.", tc.name) } } } }
explode_data.jsonl/12948
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1255 }
[ 2830, 3393, 14828, 1155, 353, 8840, 836, 8, 341, 12727, 726, 7039, 1669, 882, 13244, 2822, 2405, 1273, 23910, 284, 3056, 1235, 341, 197, 11609, 981, 914, 198, 197, 10202, 2360, 220, 3056, 5464, 20766, 198, 197, 7000, 1313, 414, 914, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestValidateSecurityContext(t *testing.T) { runAsUser := int64(1) fullValidSC := func() *core.SecurityContext { return &core.SecurityContext{ Privileged: boolPtr(false), Capabilities: &core.Capabilities{ Add: []core.Capability{"foo"}, Drop: []core.Capability{"bar"}, }, SELinuxOptions: &core.SELinuxOptions{ User: "user", Role: "role", Type: "type", Level: "level", }, RunAsUser: &runAsUser, } } //setup data allSettings := fullValidSC() noCaps := fullValidSC() noCaps.Capabilities = nil noSELinux := fullValidSC() noSELinux.SELinuxOptions = nil noPrivRequest := fullValidSC() noPrivRequest.Privileged = nil noRunAsUser := fullValidSC() noRunAsUser.RunAsUser = nil successCases := map[string]struct { sc *core.SecurityContext }{ "all settings": {allSettings}, "no capabilities": {noCaps}, "no selinux": {noSELinux}, "no priv request": {noPrivRequest}, "no run as user": {noRunAsUser}, } for k, v := range successCases { if errs := ValidateSecurityContext(v.sc, field.NewPath("field")); len(errs) != 0 { t.Errorf("[%s] Expected success, got %v", k, errs) } } privRequestWithGlobalDeny := fullValidSC() privRequestWithGlobalDeny.Privileged = boolPtr(true) negativeRunAsUser := fullValidSC() negativeUser := int64(-1) negativeRunAsUser.RunAsUser = &negativeUser privWithoutEscalation := fullValidSC() privWithoutEscalation.Privileged = boolPtr(true) privWithoutEscalation.AllowPrivilegeEscalation = boolPtr(false) capSysAdminWithoutEscalation := fullValidSC() capSysAdminWithoutEscalation.Capabilities.Add = []core.Capability{"CAP_SYS_ADMIN"} capSysAdminWithoutEscalation.AllowPrivilegeEscalation = boolPtr(false) errorCases := map[string]struct { sc *core.SecurityContext errorType field.ErrorType errorDetail string capAllowPriv bool }{ "request privileged when capabilities forbids": { sc: privRequestWithGlobalDeny, errorType: "FieldValueForbidden", errorDetail: "disallowed by cluster policy", }, "negative RunAsUser": { sc: negativeRunAsUser, errorType: "FieldValueInvalid", errorDetail: 
"must be between", }, "with CAP_SYS_ADMIN and allowPrivilegeEscalation false": { sc: capSysAdminWithoutEscalation, errorType: "FieldValueInvalid", errorDetail: "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN", }, "with privileged and allowPrivilegeEscalation false": { sc: privWithoutEscalation, errorType: "FieldValueInvalid", errorDetail: "cannot set `allowPrivilegeEscalation` to false and `privileged` to true", capAllowPriv: true, }, } for k, v := range errorCases { capabilities.SetForTests(capabilities.Capabilities{ AllowPrivileged: v.capAllowPriv, }) if errs := ValidateSecurityContext(v.sc, field.NewPath("field")); len(errs) == 0 || errs[0].Type != v.errorType || !strings.Contains(errs[0].Detail, v.errorDetail) { t.Errorf("[%s] Expected error type %q with detail %q, got %v", k, v.errorType, v.errorDetail, errs) } } }
explode_data.jsonl/1057
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1213 }
[ 2830, 3393, 17926, 15352, 1972, 1155, 353, 8840, 836, 8, 341, 56742, 2121, 1474, 1669, 526, 21, 19, 7, 16, 340, 94042, 4088, 3540, 1669, 2915, 368, 353, 2153, 21567, 1972, 341, 197, 853, 609, 2153, 21567, 1972, 515, 298, 197, 32124, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSignVerifyData(t *testing.T) { privateKey, err := LoadPrivateKeyFromFile(privateKeyPEMFile, password) require.NoError(t, err) publicKey, err := LoadPublicKeyFromFile(publicKeyPEMFile) require.NoError(t, err) testData, err := ioutil.ReadFile(testDataFile) require.NoError(t, err) signature := ed25519.Sign(privateKey, testData) verified := ed25519.Verify(publicKey, testData, signature) require.Equal(t, true, verified) }
explode_data.jsonl/58514
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 156 }
[ 2830, 3393, 7264, 32627, 1043, 1155, 353, 8840, 836, 8, 341, 2455, 1592, 11, 1848, 1669, 8893, 75981, 43633, 17550, 1592, 1740, 44, 1703, 11, 3552, 340, 17957, 35699, 1155, 11, 1848, 692, 1219, 1592, 11, 1848, 1669, 8893, 61822, 43633, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestReversiAnz21(t *testing.T) { r := NewReversiAnz() r.SetOwnMin(1) if r.GetOwnMin() != 1 { t.Errorf("NG") } }
explode_data.jsonl/23044
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 64 }
[ 2830, 3393, 693, 3004, 72, 2082, 89, 17, 16, 1155, 353, 8840, 836, 8, 341, 7000, 1669, 1532, 693, 3004, 72, 2082, 89, 741, 7000, 4202, 14182, 6217, 7, 16, 340, 743, 435, 2234, 14182, 6217, 368, 961, 220, 16, 341, 197, 3244, 13080,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestFieldIntValue(t *testing.T) { f := newField("Name", "N", 8, 0) f.Offset = 3 recordBuf := []byte(" -2020 ") v := f.intValue(recordBuf) require.Equal(t, int64(-2020), v) }
explode_data.jsonl/79452
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 88 }
[ 2830, 3393, 1877, 1072, 1130, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 501, 1877, 445, 675, 497, 330, 45, 497, 220, 23, 11, 220, 15, 340, 1166, 61958, 284, 220, 18, 198, 71952, 15064, 1669, 3056, 3782, 445, 414, 481, 17, 15, 17, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestContainerImages_configuration(t *testing.T) { for _, tt := range imageTests { t.Run(tt.name, func(rt *testing.T) { if tt.pre != nil { tt.pre(rt) } a := makeTestArgoCD(tt.opts...) image := tt.imageFunc(a) if image != tt.want { rt.Errorf("got %q, want %q", image, tt.want) } }) } }
explode_data.jsonl/11931
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 159 }
[ 2830, 3393, 4502, 14228, 35726, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 2168, 18200, 341, 197, 3244, 16708, 47152, 2644, 11, 2915, 56154, 353, 8840, 836, 8, 341, 298, 743, 17853, 6225, 961, 2092, 341, 571, 3244, 8...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMsgParseAcceptNoSubprotoConf(t *testing.T) { srvConf := message.ServerConfiguration{ MajorProtocolVersion: 22, MinorProtocolVersion: 33, ReadTimeout: 11 * time.Second, MessageBufferSize: 8192, SubProtocolName: []byte(nil), } // Compose encoded message buf, err := message.NewAcceptConfMessage(srvConf) require.NoError(t, err) require.True(t, len(buf) > 0) // Parse actual := tryParseNoErr(t, buf) // Compare require.NotNil(t, actual.MsgBuffer) require.Equal(t, message.MsgAcceptConf, actual.MsgType) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, actual.MsgIdentifierBytes) require.Equal(t, [8]byte{}, actual.MsgIdentifier) require.Nil(t, actual.MsgName) require.Equal(t, pld.Payload{}, actual.MsgPayload) require.Equal(t, srvConf, actual.ServerConfiguration) }
explode_data.jsonl/9309
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 326 }
[ 2830, 3393, 6611, 14463, 16646, 2753, 3136, 15110, 15578, 1155, 353, 8840, 836, 8, 341, 1903, 10553, 15578, 1669, 1943, 22997, 7688, 515, 197, 9209, 3035, 20689, 5637, 25, 220, 17, 17, 345, 197, 197, 57024, 20689, 5637, 25, 220, 18, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMessageDrafts(t *testing.T) { useRemoteMock = false defer func() { useRemoteMock = true }() ctc := makeChatTestContext(t, "TestMessageDrafts", 1) defer ctc.cleanup() user := ctc.users()[0] conv := mustCreateConversationForTest(t, ctc, user, chat1.TopicType_CHAT, chat1.ConversationMembersType_IMPTEAMNATIVE) draft := "NEW MESSAAGE" require.NoError(t, ctc.as(t, user).chatLocalHandler().UpdateUnsentText(context.TODO(), chat1.UpdateUnsentTextArg{ ConversationID: conv.Id, TlfName: conv.TlfName, Text: draft, })) ibres, err := ctc.as(t, user).chatLocalHandler().GetInboxAndUnboxLocal(context.TODO(), chat1.GetInboxAndUnboxLocalArg{ Query: &chat1.GetInboxLocalQuery{ ConvIDs: []chat1.ConversationID{conv.Id}, }, }) require.NoError(t, err) require.Equal(t, 1, len(ibres.Conversations)) require.NotNil(t, ibres.Conversations[0].Info.Draft) require.Equal(t, draft, *ibres.Conversations[0].Info.Draft) _, err = ctc.as(t, user).chatLocalHandler().PostLocalNonblock(context.TODO(), chat1.PostLocalNonblockArg{ ConversationID: conv.Id, Msg: chat1.MessagePlaintext{ ClientHeader: chat1.MessageClientHeader{ TlfName: conv.TlfName, MessageType: chat1.MessageType_TEXT, }, MessageBody: chat1.NewMessageBodyWithText(chat1.MessageText{ Body: "HIHIHI", }), }, }) require.NoError(t, err) worked := false for i := 0; i < 5; i++ { ibres, err = ctc.as(t, user).chatLocalHandler().GetInboxAndUnboxLocal(context.TODO(), chat1.GetInboxAndUnboxLocalArg{ Query: &chat1.GetInboxLocalQuery{ ConvIDs: []chat1.ConversationID{conv.Id}, }, }) require.NoError(t, err) require.Equal(t, 1, len(ibres.Conversations)) if ibres.Conversations[0].Info.Draft == nil { worked = true break } time.Sleep(time.Second) } require.True(t, worked) }
explode_data.jsonl/63728
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 803 }
[ 2830, 3393, 2052, 50086, 82, 1155, 353, 8840, 836, 8, 341, 41819, 24703, 11571, 284, 895, 198, 16867, 2915, 368, 314, 990, 24703, 11571, 284, 830, 50746, 89216, 66, 1669, 1281, 15672, 2271, 1972, 1155, 11, 330, 2271, 2052, 50086, 82, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestCommandStyle(t *testing.T) { gopClTest(t, ` println [] println {} `, `package main import fmt "fmt" func main() { fmt.Println([]interface { }{}) fmt.Println(map[string]interface { }{}) } `) }
explode_data.jsonl/73708
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 87 }
[ 2830, 3393, 4062, 2323, 1155, 353, 8840, 836, 8, 341, 3174, 453, 5066, 2271, 1155, 11, 22074, 33655, 4167, 33655, 5613, 7808, 1565, 1722, 1887, 271, 474, 8879, 330, 12501, 1837, 2830, 1887, 368, 341, 11009, 12419, 10556, 4970, 341, 197,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func Test_Load_Fuzz(t *testing.T) { // Making sure the function never panics for i := 0; i < 50000; i++ { f := fuzz.New() // Prepare arguments var ( raw []byte ) f.Fuzz(&raw) // Execute Load(bytes.NewReader(raw)) } }
explode_data.jsonl/29853
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 104 }
[ 2830, 3393, 19553, 1400, 8889, 1155, 353, 8840, 836, 8, 341, 197, 322, 24288, 2704, 279, 729, 2581, 7215, 1211, 198, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 20, 15, 15, 15, 15, 26, 600, 1027, 341, 197, 1166, 1669, 76142, 7121...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDefaultTimeouts(t *testing.T) { customBackend := getCustomBackend() customBackend.KeyValueMap["client.peer.timeout.connection"] = "" customBackend.KeyValueMap["client.peer.timeout.response"] = "" customBackend.KeyValueMap["client.peer.timeout.discovery.greylistExpiry"] = "" customBackend.KeyValueMap["client.eventService.timeout.connection"] = "" customBackend.KeyValueMap["client.eventService.timeout.registrationResponse"] = "" customBackend.KeyValueMap["client.orderer.timeout.connection"] = "" customBackend.KeyValueMap["client.orderer.timeout.response"] = "" customBackend.KeyValueMap["client.global.timeout.query"] = "" customBackend.KeyValueMap["client.global.timeout.execute"] = "" customBackend.KeyValueMap["client.global.timeout.resmgmt"] = "" customBackend.KeyValueMap["client.global.cache.connectionIdle"] = "" customBackend.KeyValueMap["client.global.cache.eventServiceIdle"] = "" customBackend.KeyValueMap["client.global.cache.channelConfig"] = "" customBackend.KeyValueMap["client.global.cache.channelMembership"] = "" customBackend.KeyValueMap["client.global.cache.discovery"] = "" customBackend.KeyValueMap["client.global.cache.selection"] = "" endpointConfig, err := ConfigFromBackend(customBackend) if err != nil { t.Fatal("Failed to get endpoint config from backend") } errStr := "%s default timeout not read correctly. 
Got: %s" t1 := endpointConfig.Timeout(fab.PeerConnection) if t1 != defaultPeerConnectionTimeout { t.Fatalf(errStr, "PeerConnection", t1) } t1 = endpointConfig.Timeout(fab.PeerResponse) if t1 != defaultPeerResponseTimeout { t.Fatalf(errStr, "PeerResponse", t1) } t1 = endpointConfig.Timeout(fab.DiscoveryGreylistExpiry) if t1 != defaultDiscoveryGreylistExpiryTimeout { t.Fatalf(errStr, "DiscoveryGreylistExpiry", t1) } t1 = endpointConfig.Timeout(fab.EventReg) if t1 != defaultEventRegTimeout { t.Fatalf(errStr, "EventReg", t1) } t1 = endpointConfig.Timeout(fab.OrdererConnection) if t1 != defaultOrdererConnectionTimeout { t.Fatalf(errStr, "OrdererConnection", t1) } t1 = endpointConfig.Timeout(fab.DiscoveryServiceRefresh) if t1 != defaultDiscoveryRefreshInterval { t.Fatalf(errStr, "DiscoveryRefreshInterval", t1) } t1 = endpointConfig.Timeout(fab.SelectionServiceRefresh) if t1 != defaultSelectionRefreshInterval { t.Fatalf(errStr, "SelectionRefreshInterval", t1) } checkDefaultTimeout(endpointConfig, t, errStr) }
explode_data.jsonl/34071
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 824 }
[ 2830, 3393, 3675, 7636, 82, 1155, 353, 8840, 836, 8, 341, 1444, 1450, 29699, 1669, 633, 10268, 29699, 741, 1444, 1450, 29699, 9610, 1130, 2227, 1183, 2972, 72864, 36110, 20310, 1341, 284, 8389, 1444, 1450, 29699, 9610, 1130, 2227, 1183, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
func TestBytes_ToComplex64(t *testing.T) { tests := []struct { name string e Bytes want Complex64 }{ {name: "", e: Bytes{[]byte("123")}, want: Complex64{123}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := tt.e.ToComplex64(); !got.Equal(tt.want) { t.Errorf("Bytes.ToComplex64() = %v, want %v", got, tt.want) } }) } }
explode_data.jsonl/34768
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 181 }
[ 2830, 3393, 7078, 38346, 31137, 21, 19, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 11609, 914, 198, 197, 7727, 262, 30024, 198, 197, 50780, 22096, 21, 19, 198, 197, 59403, 197, 197, 47006, 25, 7342, 384, 25, 30...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestUnregisterModel(t *testing.T) { data := []*DeptInfo{ { DeptName: "A", EmployeeName: "A1", Salary: 1000, }, { DeptName: "A", EmployeeName: "A2", Salary: 2000, }, { DeptName: "B", EmployeeName: "B1", Salary: 2000, }, { DeptName: "B", EmployeeName: "B2", Salary: 4000, }, { DeptName: "B", EmployeeName: "B3", Salary: 3000, }, } qs := dORM.QueryTable("dept_info") i, _ := qs.PrepareInsert() for _, d := range data { _, err := i.Insert(d) if err != nil { throwFail(t, err) } } f := func() { var res []UnregisterModel n, err := dORM.QueryTable("dept_info").All(&res) throwFail(t, err) throwFail(t, AssertIs(n, 5)) throwFail(t, AssertIs(res[0].EmployeeName, "A1")) type Sum struct { DeptName string Total int } var sun []Sum qs.Aggregate("dept_name,sum(salary) as total").GroupBy("dept_name").OrderBy("dept_name").All(&sun) throwFail(t, AssertIs(sun[0].DeptName, "A")) throwFail(t, AssertIs(sun[0].Total, 3000)) type Max struct { DeptName string Max float64 } var max []Max qs.Aggregate("dept_name,max(salary) as max").GroupBy("dept_name").OrderBy("dept_name").All(&max) throwFail(t, AssertIs(max[1].DeptName, "B")) throwFail(t, AssertIs(max[1].Max, 4000)) } for i := 0; i < 5; i++ { f() } }
explode_data.jsonl/18121
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 711 }
[ 2830, 3393, 1806, 6343, 1712, 1155, 353, 8840, 836, 8, 341, 8924, 1669, 29838, 67366, 1731, 515, 197, 197, 515, 298, 197, 67366, 675, 25, 257, 330, 32, 756, 298, 197, 16984, 675, 25, 330, 32, 16, 756, 298, 7568, 39459, 25, 981, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSymlink(t *testing.T) { for _, format := range formats { format := format t.Run(fmt.Sprintf("symlink-%s", format), func(t *testing.T) { accept(t, acceptParms{ Name: fmt.Sprintf("symlink_%s", format), Conf: "symlink.yaml", Format: format, Dockerfile: fmt.Sprintf("%s.symlink.dockerfile", format), }) }) } }
explode_data.jsonl/15467
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 173 }
[ 2830, 3393, 34667, 44243, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 3561, 1669, 2088, 19856, 341, 197, 59416, 1669, 3561, 198, 197, 3244, 16708, 28197, 17305, 445, 22860, 44243, 11069, 82, 497, 3561, 701, 2915, 1155, 353, 8840, 836, 8, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGitHubCtrollerTestSuite(t *testing.T) { id := test.GetId() test.Setup(t, id) suite.Run(t, new(GitHubControllerTestSuite)) test.Teardown(t, id) }
explode_data.jsonl/55347
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 70 }
[ 2830, 3393, 75615, 34, 83, 1468, 2271, 28000, 1155, 353, 8840, 836, 8, 341, 15710, 1669, 1273, 2234, 764, 741, 18185, 39820, 1155, 11, 877, 340, 96572, 16708, 1155, 11, 501, 6699, 275, 19316, 2051, 2271, 28000, 1171, 18185, 94849, 37496...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestUserService_Save(t *testing.T) { //prepare db test db, _ := database.PrepareTestDB("txdb") defer database.CleanTestDB(db) // create an instance of our test object hashPassword, _ := utils.HashPassword("password") mockUser := user.Dto{ Name: "Ahmad", Email: "ahmad@email.com", Password: hashPassword, } mockUserFailed := user.Dto{ Name: "Ahmad", Email: "ipan@email.com", Password: hashPassword, } u := NewUserService(db) // setup expectations s := t.Run("success", func(t *testing.T) { // success scenario save data, err := u.Save(mockUser) assert.NoError(t, err) assert.NotEmpty(t, data) }) f := t.Run("error-failed", func(t *testing.T) { // failed scenario save (duplicate) _, err := u.Save(mockUserFailed) assert.Error(t, err) }) assert.Equal(t, true, s, "Success scenario failed run") assert.Equal(t, true, f, "Failed scenario failed run") }
explode_data.jsonl/37410
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 366 }
[ 2830, 3393, 60004, 78746, 1155, 353, 8840, 836, 8, 341, 197, 322, 13609, 2927, 1273, 198, 20939, 11, 716, 1669, 4625, 28770, 3380, 2271, 3506, 445, 3998, 1999, 1138, 16867, 4625, 727, 2675, 2271, 3506, 9791, 692, 197, 322, 1855, 458, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParseUnsupportedListLine(t *testing.T) { for _, lt := range listTestsFail { _, err := parseListLine(lt.line, now, time.UTC) if err == nil { t.Errorf("parseListLine(%v) expected to fail", lt.line) } if err != lt.err { t.Errorf("parseListLine(%v) expected to fail with error: '%s'; was: '%s'", lt.line, lt.err.Error(), err.Error()) } } }
explode_data.jsonl/54611
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 157 }
[ 2830, 3393, 14463, 41884, 852, 2460, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 25175, 1669, 2088, 1140, 18200, 19524, 341, 197, 197, 6878, 1848, 1669, 4715, 852, 2460, 2333, 83, 10932, 11, 1431, 11, 882, 87069, 340, 197, 743, 1848, 62...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestResizeDisk(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() diskSizeGB := int32(2) diskName := "disk1" fakeGetDiskFailed := "fakeGetDiskFailed" fakeCreateDiskFailed := "fakeCreateDiskFailed" testCases := []struct { desc string diskName string oldSize resource.Quantity newSize resource.Quantity existedDisk compute.Disk expectedQuantity resource.Quantity expectedErr bool expectedErrMsg error }{ { desc: "new quantity and no error shall be returned if everything is good", diskName: diskName, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), existedDisk: compute.Disk{Name: to.StringPtr("disk1"), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, expectedQuantity: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), expectedErr: false, }, { desc: "new quantity and no error shall be returned if everything is good with DiskProperties is null", diskName: diskName, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), existedDisk: compute.Disk{Name: to.StringPtr("disk1")}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, expectedErrMsg: fmt.Errorf("DiskProperties of disk(%s) is nil", diskName), }, { desc: "new quantity and no error shall be returned if everything is good with disk already of greater or equal size than requested", diskName: diskName, oldSize: *resource.NewQuantity(1*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), existedDisk: compute.Disk{Name: to.StringPtr("disk1"), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: false, }, { desc: "an error shall be 
returned if everything is good but get disk failed", diskName: fakeGetDiskFailed, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), existedDisk: compute.Disk{Name: to.StringPtr(fakeGetDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Get Disk failed")), }, { desc: "an error shall be returned if everything is good but create disk failed", diskName: fakeCreateDiskFailed, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), existedDisk: compute.Disk{Name: to.StringPtr(fakeCreateDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Create Disk failed")), }, { desc: "an error shall be returned if disk is not in Unattached state", diskName: fakeCreateDiskFailed, oldSize: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), newSize: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI), existedDisk: compute.Disk{Name: to.StringPtr(fakeCreateDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Attached}}, expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI), expectedErr: true, expectedErrMsg: fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: Attached, already attached to "), }, } for i, test := range testCases { testCloud := GetTestCloud(ctrl) 
managedDiskController := testCloud.ManagedDiskController diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name) mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface) if test.diskName == fakeGetDiskFailed { mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes() } else { mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes() } if test.diskName == fakeCreateDiskFailed { mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(&retry.Error{RawError: fmt.Errorf("Create Disk failed")}).AnyTimes() } else { mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes() } result, err := managedDiskController.ResizeDisk(diskURI, test.oldSize, test.newSize) assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err) assert.Equal(t, test.expectedQuantity.Value(), result.Value(), "TestCase[%d]: %s, expected Quantity: %v, return Quantity: %v", i, test.desc, test.expectedQuantity, result) } }
explode_data.jsonl/36148
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2370 }
[ 2830, 3393, 30561, 47583, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 16867, 23743, 991, 18176, 2822, 2698, 3187, 1695, 5381, 1669, 526, 18, 17, 7, 17, 340, 2698, 3187, 675, 1669, 330, 30496, 16, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
// TestLoadBalanceWorksWithMultipleEndpointsAndUpdates drives the round-robin
// load balancer through a service exposing two named ports ("p" and "q")
// across three phases: an initial three-endpoint set, a smaller two-endpoint
// replacement, and finally a cleared endpoint list. After every update the
// rotation must restart from the first endpoint of the (shuffled) list, and
// the two ports must rotate independently over the same address set.
func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	// Before any endpoints are registered, a lookup must fail.
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	// Phase 1: three endpoints, each carrying both named ports.
	endpoints := make([]api.Endpoints, 1)
	endpoints[0] = api.Endpoints{
		ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpoints)
	// The stored order is shuffled, so assertions index into the stored slice
	// rather than assuming a particular order.
	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	// Round-robin on port "p": 0, 1, 2, then wrap back to 0.
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	// Port "q" rotates over the same addresses with its own port numbers.
	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	// Then update the configuration with one fewer endpoints, make sure
	// we start in the beginning again
	endpoints[0] = api.Endpoints{
		ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpoints)
	shuffledEndpoints = loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	// Rotation restarts at index 0 after the update: 0, 1, 0, 1.
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	// Clear endpoints
	endpoints[0] = api.Endpoints{ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpoints)
	// With no endpoints left, lookups fail again.
	endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}
explode_data.jsonl/66177
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1553 }
[ 2830, 3393, 5879, 21190, 6776, 16056, 32089, 80786, 3036, 37091, 1155, 353, 8840, 836, 8, 341, 49386, 93825, 1669, 1532, 5879, 93825, 8106, 741, 52934, 47, 1669, 13291, 13860, 7084, 675, 90, 7980, 68552, 675, 25, 4494, 98932, 68552, 675, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
9
// TestMigrationFailure is an integration test covering the promscale
// connector's extension-upgrade behavior in two passes against a live
// database container:
//
//  1. with upgrade-prerelease-extensions effectively false, the installed
//     timescaledb extension must remain at the prerelease 2.0.0-rc4;
//  2. after downgrading the extension to 1.7.3, a connector started with the
//     default --upgrade-extensions=true must fail the migration (no upgrade
//     script in the image) yet keep running.
func TestMigrationFailure(t *testing.T) {
	ctx := context.Background()
	var err error
	var version string
	buildPromscaleImageFromRepo(t)
	db, dbContainer, closer := startDB(t, ctx)
	defer testhelpers.StopContainer(ctx, dbContainer, false)
	defer closer.Close()

	// start promscale & test upgrade-extensions as false
	func() {
		connectorImage := "timescale/promscale:latest"
		databaseName := "postgres"
		connector, err := testhelpers.StartConnectorWithImage(ctx, dbContainer, connectorImage, *printLogs, []string{}, databaseName)
		if err != nil {
			t.Fatal(err)
		}
		defer testhelpers.StopContainer(ctx, connector, *printLogs)
		// Read the installed extension version straight from pg_extension.
		err = db.QueryRow(ctx, `SELECT extversion FROM pg_extension where extname='timescaledb'`).Scan(&version)
		if err != nil {
			t.Fatal(err)
		}
		db.Close(ctx)
		if version != "2.0.0-rc4" {
			t.Fatal("failed to verify upgrade extension with -upgrade-prerelease-extension false")
		}
		t.Logf("successfully tested extension upgrade flow with --upgrade-prereleases-extensions false.")
	}()

	// As the timescaleDB installed version is rc4, lets install the 1.7.3 ext version
	extVersion := "1.7.3"
	dropAndCreateExt(t, ctx, extVersion)
	// Reconnect: the previous connection was closed above.
	db, err = pgx.Connect(ctx, testhelpers.PgConnectURL("postgres", testhelpers.Superuser))
	if err != nil {
		t.Fatal(err)
	}
	err = db.QueryRow(ctx, `SELECT extversion FROM pg_extension where extname='timescaledb'`).Scan(&version)
	if err != nil {
		t.Fatal(err)
	}
	db.Close(ctx)
	if version != extVersion {
		t.Fatal("failed to verify upgrade extension with -upgrade-prerelease-extension false")
	}

	// start a new connector and test --upgrade-extensions as true which is by default set in flags
	// the migration should fail (upgrade path in tsdb isn't available) but promscale should be running.
	func() {
		connectorImage := "timescale/promscale:latest"
		databaseName := "postgres"
		connector, err := testhelpers.StartConnectorWithImage(ctx, dbContainer, connectorImage, *printLogs, []string{}, databaseName)
		if err != nil {
			t.Fatal(err)
		}
		defer testhelpers.StopContainer(ctx, connector, *printLogs)
		// Shadowed on purpose: this closure verifies its own reading.
		var version string
		db, err = pgx.Connect(ctx, testhelpers.PgConnectURL("postgres", testhelpers.Superuser))
		if err != nil {
			t.Fatal(err)
		}
		err = db.QueryRow(ctx, `SELECT extversion FROM pg_extension where extname='timescaledb'`).Scan(&version)
		if err != nil {
			t.Fatal(err)
		}
		db.Close(ctx)
		if version != "1.7.3" {
			t.Fatal("failed to verify timescaleDB extension version")
		}
		// Now from the check we are know that migration failed from 1.7.3 to 1.7.4
		// as the upgrade script doesn't exist within timescaleDB image.
		// Now check promscale is still running on migration failure.
		exitCode, err := connector.Exec(context.Background(), []string{"echo", "hello"})
		if exitCode != 0 || err != nil {
			t.Fatal("promscale failed to run extension migration failure", err)
		}
		t.Logf("successfully tested extension upgrade flow with --upgrade-prereleases-extensions true where migration fails and promscale keeps running.")
	}()
}
explode_data.jsonl/79304
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1044 }
[ 2830, 3393, 20168, 17507, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 741, 2405, 1848, 1465, 198, 2405, 2319, 914, 198, 69371, 35186, 12445, 1906, 3830, 25243, 1155, 340, 20939, 11, 2927, 4502, 11, 12128, 1669, 1191, 3506, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // 1. Initialize tendermint and commit 3 blocks with the following app hashes: // - 0x01 // - 0x02 // - 0x03 cfg, err := ResetConfig("handshake_test_") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) const appVersion = 0x0 pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) stateDB, state, store := stateAndStore(cfg, pubKey, appVersion) stateStore := sm.NewStore(stateDB) genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks blocks := sf.MakeBlocks(3, &state, privVal) store.chain = blocks // 2. Tendermint must panic if app returns wrong hash for the first block // - RANDOM HASH // - 0x02 // - 0x03 { app := &badApp{numBlocks: 3, allHashesAreWrong: true} clientCreator := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) t.Cleanup(func() { if err := proxyApp.Stop(); err != nil { t.Error(err) } }) assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) if err = h.Handshake(proxyApp); err != nil { t.Log(err) } }) } // 3. Tendermint must panic if app returns wrong hash for the last block // - 0x01 // - 0x02 // - RANDOM HASH { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} clientCreator := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) t.Cleanup(func() { if err := proxyApp.Stop(); err != nil { t.Error(err) } }) assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) if err = h.Handshake(proxyApp); err != nil { t.Log(err) } }) } }
explode_data.jsonl/6603
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 830 }
[ 2830, 3393, 2314, 29661, 35693, 1211, 2679, 2164, 16446, 29185, 2164, 6370, 1155, 353, 8840, 836, 8, 341, 197, 322, 220, 16, 13, 9008, 27582, 67791, 323, 5266, 220, 18, 10010, 448, 279, 2701, 906, 50257, 510, 197, 322, 197, 197, 12, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestStreamSelectScatterOrderBy checks that a streaming scatter query with an
// ORDER BY is fanned out to all shards and that the merged result is returned
// in descending `col` order. Each of the 8 fake shards answers with one row
// whose col is i%4, so the merged result must contain the values 3,2,1,0 —
// each appearing twice (two shards share each value).
func TestStreamSelectScatterOrderBy(t *testing.T) {
	// Special setup: Don't use createLegacyExecutorEnv.
	cell := "aa"
	hc := discovery.NewFakeLegacyHealthCheck()
	s := createSandbox("TestExecutor")
	s.VSchema = executorVSchema
	getSandbox(KsTestUnsharded).VSchema = unshardedVSchema
	serv := new(sandboxTopo)
	resolver := newTestLegacyResolver(hc, serv, cell)
	shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"}
	var conns []*sandboxconn.SandboxConn
	for i, shard := range shards {
		sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_MASTER, true, 1, nil)
		// Each shard returns a single (id=1, col=i%4) row.
		sbc.SetResults([]*sqltypes.Result{{
			Fields: []*querypb.Field{
				{Name: "id", Type: sqltypes.Int32},
				{Name: "col", Type: sqltypes.Int32},
			},
			RowsAffected: 1,
			InsertID:     0,
			Rows: [][]sqltypes.Value{{
				sqltypes.NewInt32(1),
				sqltypes.NewInt32(int32(i % 4)),
			}},
		}})
		conns = append(conns, sbc)
	}
	executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize)
	query := "select id, col from user order by col desc"
	gotResult, err := executorStream(executor, query)
	require.NoError(t, err)
	// Every shard must have received exactly the original query with no bind
	// variables.
	wantQueries := []*querypb.BoundQuery{{
		Sql:           query,
		BindVariables: map[string]*querypb.BindVariable{},
	}}
	for _, conn := range conns {
		if !reflect.DeepEqual(conn.Queries, wantQueries) {
			t.Errorf("conn.Queries = %#v, want %#v", conn.Queries, wantQueries)
		}
	}
	// Expected merged result: col values 3,2,1,0 descending, each duplicated
	// because two of the eight shards produced each value.
	wantResult := &sqltypes.Result{
		Fields: []*querypb.Field{
			{Name: "id", Type: sqltypes.Int32},
			{Name: "col", Type: sqltypes.Int32},
		},
	}
	for i := 0; i < 4; i++ {
		row := []sqltypes.Value{
			sqltypes.NewInt32(1),
			sqltypes.NewInt32(int32(3 - i)),
		}
		wantResult.Rows = append(wantResult.Rows, row, row)
	}
	if !reflect.DeepEqual(gotResult, wantResult) {
		t.Errorf("scatter order by:\n%v, want\n%v", gotResult, wantResult)
	}
}
explode_data.jsonl/67414
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 830 }
[ 2830, 3393, 3027, 3379, 3326, 1650, 34605, 1155, 353, 8840, 836, 8, 341, 197, 322, 9785, 6505, 25, 4320, 944, 990, 1855, 77415, 25255, 14359, 624, 45987, 1669, 330, 5305, 698, 9598, 66, 1669, 18335, 7121, 52317, 77415, 14542, 3973, 741,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestPanicOverridesExpectationChecks(t *testing.T) { ctrl := gomock.NewController(t) reporter := NewErrorReporter(t) reporter.assertFatal(func() { ctrl.RecordCall(new(Subject), "FooMethod", "1") defer ctrl.Finish() reporter.Fatalf("Intentional panic") }) }
explode_data.jsonl/17292
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 106 }
[ 2830, 3393, 47, 31270, 80010, 17536, 367, 49820, 1155, 353, 8840, 836, 8, 341, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 69931, 261, 1669, 1532, 1454, 52766, 1155, 692, 69931, 261, 3713, 62396, 18552, 368, 341, 197, 84381, 499...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHashPasswordWithPbkdf2(t *testing.T) { type args struct { password string } tests := []struct { name string args args wantHashLength int wantSaltLength int }{ { name: "Length", args: args{ password: "password", }, wantHashLength: 32, wantSaltLength: 16, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gotHash, gotSalt := HashPasswordWithPbkdf2(tt.args.password) if len(gotHash) != tt.wantHashLength { t.Errorf("HashPasswordWithPbkdf2() gotHashLength = %v, want %v", len(gotHash), tt.wantHashLength) } if len(gotSalt) != tt.wantSaltLength { t.Errorf("HashPasswordWithPbkdf2() gotSaltLength = %v, want %v", len(gotSalt), tt.wantSaltLength) } }) } }
explode_data.jsonl/17827
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 344 }
[ 2830, 3393, 6370, 4876, 2354, 47, 40029, 2940, 17, 1155, 353, 8840, 836, 8, 341, 13158, 2827, 2036, 341, 197, 58199, 914, 198, 197, 532, 78216, 1669, 3056, 1235, 341, 197, 11609, 1843, 914, 198, 197, 31215, 1843, 2827, 198, 197, 50780...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestConsumeSpace checks that consumeSpace advances the parser's position
// past leading whitespace.
func TestConsumeSpace(t *testing.T) {
	// NOTE(review): the string literal below appears whitespace-collapsed in
	// this copy of the file; the expected position of 3 suggests it originally
	// contained three leading space characters — confirm against upstream.
	p := NewHtmlParser(" ")
	consumeSpace(p)
	if p.pos != 3 {
		t.Errorf("expect %d, actual %d", 3, p.pos)
	}
}
explode_data.jsonl/34980
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 71 }
[ 2830, 3393, 1109, 31323, 9914, 1155, 353, 8840, 836, 8, 341, 3223, 1669, 1532, 13591, 6570, 445, 256, 14167, 37203, 31323, 9914, 1295, 340, 743, 281, 13006, 961, 220, 18, 341, 197, 3244, 13080, 445, 17119, 1018, 67, 11, 5042, 1018, 67...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestXORMappedAddress_String(t *testing.T) { tt := []struct { in XORMappedAddress out string }{ { // 0 XORMappedAddress{ IP: net.ParseIP("fe80::dc2b:44ff:fe20:6009"), Port: 124, }, "[fe80::dc2b:44ff:fe20:6009]:124", }, { // 1 XORMappedAddress{ IP: net.ParseIP("213.141.156.236"), Port: 8147, }, "213.141.156.236:8147", }, } for i, c := range tt { if got := c.in.String(); got != c.out { t.Errorf("[%d]: XORMappesAddres.String() %s (got) != %s (expected)", i, got, c.out, ) } } }
explode_data.jsonl/19446
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 307 }
[ 2830, 3393, 55, 4365, 5677, 4286, 31777, 1155, 353, 8840, 836, 8, 341, 3244, 83, 1669, 3056, 1235, 341, 197, 17430, 220, 1599, 4365, 5677, 4286, 198, 197, 13967, 914, 198, 197, 59403, 197, 197, 515, 298, 197, 322, 220, 15, 198, 298,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestFinWithNoPendingData exercises the active-close path when the send
// buffer is empty: after a write is fully acknowledged, Shutdown(write) must
// emit a FIN immediately, and the stack must then ACK the peer's FIN.
// Sequence numbers are tracked manually in `next` (our side) and `iss`
// (the peer's side).
func TestFinWithNoPendingData(t *testing.T) {
	c := context.New(t, defaultMTU)
	defer c.Cleanup()
	c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
	// Write something out, and have it acknowledged.
	view := make([]byte, 10)
	var r bytes.Reader
	r.Reset(view)
	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %s", err)
	}
	// next = first sequence number after our ISS; iss = peer's next seq.
	next := uint32(c.IRS) + 1
	iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
	// The outgoing data segment carries ACK (PSH is explicitly ignored by the
	// flags mask).
	checker.IPv4(t, c.GetPacket(),
		checker.PayloadLen(len(view)+header.TCPMinimumSize),
		checker.TCP(
			checker.DstPort(context.TestPort),
			checker.TCPSeqNum(next),
			checker.TCPAckNum(uint32(iss)),
			checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
		),
	)
	next += uint32(len(view))
	// Acknowledge the data so nothing is pending.
	c.SendPacket(nil, &context.Headers{
		SrcPort: context.TestPort,
		DstPort: c.Port,
		Flags:   header.TCPFlagAck,
		SeqNum:  iss,
		AckNum:  seqnum.Value(next),
		RcvWnd:  30000,
	})
	// Shutdown, check that we get a FIN.
	if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
		t.Fatalf("Shutdown failed: %s", err)
	}
	checker.IPv4(t, c.GetPacket(),
		checker.PayloadLen(header.TCPMinimumSize),
		checker.TCP(
			checker.DstPort(context.TestPort),
			checker.TCPSeqNum(next),
			checker.TCPAckNum(uint32(iss)),
			checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
		),
	)
	// FIN consumes one sequence number.
	next++
	// Ack and send FIN as well.
	c.SendPacket(nil, &context.Headers{
		SrcPort: context.TestPort,
		DstPort: c.Port,
		Flags:   header.TCPFlagAck | header.TCPFlagFin,
		SeqNum:  iss,
		AckNum:  seqnum.Value(next),
		RcvWnd:  30000,
	})
	// Check that the stack acks the FIN.
	checker.IPv4(t, c.GetPacket(),
		checker.PayloadLen(header.TCPMinimumSize),
		checker.TCP(
			checker.DstPort(context.TestPort),
			checker.TCPSeqNum(next),
			checker.TCPAckNum(uint32(iss)+1),
			checker.TCPFlags(header.TCPFlagAck),
		),
	)
}
explode_data.jsonl/75983
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 850 }
[ 2830, 3393, 9134, 2354, 2753, 32027, 1043, 1155, 353, 8840, 836, 8, 341, 1444, 1669, 2266, 7121, 1155, 11, 1638, 8505, 52, 340, 16867, 272, 727, 60639, 2822, 1444, 7251, 21146, 5378, 8787, 6341, 14076, 2833, 11, 220, 18, 15, 15, 15, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestHSL smoke-tests the HSL color helpers by logging representative
// conversions. It makes no assertions and only fails if a call panics;
// the logged values are for manual inspection under `go test -v`.
func TestHSL(t *testing.T) {
	var hsl HSL
	t.Log(hsl.RGBA())
	t.Log(hslModel(hsl))
	// Conversion from a different color model (16-bit gray).
	t.Log(hslModel(color.Gray16{Y: uint16(1)}))
	// Lightness on either side of 0.5 — presumably exercises both branches of
	// HSLToRGB; confirm against its implementation.
	t.Log(HSLToRGB(0, 1, 0.4))
	t.Log(HSLToRGB(0, 1, 0.6))
	// hueToRGB probed with out-of-range (-1, 2) and in-range fractional hues.
	t.Log(hueToRGB(0, 0, -1))
	t.Log(hueToRGB(0, 0, 2))
	t.Log(hueToRGB(0, 0, 1.0/7))
	t.Log(hueToRGB(0, 0, 0.4))
	t.Log(hueToRGB(0, 0, 2.0/4))
	// RGBToHSL over primary and mixed colors.
	t.Log(RGBToHSL(255, 255, 0))
	t.Log(RGBToHSL(0, 255, 255))
	t.Log(RGBToHSL(250, 100, 50))
	t.Log(RGBToHSL(50, 100, 250))
	t.Log(RGBToHSL(250, 50, 100))
}
explode_data.jsonl/36993
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 288 }
[ 2830, 3393, 39, 7984, 1155, 353, 8840, 836, 8, 341, 2405, 305, 3226, 472, 7984, 198, 3244, 5247, 3203, 3226, 80114, 32, 2398, 3244, 5247, 3203, 3226, 1712, 3203, 3226, 1171, 3244, 5247, 3203, 3226, 1712, 13441, 74968, 16, 21, 90, 56, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSetOnFunctions(t *testing.T) { // The set command between function variables should fail with an error // Issue #2691 withTestProcess("goroutinestackprog", t, func(p *proc.Target, fixture protest.Fixture) { setFunctionBreakpoint(p, t, "main.main") assertNoError(p.Continue(), t, "Continue()") scope, err := proc.GoroutineScope(p, p.CurrentThread()) assertNoError(err, t, "GoroutineScope") err = scope.SetVariable("main.func1", "main.func2") if err == nil { t.Fatal("expected error when assigning between function variables") } }) }
explode_data.jsonl/56362
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 193 }
[ 2830, 3393, 1649, 1925, 25207, 1155, 353, 8840, 836, 8, 341, 197, 322, 576, 738, 3210, 1948, 729, 7332, 1265, 3690, 448, 458, 1465, 198, 197, 322, 25226, 671, 17, 21, 24, 16, 198, 46948, 2271, 7423, 445, 5628, 411, 258, 477, 473, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestCheckOrigin runs every entry of CheckOriginTests through checkOrigin,
// building a synthetic WebSocket upgrade request per case and configuring the
// TLS, same-origin, and allow-list knobs from the case's fields.
func TestCheckOrigin(t *testing.T) {
	for _, testcase := range CheckOriginTests {
		// NOTE(review): this raw request literal was reconstructed with one
		// header per line and a terminating blank line, as http.ReadRequest
		// requires; the file's exact original whitespace was mangled in
		// transport — confirm against upstream.
		br := bufio.NewReader(strings.NewReader(fmt.Sprintf(`GET /chat HTTP/1.1
Host: %s
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
Origin: %s
Sec-WebSocket-Version: 13

`, testcase.host, testcase.origin)))
		req, err := http.ReadRequest(br)
		if err != nil {
			t.Fatal("request", err)
		}
		// A no-op log sink: checkOrigin only needs a non-nil logger.
		log := new(LogScope)
		log.LogFunc = func(*LogScope, LogLevel, string, string, string, ...interface{}) {}
		config := new(Config)
		if testcase.reqtls == ReqHTTPS {
			// Fake TLS
			config.Ssl = true
			req.TLS = &tls.ConnectionState{}
		}
		if testcase.same == OriginMustBeSame {
			config.SameOrigin = true
		}
		if testcase.allowed != nil {
			config.AllowOrigins = testcase.allowed
		}
		err = checkOrigin(req, config, log)
		// Each case declares whether checkOrigin must reject or pass.
		if testcase.getsErr == ReturnsError && err == nil {
			t.Errorf("Test case %#v did not get an error", testcase.name)
		} else if testcase.getsErr == ReturnsPass && err != nil {
			t.Errorf("Test case %#v got error while expected to pass", testcase.name)
		}
	}
}
explode_data.jsonl/31602
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 429 }
[ 2830, 3393, 3973, 13298, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 70080, 1669, 2088, 4248, 13298, 18200, 341, 197, 80255, 1669, 96917, 68587, 51442, 68587, 28197, 17305, 5809, 3806, 608, 9686, 10130, 14, 16, 13, 16, 198, 9296, 25, 1018...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestHeaderAddTo(t *testing.T) { //t.SkipNow() h := Header{"A": {"foo"}, "B": {"bar", "baz"}} r := &http.Request{Header: http.Header{}} h.AddTo(r) want := http.Header{"A": {"foo"}, "B": {"bar", "baz"}} if !reflect.DeepEqual(r.Header, want) { t.Errorf("got %v, want %v", r.Header, want) } }
explode_data.jsonl/775
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 136 }
[ 2830, 3393, 4047, 2212, 1249, 1155, 353, 8840, 836, 8, 341, 197, 322, 83, 57776, 7039, 741, 9598, 1669, 12104, 4913, 32, 788, 5212, 7975, 14345, 330, 33, 788, 5212, 2257, 497, 330, 42573, 95642, 7000, 1669, 609, 1254, 9659, 90, 4047, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestServerOkUnix(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("skipping test on windows") } addr := tmpTestUnix(t) ucfg, err := common.NewConfigFrom(m{"host": "unix:" + addr}) assert.NoError(t, err) btr, stop, err := setupServer(t, ucfg, nil) require.NoError(t, err) defer stop() baseUrl, client := btr.client(false) rsp, err := client.Get(baseUrl + HealthCheckURL) assert.NoError(t, err) assert.Equal(t, http.StatusOK, rsp.StatusCode) }
explode_data.jsonl/4940
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 187 }
[ 2830, 3393, 5475, 11578, 55832, 1155, 353, 8840, 836, 8, 341, 743, 15592, 97574, 3126, 621, 330, 27077, 1, 341, 197, 3244, 57776, 445, 4886, 5654, 1273, 389, 11030, 1138, 197, 630, 53183, 1669, 4174, 2271, 55832, 1155, 340, 197, 1754, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestAddGlobalParamToScope checks addParamToGlobalScope's handling of a
// parameter's GlobalName: no-op without one, appended to workflow outputs and
// mirrored into globalParams when present, updated in place when the value
// changes, and appended again under a new global name.
func TestAddGlobalParamToScope(t *testing.T) {
	woc := newWoc()
	woc.globalParams = make(map[string]string)
	testVal := "test-value"
	param := wfv1.Parameter{
		Name:  "test-param",
		Value: &testVal,
	}
	// Make sure if the param is not global, don't add to scope
	woc.addParamToGlobalScope(param)
	assert.Nil(t, woc.wf.Status.Outputs)

	// Now set it as global. Verify it is added to workflow outputs
	param.GlobalName = "global-param"
	woc.addParamToGlobalScope(param)
	assert.Equal(t, 1, len(woc.wf.Status.Outputs.Parameters))
	assert.Equal(t, param.GlobalName, woc.wf.Status.Outputs.Parameters[0].Name)
	assert.Equal(t, testVal, *woc.wf.Status.Outputs.Parameters[0].Value)
	assert.Equal(t, testVal, woc.globalParams["workflow.outputs.parameters.global-param"])

	// Change the value and verify it is reflected in workflow outputs
	// (the existing entry is updated, not duplicated).
	newValue := "new-value"
	param.Value = &newValue
	woc.addParamToGlobalScope(param)
	assert.Equal(t, 1, len(woc.wf.Status.Outputs.Parameters))
	assert.Equal(t, param.GlobalName, woc.wf.Status.Outputs.Parameters[0].Name)
	assert.Equal(t, newValue, *woc.wf.Status.Outputs.Parameters[0].Value)
	assert.Equal(t, newValue, woc.globalParams["workflow.outputs.parameters.global-param"])

	// Add a new global parameter
	param.GlobalName = "global-param2"
	woc.addParamToGlobalScope(param)
	assert.Equal(t, 2, len(woc.wf.Status.Outputs.Parameters))
	assert.Equal(t, param.GlobalName, woc.wf.Status.Outputs.Parameters[1].Name)
	assert.Equal(t, newValue, *woc.wf.Status.Outputs.Parameters[1].Value)
	assert.Equal(t, newValue, woc.globalParams["workflow.outputs.parameters.global-param2"])
}
explode_data.jsonl/54374
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 620 }
[ 2830, 3393, 2212, 11646, 2001, 1249, 10803, 1155, 353, 8840, 836, 8, 341, 6692, 509, 1669, 501, 54, 509, 741, 6692, 509, 21932, 4870, 284, 1281, 9147, 14032, 30953, 340, 18185, 2208, 1669, 330, 1944, 19083, 698, 36037, 1669, 289, 27890,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestFuzzHC(t *testing.T) { f := func(input []byte) bool { output := make([]byte, CompressBound(input)) outSize, err := CompressHC(output, input) if err != nil { t.Fatalf("Compression failed: %v", err) } if outSize == 0 { t.Fatal("Output buffer is empty.") } output = output[:outSize] decompressed := make([]byte, len(input)) _, err = Uncompress(decompressed, output) if err != nil { t.Fatalf("Decompression failed: %v", err) } if string(decompressed) != string(input) { t.Fatalf("Decompressed output != input: %q != %q", decompressed, input) } return true } conf := &quick.Config{MaxCount: 20000} if testing.Short() { conf.MaxCount = 1000 } if err := quick.Check(f, conf); err != nil { t.Fatal(err) } }
explode_data.jsonl/62984
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 311 }
[ 2830, 3393, 37, 8889, 22455, 1155, 353, 8840, 836, 8, 341, 1166, 1669, 2915, 5384, 3056, 3782, 8, 1807, 341, 197, 21170, 1669, 1281, 10556, 3782, 11, 1198, 1873, 19568, 5384, 1171, 197, 13967, 1695, 11, 1848, 1669, 1198, 1873, 22455, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestGetFile_checksumSkip(t *testing.T) { dst := tempFile(t) u := testModule("basic-file/foo.txt") + "?checksum=md5:09f7e02f1290be211da707a266f153b3" getter := &MockGetter{Proxy: new(FileGetter)} client := &Client{ Src: u, Dst: dst, Dir: false, Getters: map[string]Getter{ "file": getter, }, } // get the file if err := client.Get(); err != nil { t.Fatalf("err: %s", err) } if v := getter.GetFileURL.Query().Get("checksum"); v != "" { t.Fatalf("bad: %s", v) } // remove proxy file getter and reset GetFileCalled so that we can re-test. getter.Proxy = nil getter.GetFileCalled = false if err := client.Get(); err != nil { t.Fatalf("err: %s", err) } if getter.GetFileCalled { t.Fatalf("get should not have been called") } }
explode_data.jsonl/827
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 329 }
[ 2830, 3393, 1949, 1703, 64038, 35134, 1155, 353, 8840, 836, 8, 341, 52051, 1669, 2730, 1703, 1155, 340, 10676, 1669, 1273, 3332, 445, 22342, 14203, 60555, 3909, 899, 488, 27244, 70740, 28, 2277, 20, 25, 15, 24, 69, 22, 68, 15, 17, 6...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestValuesForBinaryLiteral(t *testing.T) { // See issue #15310 store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test;") tk.MustExec("create table testValuesBinary(id int primary key auto_increment, a bit(1));") tk.MustExec("insert into testValuesBinary values(1,1);") err := tk.ExecToErr("insert into testValuesBinary values(1,1) on duplicate key update id = values(id),a = values(a);") require.NoError(t, err) tk.MustQuery("select a=0 from testValuesBinary;").Check(testkit.Rows("0")) err = tk.ExecToErr("insert into testValuesBinary values(1,0) on duplicate key update id = values(id),a = values(a);") require.NoError(t, err) tk.MustQuery("select a=0 from testValuesBinary;").Check(testkit.Rows("1")) tk.MustExec("drop table testValuesBinary;") }
explode_data.jsonl/65507
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 299 }
[ 2830, 3393, 6227, 2461, 21338, 17350, 1155, 353, 8840, 836, 8, 341, 197, 322, 3496, 4265, 671, 16, 20, 18, 16, 15, 198, 57279, 11, 4240, 1669, 1273, 8226, 7251, 11571, 6093, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_Hoverfly_SetModeWithArguments_OverwriteDuplicate(t *testing.T) { RegisterTestingT(t) unit := NewHoverflyWithConfiguration(&Configuration{}) Expect(unit.SetModeWithArguments(v2.ModeView{ Mode: "capture", Arguments: v2.ModeArgumentsView{ OverwriteDuplicate: true, }, })).To(Succeed()) storedMode := unit.modeMap[modes.Capture].View() Expect(storedMode.Arguments.OverwriteDuplicate).To(BeTrue()) }
explode_data.jsonl/45403
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 159 }
[ 2830, 3393, 2039, 1975, 21642, 14812, 3636, 2354, 19139, 62, 1918, 4934, 53979, 1155, 353, 8840, 836, 8, 341, 79096, 16451, 51, 1155, 692, 81189, 1669, 1532, 34379, 21642, 2354, 7688, 2099, 7688, 6257, 692, 35911, 24144, 4202, 3636, 2354,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestStr(t *testing.T) { e := "abc\u1234" o := dummys.Get("someString") if v := o.String(); v != e { t.Errorf("expected %#v, got %#v", e, v) } if i := o.Interface().(string); i != e { t.Errorf("expected %#v, got %#v", e, i) } if dummys.Set("otherString", e); dummys.Get("otherString").String() != e { t.Fail() } }
explode_data.jsonl/56770
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 154 }
[ 2830, 3393, 2580, 1155, 353, 8840, 836, 8, 341, 7727, 1669, 330, 13683, 3770, 16, 17, 18, 19, 698, 22229, 1669, 62711, 8209, 2234, 445, 14689, 703, 1138, 743, 348, 1669, 297, 6431, 2129, 348, 961, 384, 341, 197, 3244, 13080, 445, 73...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestUDP2(t *testing.T) { test := newUDPTestCase(t) defer test.Finish() test.Send("hello.world 42.15 1422698155\nmetric.name -72.11 1422698155\n") select { case msg := <-test.rcvChan: test.Eq(msg, points.OnePoint("hello.world", 42.15, 1422698155)) default: t.Fatalf("Message #0 not received") } select { case msg := <-test.rcvChan: test.Eq(msg, points.OnePoint("metric.name", -72.11, 1422698155)) default: t.Fatalf("Message #1 not received") } }
explode_data.jsonl/18173
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 200 }
[ 2830, 3393, 41648, 17, 1155, 353, 8840, 836, 8, 341, 18185, 1669, 501, 41648, 16458, 1155, 340, 16867, 1273, 991, 18176, 2822, 18185, 20176, 445, 14990, 22882, 220, 19, 17, 13, 16, 20, 220, 16, 19, 17, 17, 21, 24, 23, 16, 20, 20, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestServiceImpl_GetLeagues requests page 1 of leagues from the
// sportmonks service and checks the two decoded dto.League values and
// the "no next page" flag.
// NOTE(review): New(nil, nil) implies the underlying client/deps are
// stubbed or unused for this call — confirm against the constructor.
func TestServiceImpl_GetLeagues(t *testing.T) {
	var (
		// Expected decoding of the first league fixture.
		firstLeague = dto.League{
			LegacyID:                271,
			CountryID:               37,
			Name:                    "Superliga",
			LogoPath:                "https://cdn.sportmonks.com/images/soccer/leagues/271.png",
			Type:                    "domestic",
			IsActive:                true,
			IsCup:                   false,
			IsFriendly:              false,
			CoveredLiveStandings:    true,
			CoveredPredictions:      true,
			CoveredTopscorerGoals:   true,
			CoveredTopscorerAssists: true,
			CoveredTopscorerCards:   true,
		}
		// Expected decoding of the second league fixture.
		secondLeague = dto.League{
			LegacyID:                501,
			CountryID:               70,
			Name:                    "Premiership",
			LogoPath:                "https://cdn.sportmonks.com/images/soccer/leagues/501.png",
			Type:                    "domestic",
			IsActive:                true,
			IsCup:                   false,
			IsFriendly:              false,
			CoveredLiveStandings:    true,
			CoveredPredictions:      true,
			CoveredTopscorerGoals:   true,
			CoveredTopscorerAssists: true,
			CoveredTopscorerCards:   true,
		}
	)

	sportmonksService := New(nil, nil)

	// Fetch page 1 of leagues.
	leagues, hasNextPage, err := sportmonksService.GetLeagues(context.TODO(), 1)

	assert.Nil(t, err)
	assert.NotNil(t, leagues)
	assert.Equal(t, firstLeague, leagues[0])
	assert.Equal(t, secondLeague, leagues[1])
	// A single page of results is expected.
	assert.Equal(t, false, hasNextPage)
}
explode_data.jsonl/28415
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 777 }
[ 2830, 3393, 50603, 13614, 2304, 13488, 1155, 353, 8840, 836, 8, 341, 2405, 2399, 197, 42190, 70107, 284, 28335, 11824, 4663, 515, 298, 15070, 791, 2757, 915, 25, 394, 220, 17, 22, 16, 345, 298, 6258, 4976, 915, 25, 2290, 220, 18, 22...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestShutdown(t *testing.T) { srv := Serve() defer ServerClose(srv) lg := log.New(os.Stderr, "", log.LstdFlags) c := GetWebsocket(lg, 8080) // Direct use of the connection to see that data is sent/received. err := c.WriteMessage(websocket.TextMessage, []byte("hello")) if err != nil { t.Fatal(err) } _, message, err := c.ReadMessage() if err != nil { t.Fatal(err) } if string(message) != "hello" { t.Fatal("expecting hello message") } SendClose(lg, c) _, _, err = c.ReadMessage() if err == nil { t.Fatal("expecting the connection to be closed") } t.Log("shutdown err", err) }
explode_data.jsonl/66034
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 249 }
[ 2830, 3393, 62004, 1155, 353, 8840, 836, 8, 341, 1903, 10553, 1669, 52932, 741, 16867, 8422, 7925, 1141, 10553, 692, 8810, 70, 1669, 1487, 7121, 9638, 77319, 11, 7342, 1487, 1214, 1834, 9195, 340, 1444, 1669, 2126, 5981, 9556, 2333, 70,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
// TestService_List covers application.Service.List: the happy path, a
// repository failure, and page-size validation (must be 1..200).
func TestService_List(t *testing.T) {
	// given
	testErr := errors.New("Test error")

	modelApplications := []*model.Application{
		fixModelApplication("foo", "tenant-foo", "foo", "Lorem Ipsum"),
		fixModelApplication("bar", "tenant-bar", "bar", "Lorem Ipsum"),
	}
	applicationPage := &model.ApplicationPage{
		Data:       modelApplications,
		TotalCount: len(modelApplications),
		PageInfo: &pagination.Page{
			HasNextPage: false,
			EndCursor:   "end",
			StartCursor: "start",
		},
	}

	first := 2
	after := "test"
	filter := []*labelfilter.LabelFilter{{Key: ""}}

	// Tenant context is required by the repository mock expectations.
	tnt := "tenant"
	externalTnt := "external-tnt"
	ctx := context.TODO()
	ctx = tenant.SaveToContext(ctx, tnt, externalTnt)

	testCases := []struct {
		Name               string
		RepositoryFn       func() *automock.ApplicationRepository
		InputLabelFilters  []*labelfilter.LabelFilter
		InputPageSize      int
		ExpectedResult     *model.ApplicationPage
		ExpectedErrMessage string
	}{
		{
			Name: "Success",
			RepositoryFn: func() *automock.ApplicationRepository {
				repo := &automock.ApplicationRepository{}
				repo.On("List", ctx, tnt, filter, first, after).Return(applicationPage, nil).Once()
				return repo
			},
			InputPageSize:      first,
			InputLabelFilters:  filter,
			ExpectedResult:     applicationPage,
			ExpectedErrMessage: "",
		},
		{
			Name: "Returns error when application listing failed",
			RepositoryFn: func() *automock.ApplicationRepository {
				repo := &automock.ApplicationRepository{}
				repo.On("List", ctx, tnt, filter, first, after).Return(nil, testErr).Once()
				return repo
			},
			InputPageSize:      first,
			InputLabelFilters:  filter,
			ExpectedResult:     nil,
			ExpectedErrMessage: testErr.Error(),
		},
		{
			// No expectations on the mock: the repository must not be
			// called at all for an invalid page size.
			Name: "Returns error when page size is less than 1",
			RepositoryFn: func() *automock.ApplicationRepository {
				repo := &automock.ApplicationRepository{}
				return repo
			},
			InputPageSize:      0,
			InputLabelFilters:  filter,
			ExpectedResult:     nil,
			ExpectedErrMessage: "page size must be between 1 and 200",
		},
		{
			Name: "Returns error when page size is bigger than 200",
			RepositoryFn: func() *automock.ApplicationRepository {
				repo := &automock.ApplicationRepository{}
				return repo
			},
			InputPageSize:      201,
			InputLabelFilters:  filter,
			ExpectedResult:     nil,
			ExpectedErrMessage: "page size must be between 1 and 200",
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.Name, func(t *testing.T) {
			repo := testCase.RepositoryFn()
			svc := application.NewService(nil, nil, repo, nil, nil, nil, nil, nil, nil, nil, nil)

			// when
			app, err := svc.List(ctx, testCase.InputLabelFilters, testCase.InputPageSize, after)

			// then
			if testCase.ExpectedErrMessage == "" {
				require.NoError(t, err)
				assert.Equal(t, testCase.ExpectedResult, app)
			} else {
				require.Error(t, err)
				assert.Contains(t, err.Error(), testCase.ExpectedErrMessage)
			}

			repo.AssertExpectations(t)
		})
	}
}
explode_data.jsonl/50079
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1213 }
[ 2830, 3393, 1860, 27104, 1155, 353, 8840, 836, 8, 341, 197, 322, 2661, 198, 18185, 7747, 1669, 5975, 7121, 445, 2271, 1465, 5130, 19727, 50359, 1669, 29838, 2528, 17521, 515, 197, 1166, 941, 1712, 4988, 445, 7975, 497, 330, 43919, 12, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestSplitHostPortDefault_Valid(t *testing.T) { tests := []struct { input string defaultHost string defaultPort string host string port string }{ {input: "192.168.0.140:456", defaultHost: "", defaultPort: "", host: "192.168.0.140", port: "456"}, {input: "192.168.0.140", defaultHost: "", defaultPort: "123", host: "192.168.0.140", port: "123"}, {input: "[::1]:456", defaultHost: "", defaultPort: "", host: "::1", port: "456"}, {input: "[::1]", defaultHost: "", defaultPort: "123", host: "::1", port: "123"}, {input: ":456", defaultHost: "1.2.3.4", defaultPort: "", host: "1.2.3.4", port: "456"}, {input: "xyz.rds.amazonaws.com", defaultHost: "", defaultPort: "123", host: "xyz.rds.amazonaws.com", port: "123"}, {input: "xyz.rds.amazonaws.com:123", defaultHost: "", defaultPort: "", host: "xyz.rds.amazonaws.com", port: "123"}, {input: "", defaultHost: "localhost", defaultPort: "1433", host: "localhost", port: "1433"}, } for _, testcase := range tests { addr, err := SplitHostPortDefault(testcase.input, testcase.defaultHost, testcase.defaultPort) assert.NoError(t, err) assert.Equal(t, testcase.host, addr.Host) assert.Equal(t, testcase.port, addr.Port) } }
explode_data.jsonl/37625
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 480 }
[ 2830, 3393, 20193, 9296, 7084, 3675, 97279, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 22427, 981, 914, 198, 197, 11940, 9296, 914, 198, 197, 11940, 7084, 914, 271, 197, 63104, 914, 198, 197, 52257, 914, 198, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestSrcPtr(t *testing.T) { var a string = "aaa" var e string = "eeee" var c bool = true var d []int = []int{3, 2} src := WithPtr{ A: &a, B: 3, C: &c, D: &d, E: &e, G: "dddd", } var dst NoPtr StructCopy(&dst, &src) if src.B != dst.B { t.Error("field B failed") } if *src.C != dst.C { t.Error("field C failed") } if !reflect.DeepEqual(*src.D, dst.D) { t.Error("field D failed") } if *src.E != dst.E { t.Error("field E failed") } if dst.G == nil || *dst.G != src.G { t.Error("field G failed") } }
explode_data.jsonl/40579
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 272 }
[ 2830, 3393, 20360, 5348, 1155, 353, 8840, 836, 8, 341, 2405, 264, 914, 284, 330, 32646, 698, 2405, 384, 914, 284, 330, 68616, 698, 2405, 272, 1807, 284, 830, 198, 2405, 294, 3056, 396, 284, 3056, 396, 90, 18, 11, 220, 17, 532, 411...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func Test_StoreDelete(t *testing.T) { store, mock, close_ := store(t) defer close_() mock.ExpectExec( "^DELETE FROM variables WHERE \\(id IN \\(\\$1, \\$2, \\$3\\)\\)$", ).WillReturnResult(sqlmock.NewResult(0, 3)) if err := store.Delete(1, 2, 3); err != nil { t.Error(errors.Cause(err)) } }
explode_data.jsonl/29317
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 131 }
[ 2830, 3393, 92684, 6435, 1155, 353, 8840, 836, 8, 341, 57279, 11, 7860, 11, 3265, 62, 1669, 3553, 1155, 340, 16867, 3265, 62, 2822, 77333, 81893, 10216, 1006, 197, 197, 86490, 14424, 4295, 7332, 5288, 24984, 7, 307, 1964, 24984, 7, 34...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestDB_LabelNames verifies LabelNames() at three stages: from the
// head only, from persisted blocks after compaction, and as the union
// of head and disk after appending a second batch of samples.
func TestDB_LabelNames(t *testing.T) {
	tests := []struct {
		// Add 'sampleLabels1' -> Test Head -> Compact -> Test Disk ->
		// -> Add 'sampleLabels2' -> Test Head+Disk
		sampleLabels1 [][2]string // For checking head and disk separately.
		// To test Head+Disk, sampleLabels2 should have
		// at least 1 unique label name which is not in sampleLabels1.
		sampleLabels2 [][2]string // For checking head and disk together.
		exp1          []string    // after adding sampleLabels1.
		exp2          []string    // after adding sampleLabels1 and sampleLabels2.
	}{
		{
			sampleLabels1: [][2]string{
				{"name1", "1"},
				{"name3", "3"},
				{"name2", "2"},
			},
			sampleLabels2: [][2]string{
				{"name4", "4"},
				{"name1", "1"},
			},
			exp1: []string{"name1", "name2", "name3"},
			exp2: []string{"name1", "name2", "name3", "name4"},
		},
		{
			sampleLabels1: [][2]string{
				{"name2", "2"},
				{"name1", "1"},
				{"name2", "2"},
			},
			sampleLabels2: [][2]string{
				{"name6", "6"},
				{"name0", "0"},
			},
			exp1: []string{"name1", "name2"},
			exp2: []string{"name0", "name1", "name2", "name6"},
		},
	}

	blockRange := DefaultOptions.BlockRanges[0]

	// Appends samples into the database; one sample per label pair per
	// timestamp step, spaced blockRange apart so compaction cuts blocks.
	appendSamples := func(db *DB, mint, maxt int64, sampleLabels [][2]string) {
		t.Helper()
		app := db.Appender()
		for i := mint; i <= maxt; i++ {
			for _, tuple := range sampleLabels {
				label := labels.FromStrings(tuple[0], tuple[1])
				_, err := app.Add(label, i*blockRange, 0)
				testutil.Ok(t, err)
			}
		}
		err := app.Commit()
		testutil.Ok(t, err)
	}

	for _, tst := range tests {
		db, delete := openTestDB(t, nil)
		// Deliberately deferred (not per-iteration): all DBs are closed
		// and deleted when the test function returns.
		defer func() {
			testutil.Ok(t, db.Close())
			delete()
		}()

		appendSamples(db, 0, 4, tst.sampleLabels1)

		// Testing head.
		headIndexr, err := db.head.Index()
		testutil.Ok(t, err)
		labelNames, err := headIndexr.LabelNames()
		testutil.Ok(t, err)
		testutil.Equals(t, tst.exp1, labelNames)
		testutil.Ok(t, headIndexr.Close())

		// Testing disk.
		err = db.compact()
		testutil.Ok(t, err)
		// All blocks have same label names, hence check them individually.
		// No need to aggregate and check.
		for _, b := range db.Blocks() {
			blockIndexr, err := b.Index()
			testutil.Ok(t, err)
			labelNames, err = blockIndexr.LabelNames()
			testutil.Ok(t, err)
			testutil.Equals(t, tst.exp1, labelNames)
			testutil.Ok(t, blockIndexr.Close())
		}

		// Adding more samples to head with new label names
		// so that we can test (head+disk).LabelNames() (the union).
		appendSamples(db, 5, 9, tst.sampleLabels2)

		// Testing DB (union).
		q, err := db.Querier(math.MinInt64, math.MaxInt64)
		testutil.Ok(t, err)
		labelNames, err = q.LabelNames()
		testutil.Ok(t, err)
		testutil.Ok(t, q.Close())
		testutil.Equals(t, tst.exp2, labelNames)
	}
}
explode_data.jsonl/64386
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1210 }
[ 2830, 3393, 3506, 53557, 7980, 1155, 353, 8840, 836, 8, 341, 78216, 1669, 3056, 1235, 341, 197, 197, 322, 2691, 364, 13611, 23674, 16, 6, 1464, 3393, 11203, 1464, 54782, 1464, 3393, 38868, 12381, 197, 197, 322, 1464, 2691, 364, 13611, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
// TestExtractKubeReservedFromKubeEnv checks extraction of the
// kube-reserved value from a kube-env blob, in both the legacy
// KUBELET_TEST_ARGS form and the AUTOSCALER_ENV_VARS form, plus
// error cases where no reserved value is present.
func TestExtractKubeReservedFromKubeEnv(t *testing.T) {
	type testCase struct {
		kubeEnv          string
		expectedReserved string
		expectedErr      bool
	}
	testCases := []testCase{
		{
			// --kube-reserved passed directly in KUBELET_TEST_ARGS.
			kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" +
				"NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" +
				"DNS_SERVER_IP: '10.0.0.10'\n" +
				"KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction --kube-reserved=cpu=1000m,memory=300000Mi\n" +
				"NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n",
			expectedReserved: "cpu=1000m,memory=300000Mi",
			expectedErr:      false,
		},
		{
			// kube_reserved supplied via AUTOSCALER_ENV_VARS.
			kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" +
				"DNS_SERVER_IP: '10.0.0.10'\n" +
				"AUTOSCALER_ENV_VARS: node_labels=a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true;" +
				"node_taints='dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c';" +
				"kube_reserved=cpu=1000m,memory=300000Mi;" +
				"os=linux\n" +
				"KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction\n",
			expectedReserved: "cpu=1000m,memory=300000Mi",
			expectedErr:      false,
		},
		{
			// Multi-line KUBELET_ARGS
			kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" +
				"DNS_SERVER_IP: '10.0.0.10'\n" +
				"AUTOSCALER_ENV_VARS: node_labels=a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true;" +
				"node_taints='dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c';" +
				"kube_reserved=cpu=1000m,memory=300000Mi\n" +
				"KUBELET_ARGS: --experimental-allocatable-ignore-eviction\n" +
				" --kube_reserved=cpu=1000m,memory=300000Mi\n",
			expectedReserved: "cpu=1000m,memory=300000Mi",
			expectedErr:      false,
		},
		{
			// KUBELET_TEST_ARGS present but without --kube-reserved.
			kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" +
				"NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" +
				"DNS_SERVER_IP: '10.0.0.10'\n" +
				"KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction\n" +
				"NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n",
			expectedReserved: "",
			expectedErr:      true,
		},
		{
			// No kubelet args at all.
			kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" +
				"NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" +
				"DNS_SERVER_IP: '10.0.0.10'\n" +
				"NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n",
			expectedReserved: "",
			expectedErr:      true,
		},
	}
	for _, tc := range testCases {
		reserved, err := extractKubeReservedFromKubeEnv(tc.kubeEnv)
		assert.Equal(t, tc.expectedReserved, reserved)
		if tc.expectedErr {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
	}
}
explode_data.jsonl/50923
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1438 }
[ 2830, 3393, 28959, 42, 3760, 53003, 3830, 42, 3760, 14359, 1155, 353, 8840, 836, 8, 341, 13158, 54452, 2036, 341, 197, 16463, 3760, 14359, 688, 914, 198, 197, 42400, 53003, 914, 198, 197, 42400, 7747, 414, 1807, 198, 197, 630, 18185, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestNumBigString(t *testing.T) { i := "900719925474099301239109123101" // very big j := fmt.Sprintf(`{"data":[ "hello", "%s" ]}`, i) res := Get(j, "data.1") if res.String() != "900719925474099301239109123101" { t.Fatalf("expected '%v', got '%v'", "900719925474099301239109123101", res.String()) } }
explode_data.jsonl/43456
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 143 }
[ 2830, 3393, 4651, 15636, 703, 1155, 353, 8840, 836, 8, 341, 8230, 1669, 330, 24, 15, 15, 22, 16, 24, 24, 17, 20, 19, 22, 19, 15, 24, 24, 18, 15, 16, 17, 18, 24, 16, 15, 24, 16, 17, 18, 16, 15, 16, 1, 442, 1602, 2409, 198...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestRatNeg(t *testing.T) { zero := new(Rat) for _, a := range setStringTests { x, ok := new(Rat).SetString(a.in) if !ok { continue } e := new(Rat).Sub(zero, x) z := new(Rat).Neg(x) if z.Cmp(e) != 0 { t.Errorf("got Neg(%v) = %v; want %v", x, z, e) } } }
explode_data.jsonl/35073
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 147 }
[ 2830, 3393, 49, 266, 47800, 1155, 353, 8840, 836, 8, 341, 197, 14154, 1669, 501, 2785, 266, 340, 2023, 8358, 264, 1669, 2088, 738, 703, 18200, 341, 197, 10225, 11, 5394, 1669, 501, 2785, 266, 568, 1649, 703, 2877, 1858, 340, 197, 74...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestSetArgSlice(t *testing.T) { _, ctrl := createFixtures(t) subject := new(Subject) var in = []byte{4, 5, 6} var set = []byte{1, 2, 3} ctrl.RecordCall(subject, "SetArgMethod", in, nil).SetArg(0, set) ctrl.Call(subject, "SetArgMethod", in, nil) if !reflect.DeepEqual(in, set) { t.Error("Expected SetArg() to modify input slice argument") } ctrl.Finish() }
explode_data.jsonl/17284
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 149 }
[ 2830, 3393, 1649, 2735, 33236, 1155, 353, 8840, 836, 8, 341, 197, 6878, 23743, 1669, 1855, 25958, 18513, 1155, 340, 28624, 583, 1669, 501, 7, 13019, 692, 2405, 304, 284, 3056, 3782, 90, 19, 11, 220, 20, 11, 220, 21, 532, 2405, 738, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestProcessFile is an integration test for the SFTP input: it writes
// a fixture file next to a running SFTP server, connects, and reads the
// file back. Disabled via t.Skip — it needs a live server at sftpPort.
func TestProcessFile(t *testing.T) {
	t.Skip()
	filePath := path.Join(sftpDirectory, "test.txt")
	testCase := scenario{
		Name: "good conf",
		Conf: &SFTPConfig{
			Address: "http://localhost:" + sftpPort,
			Paths:   []string{filePath},
			Credentials: sftpSetup.Credentials{
				Username: "foo",
				Password: "pass",
			},
			MaxConnectionAttempts: 10,
			RetrySleepDuration:    "5s",
			Codec:                 "lines",
			DeleteOnFinish:        false,
			MaxBuffer:             1000000,
		},
	}
	GenerateTestFile(filePath, "This is a test file")
	// NOTE(review): the fixed sleeps presumably wait for the server to
	// pick up the file / accept connections — confirm and consider
	// polling instead if this test is re-enabled.
	time.Sleep(time.Second * 1)
	proc, err := NewSFTP(*testCase.Conf, benthosLog.Noop(), metrics.Noop())
	assert.NoError(t, err, "config should not error")
	assert.NotNil(t, proc, "should return non-nil data")
	time.Sleep(time.Second * 3)
	err = proc.ConnectWithContext(context.Background())
	assert.NoError(t, err, "ConnectWithContext should not error")
	msg, _, err := proc.ReadWithContext(context.Background())
	assert.NoError(t, err, "ReadWithContext should not error")
	ValidateMessage(t, msg, "This is a test file", filePath)
}
explode_data.jsonl/10205
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 434 }
[ 2830, 3393, 7423, 1703, 1155, 353, 8840, 836, 8, 341, 3244, 57776, 2822, 17661, 1820, 1669, 1815, 22363, 1141, 25068, 9310, 11, 330, 1944, 3909, 1138, 18185, 4207, 1669, 15048, 515, 197, 21297, 25, 330, 18536, 2335, 756, 197, 197, 15578...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestIDPCanParse exercises IdP AuthnRequest parsing on the
// HTTP-Redirect binding: one valid base64+deflate payload, then several
// malformed payloads, and finally a disallowed HTTP method.
func TestIDPCanParse(t *testing.T) {
	test := NewIdentifyProviderTest(t)

	// Well-formed SAMLRequest: must parse and validate.
	r, _ := http.NewRequest("GET", "https://idp.example.com/saml/sso?RelayState=ThisIsTheRelayState&SAMLRequest=lJJBayoxFIX%2FypC9JhnU5wszAz7lgWCLaNtFd5fMbQ1MkmnunVb%2FfUfbUqEgdhs%2BTr5zkmLW8S5s8KVD4mzvm0Cl6FIwEciRCeCRDFuznd2sTD5Upk2Ro42NyGZEmNjFMI%2BBOo9pi%2BnVWbzfrEqxY27JSEntEPfg2waHNnpJ4JtcgiWRLfoLXYBjwDfu6p%2B8JIoiWy5K4eqBUipXIzVRUwXKKtRK53qkJ3qqQVuNPUjU4TIQQ%2BBS5EqPBzofKH2ntBn%2FMervo8jWnyX%2BuVC78FwKkT1gopNKX1JUxSklXTMIfM0gsv8xeeDL%2BPGk7%2FF0Qg0GdnwQ1cW5PDLUwFDID6uquO1Dlot1bJw9%2FPLRmia%2BzRMCYyk4dSiq6205QSDXOxfy3KAq5Pkvqt4DAAD%2F%2Fw%3D%3D", nil)
	req, err := NewIdpAuthnRequest(&test.IDP, r)
	assert.Check(t, err)
	assert.Check(t, req.Validate())

	// Missing SAMLRequest parameter: decompression fails on empty input.
	r, _ = http.NewRequest("GET", "https://idp.example.com/saml/sso?RelayState=ThisIsTheRelayState", nil)
	_, err = NewIdpAuthnRequest(&test.IDP, r)
	assert.Check(t, is.Error(err, "cannot decompress request: unexpected EOF"))

	// Payload is not valid base64.
	r, _ = http.NewRequest("GET", "https://idp.example.com/saml/sso?RelayState=ThisIsTheRelayState&SAMLRequest=NotValidBase64", nil)
	_, err = NewIdpAuthnRequest(&test.IDP, r)
	assert.Check(t, is.Error(err, "cannot decode request: illegal base64 data at input byte 12"))

	// Valid base64 but the content is not deflate-compressed.
	r, _ = http.NewRequest("GET", "https://idp.example.com/saml/sso?RelayState=ThisIsTheRelayState&SAMLRequest=bm90IGZsYXRlIGVuY29kZWQ%3D", nil)
	_, err = NewIdpAuthnRequest(&test.IDP, r)
	assert.Check(t, is.Error(err, "cannot decompress request: flate: corrupt input before offset 1"))

	// Unsupported HTTP method is rejected even with a valid payload.
	r, _ = http.NewRequest("FROBNICATE",
		"https://idp.example.com/saml/sso?RelayState=ThisIsTheRelayState&SAMLRequest=lJJBayoxFIX%2FypC9JhnU5wszAz7lgWCLaNtFd5fMbQ1MkmnunVb%2FfUfbUqEgdhs%2BTr5zkmLW8S5s8KVD4mzvm0Cl6FIwEciRCeCRDFuznd2sTD5Upk2Ro42NyGZEmNjFMI%2BBOo9pi%2BnVWbzfrEqxY27JSEntEPfg2waHNnpJ4JtcgiWRLfoLXYBjwDfu6p%2B8JIoiWy5K4eqBUipXIzVRUwXKKtRK53qkJ3qqQVuNPUjU4TIQQ%2BBS5EqPBzofKH2ntBn%2FMervo8jWnyX%2BuVC78FwKkT1gopNKX1JUxSklXTMIfM0gsv8xeeDL%2BPGk7%2FF0Qg0GdnwQ1cW5PDLUwFDID6uquO1Dlot1bJw9%2FPLRmia%2BzRMCYyk4dSiq6205QSDXOxfy3KAq5Pkvqt4DAAD%2F%2Fw%3D%3D", nil)
	_, err = NewIdpAuthnRequest(&test.IDP, r)
	assert.Check(t, is.Error(err, "method not allowed"))
}
explode_data.jsonl/19827
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1186 }
[ 2830, 3393, 915, 4872, 276, 14463, 1155, 353, 8840, 836, 8, 341, 18185, 1669, 1532, 28301, 1437, 5179, 2271, 1155, 340, 7000, 11, 716, 1669, 1758, 75274, 445, 3806, 497, 330, 2428, 1110, 307, 79, 7724, 905, 2687, 9467, 2687, 704, 30, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func Test_buildEnvVars_TwoSortedKeys(t *testing.T) { firstKey := "first" lastKey := "last" inputEnvs := map[string]string{ lastKey: "", firstKey: "", } function := types.FunctionDeployment{ EnvVars: inputEnvs, } coreEnvs := buildEnvVars(&function) if coreEnvs[0].Name != firstKey { t.Errorf("first want: %s, got: %s", firstKey, coreEnvs[0].Name) t.Fail() } }
explode_data.jsonl/1074
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 164 }
[ 2830, 3393, 20801, 14359, 28305, 82989, 51051, 8850, 1155, 353, 8840, 836, 8, 341, 42190, 1592, 1669, 330, 3896, 698, 33096, 1592, 1669, 330, 4259, 1837, 22427, 1702, 11562, 1669, 2415, 14032, 30953, 515, 197, 33096, 1592, 25, 220, 8324, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestTryExceptionInCatch(t *testing.T) { const SCRIPT = ` function A() { var x; try { throw 4; } catch(e) { throw 5; } return x; } var rv; try { A(); } catch (e) { rv = e; } ` testScript(SCRIPT, intToValue(5), t) }
explode_data.jsonl/75224
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 130 }
[ 2830, 3393, 21453, 1354, 641, 57760, 1155, 353, 8840, 836, 8, 341, 4777, 53679, 284, 22074, 7527, 362, 368, 341, 197, 2405, 856, 280, 197, 6799, 341, 298, 9581, 220, 19, 280, 197, 197, 92, 2287, 2026, 8, 341, 298, 9581, 220, 20, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
// TestStorageInputResource checks how AddInputResource wires GCS
// storage inputs into a TaskSpec: the generated create-dir and fetch
// steps, secret volume mounts, and the error case where a binding has
// neither a resource spec nor a resource ref.
func TestStorageInputResource(t *testing.T) {
	// Shared input declaration: one storage-typed resource.
	gcsStorageInputs := &v1alpha1.Inputs{
		Resources: []v1alpha1.TaskResource{{
			Name: "gcs-input-resource",
			Type: "storage",
		}},
	}
	for _, c := range []struct {
		desc    string
		task    *v1alpha1.Task
		taskRun *v1alpha1.TaskRun
		wantErr bool
		want    *v1alpha1.TaskSpec
	}{{
		desc: "inputs with no resource spec and resource ref",
		task: &v1alpha1.Task{
			Spec: v1alpha1.TaskSpec{
				Inputs: &v1alpha1.Inputs{
					Resources: []v1alpha1.TaskResource{{
						Name: "gcs-input-resource",
						Type: "storage",
					}},
				},
			},
		},
		taskRun: &v1alpha1.TaskRun{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "get-storage-run",
				Namespace: "marshmallow",
			},
			Spec: v1alpha1.TaskRunSpec{
				Inputs: v1alpha1.TaskRunInputs{
					Resources: []v1alpha1.TaskResourceBinding{{
						Name: "gcs-input-resource",
					}},
				},
			},
		},
		// Binding carries neither ResourceSpec nor ResourceRef.
		wantErr: true,
	}, {
		desc: "inputs with resource spec and no resource ref",
		task: &v1alpha1.Task{
			Spec: v1alpha1.TaskSpec{
				Inputs: gcsStorageInputs,
			},
		},
		taskRun: &v1alpha1.TaskRun{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "get-storage-run",
				Namespace: "marshmallow",
			},
			Spec: v1alpha1.TaskRunSpec{
				Inputs: v1alpha1.TaskRunInputs{
					Resources: []v1alpha1.TaskResourceBinding{{
						Name: "gcs-input-resource",
						ResourceSpec: &v1alpha1.PipelineResourceSpec{
							Type: v1alpha1.PipelineResourceTypeStorage,
							Params: []v1alpha1.ResourceParam{{
								Name:  "Location",
								Value: "gs://fake-bucket/rules.zip",
							}, {
								Name:  "Type",
								Value: "gcs",
							}},
						},
					}},
				},
			},
		},
		wantErr: false,
		// Inline spec (a single object) is fetched with `cp`.
		want: &v1alpha1.TaskSpec{
			Inputs: gcsStorageInputs,
			Steps: []corev1.Container{{
				Name:    "create-dir-gcs-input-resource-9l9zj",
				Image:   "override-with-bash-noop:latest",
				Command: []string{"/ko-app/bash"},
				Args:    []string{"-args", "mkdir -p /workspace/gcs-input-resource"},
			}, {
				Name:    "fetch-gcs-input-resource-mz4c7",
				Image:   "override-with-gsutil-image:latest",
				Command: []string{"/ko-app/gsutil"},
				Args:    []string{"-args", "cp gs://fake-bucket/rules.zip /workspace/gcs-input-resource"},
			}},
		},
	}, {
		desc: "no inputs",
		task: &v1alpha1.Task{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "get-storage",
				Namespace: "marshmallow",
			},
		},
		taskRun: &v1alpha1.TaskRun{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "get-storage-run",
				Namespace: "marshmallow",
			},
		},
		// No inputs declared: nothing to add, empty spec returned.
		wantErr: false,
		want:    &v1alpha1.TaskSpec{},
	}, {
		desc: "storage resource as input",
		task: &v1alpha1.Task{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "get-storage",
				Namespace: "marshmallow",
			},
			Spec: v1alpha1.TaskSpec{
				Inputs: gcsStorageInputs,
			},
		},
		taskRun: &v1alpha1.TaskRun{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "get-storage-run",
				Namespace: "marshmallow",
			},
			Spec: v1alpha1.TaskRunSpec{
				Inputs: v1alpha1.TaskRunInputs{
					Resources: []v1alpha1.TaskResourceBinding{{
						Name: "gcs-input-resource",
						ResourceRef: v1alpha1.PipelineResourceRef{
							Name: "storage-gcs-keys",
						},
					}},
				},
			},
		},
		wantErr: false,
		// Referenced resource ("storage-gcs-keys", resolved by the
		// mock below) is fetched with `rsync` and mounts its secrets.
		want: &v1alpha1.TaskSpec{
			Inputs: gcsStorageInputs,
			Steps: []corev1.Container{{
				Name:    "create-dir-storage-gcs-keys-9l9zj",
				Image:   "override-with-bash-noop:latest",
				Command: []string{"/ko-app/bash"},
				Args:    []string{"-args", "mkdir -p /workspace/gcs-input-resource"},
			}, {
				Name:    "fetch-storage-gcs-keys-mz4c7",
				Image:   "override-with-gsutil-image:latest",
				Command: []string{"/ko-app/gsutil"},
				Args:    []string{"-args", "rsync -d -r gs://fake-bucket/rules.zip /workspace/gcs-input-resource"},
				VolumeMounts: []corev1.VolumeMount{
					{Name: "volume-storage-gcs-keys-secret-name", MountPath: "/var/secret/secret-name"},
				},
				Env: []corev1.EnvVar{
					{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/secret/secret-name/key.json"},
				},
			}},
			Volumes: []corev1.Volume{{
				Name:         "volume-storage-gcs-keys-secret-name",
				VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "secret-name"}},
			}, {
				Name:         "volume-storage-gcs-keys-secret-name2",
				VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "secret-name2"}},
			}},
		},
	}} {
		t.Run(c.desc, func(t *testing.T) {
			// Seed name generation so the -9l9zj / -mz4c7 suffixes above
			// are deterministic.
			names.TestingSeed()
			setUp(t)
			fakekubeclient := fakek8s.NewSimpleClientset()
			got, err := AddInputResource(fakekubeclient, c.task.Name, &c.task.Spec, c.taskRun, mockResolveTaskResources(c.taskRun), logger)
			if (err != nil) != c.wantErr {
				t.Errorf("Test: %q; AddInputResource() error = %v, WantErr %v", c.desc, err, c.wantErr)
			}
			if d := cmp.Diff(got, c.want); d != "" {
				t.Errorf("Diff:\n%s", d)
			}
		})
	}
}
explode_data.jsonl/28010
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2381 }
[ 2830, 3393, 5793, 2505, 4783, 1155, 353, 8840, 836, 8, 341, 3174, 4837, 5793, 31946, 1669, 609, 85, 16, 7141, 16, 16130, 82, 515, 197, 197, 11277, 25, 3056, 85, 16, 7141, 16, 28258, 4783, 90, 515, 298, 21297, 25, 330, 70, 4837, 13...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestMSSQLStmtAndRows(t *testing.T) { db, sc, err := mssqlConnect() if err != nil { t.Fatal(err) } defer func() { // not checking resources usage here, because these are // unpredictable due to use of goroutines. err := db.Close() if err != nil { t.Fatalf("error closing DB: %v", err) } }() var staff = map[string][]string{ "acc": {"John", "Mary", "Moe"}, "eng": {"Bar", "Foo", "Uno"}, "sls": {"Scrudge", "Sls2", "Sls3"}, } db.Exec("drop table dbo.temp") exec(t, db, "create table dbo.temp (dept char(3), name varchar(20))") func() { // test 1 Stmt and many Exec's s, err := db.Prepare("insert into dbo.temp (dept, name) values (?, ?)") if err != nil { t.Fatal(err) } defer s.Close() for dept, people := range staff { for _, person := range people { _, err := s.Exec(dept, person) if err != nil { t.Fatal(err) } } } }() func() { // test Stmt is closed before Rows are s, err := db.Prepare("select name from dbo.temp") if err != nil { t.Fatal(err) } r, err := s.Query() if err != nil { s.Close() t.Fatal(err) } defer r.Close() // TODO(brainman): dangling statement(s) bug reported // https://github.com/golang/go/issues/3865 err = s.Close() if err != nil { t.Fatal(err) } n := 0 for r.Next() { var name string err = r.Scan(&name) if err != nil { t.Fatal(err) } n++ } err = r.Err() if err != nil { t.Fatal(err) } const should = 9 if n != should { t.Fatalf("expected %v, but received %v", should, n) } }() if db.Driver().(*Driver).Stats.StmtCount != sc { t.Fatalf("invalid statement count: expected %v, is %v", sc, db.Driver().(*Driver).Stats.StmtCount) } // no resource tracking past this point func() { // test 1 Stmt and many Query's executed one after the other s, err := db.Prepare("select name from dbo.temp where dept = ? 
order by name") if err != nil { t.Fatal(err) } defer s.Close() for dept, people := range staff { func() { r, err := s.Query(dept) if err != nil { t.Fatal(err) } defer r.Close() i := 0 for r.Next() { var is string err = r.Scan(&is) if err != nil { t.Fatal(err) } if people[i] != is { t.Fatalf("expected %v, but received %v", people[i], is) } i++ } err = r.Err() if err != nil { t.Fatal(err) } }() } // test 1 Stmt and many simultaneous Query's eof := fmt.Errorf("eof") ch := make(map[string]chan error) for dept, people := range staff { c := make(chan error) go func(c chan error, dept string, people []string) { c <- nil // NOTE(brainman): this could actually re-prepare since // we are running it simultaneously in multiple goroutines r, err := s.Query(dept) if err != nil { c <- fmt.Errorf("%v", err) return } defer r.Close() i := 0 c <- nil for r.Next() { var is string c <- nil err = r.Scan(&is) if err != nil { c <- fmt.Errorf("%v", err) return } c <- nil if people[i] != is { c <- fmt.Errorf("expected %v, but received %v", people[i], is) return } i++ } err = r.Err() if err != nil { c <- fmt.Errorf("%v", err) return } c <- eof }(c, dept, people) ch[dept] = c } for len(ch) > 0 { for dept, c := range ch { err := <-c if err != nil { if err != eof { t.Errorf("dept=%v: %v", dept, err) } delete(ch, dept) } } } }() exec(t, db, "drop table dbo.temp") }
explode_data.jsonl/33550
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1749 }
[ 2830, 3393, 44, 1220, 3588, 31063, 3036, 9024, 1155, 353, 8840, 836, 8, 341, 20939, 11, 1136, 11, 1848, 1669, 296, 79713, 14611, 741, 743, 1848, 961, 2092, 341, 197, 3244, 26133, 3964, 340, 197, 532, 16867, 2915, 368, 341, 197, 197, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestAA(t *testing.T) { var s []int for i := 0; i < 100; i++ { s = append(s, i) } const batch = 13 for i := 0; i < len(s); i += batch { var page []int FillPage(&page, s, i, batch) fmt.Println("got:", page) } }
explode_data.jsonl/64043
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 109 }
[ 2830, 3393, 6029, 1155, 353, 8840, 836, 8, 341, 2405, 274, 3056, 396, 198, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 16, 15, 15, 26, 600, 1027, 341, 197, 1903, 284, 8737, 1141, 11, 600, 340, 197, 630, 4777, 7162, 284, 220, 16...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDecodeString(t *testing.T) { for _, pair := range pairs { got, err := DefaultCodec.DecodeString(pair.encoded) if err != nil { t.Errorf("Decode(%q) error, %v", pair.encoded, err) } if !bytes.Equal(got, []byte(pair.decoded)) { t.Errorf("Decode(%q) = %q, want %q", pair.encoded, got, pair.decoded) } } }
explode_data.jsonl/17244
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 147 }
[ 2830, 3393, 32564, 703, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 6716, 1669, 2088, 13530, 341, 197, 3174, 354, 11, 1848, 1669, 7899, 36913, 56372, 703, 39144, 13, 19329, 340, 197, 743, 1848, 961, 2092, 341, 298, 3244, 13080, 445, 325...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestCheckStringSetWithInteger(t *testing.T) { l := lua.NewState() lua.DoString(l, `x = 5`) l.Global("x") defer func() { if x := recover(); x == nil { t.Fatal("Didn't panic") } }() checkStringSet(l, -1) }
explode_data.jsonl/40380
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 103 }
[ 2830, 3393, 3973, 703, 1649, 2354, 3486, 1155, 353, 8840, 836, 8, 341, 8810, 1669, 20357, 7121, 1397, 2822, 44822, 33596, 703, 2333, 11, 1565, 87, 284, 220, 20, 24183, 8810, 27381, 445, 87, 5130, 16867, 2915, 368, 341, 197, 743, 856, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestDirect(t *testing.T) { cli := createTestClient(t, "direct://default/127.0.0.1:18003,127.0.0.1:18002") count := 0 for i := 0; i < 10; i++ { if resp, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { t.Fatalf("TestDirect: SayHello failed!err:=%v", err) } else { if resp.Message == "server2" { count++ } } } if count != 10 { t.Fatalf("TestDirect: get server2 times must be 10") } }
explode_data.jsonl/25610
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 201 }
[ 2830, 3393, 16027, 1155, 353, 8840, 836, 8, 341, 86448, 1669, 1855, 2271, 2959, 1155, 11, 330, 19798, 1110, 2258, 14, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, 16, 23, 15, 15, 18, 11, 16, 17, 22, 13, 15, 13, 15, 13, 16, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestHasStep(t *testing.T) { tc := newFakeTableClient() _, flowChan := tc.t.Start() defer tc.t.Stop() for tc.t.State() != common.RunningState { time.Sleep(100 * time.Millisecond) } flowChan <- newICMPFlow(222) flowChan <- newICMPFlow(444) time.Sleep(time.Second) query := `G.Flows().Has("ICMP.ID", 222)` res := execTraversalQuery(t, tc, query) if len(res.Values()) != 1 { t.Fatalf("Should return 1 result, returned: %v", res.Values()) } query = `G.Flows().Has("ICMP.ID", NE(555))` res = execTraversalQuery(t, tc, query) if len(res.Values()) != 2 { t.Fatalf("Should return Z result, returned: %v", res.Values()) } query = `G.Flows().Has("ICMP.ID", NE(555)).Limit(1)` res = execTraversalQuery(t, tc, query) if len(res.Values()) != 1 { t.Fatalf("Should return 1 result, returned: %v", res.Values()) } }
explode_data.jsonl/45638
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 340 }
[ 2830, 3393, 10281, 8304, 1155, 353, 8840, 836, 8, 341, 78255, 1669, 501, 52317, 2556, 2959, 2822, 197, 6878, 6396, 46019, 1669, 17130, 734, 12101, 741, 16867, 17130, 734, 30213, 741, 2023, 17130, 734, 18942, 368, 961, 4185, 2013, 11216, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestCommitFile_String(t *testing.T) { v := CommitFile{ SHA: String(""), Filename: String(""), Additions: Int(0), Deletions: Int(0), Changes: Int(0), Status: String(""), Patch: String(""), BlobURL: String(""), RawURL: String(""), ContentsURL: String(""), PreviousFilename: String(""), } want := `github.CommitFile{SHA:"", Filename:"", Additions:0, Deletions:0, Changes:0, Status:"", Patch:"", BlobURL:"", RawURL:"", ContentsURL:"", PreviousFilename:""}` if got := v.String(); got != want { t.Errorf("CommitFile.String = %v, want %v", got, want) } }
explode_data.jsonl/33228
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 313 }
[ 2830, 3393, 33441, 1703, 31777, 1155, 353, 8840, 836, 8, 341, 5195, 1669, 9205, 1703, 515, 197, 7568, 17020, 25, 1060, 923, 445, 4461, 197, 12727, 4033, 25, 260, 923, 445, 4461, 197, 37972, 5930, 25, 286, 1333, 7, 15, 1326, 197, 197...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestArithmetic(t *testing.T) { record := "{x:10 (int32),f:2.5} (=0)" // Test integer arithmetic testSuccessful(t, "100 + 23", record, zint64(123)) testSuccessful(t, "x + 5", record, zint64(15)) testSuccessful(t, "5 + x", record, zint64(15)) testSuccessful(t, "x - 5", record, zint64(5)) testSuccessful(t, "0 - x", record, zint64(-10)) testSuccessful(t, "x + 5 - 3", record, zint64(12)) testSuccessful(t, "x*2", record, zint64(20)) testSuccessful(t, "5*x*2", record, zint64(100)) testSuccessful(t, "x/3", record, zint64(3)) testSuccessful(t, "20/x", record, zint64(2)) // Test precedence of arithmetic operations testSuccessful(t, "x + 1 * 10", record, zint64(20)) testSuccessful(t, "(x + 1) * 10", record, zint64(110)) // Test arithmetic with floats testSuccessful(t, "f + 1.0", record, zfloat64(3.5)) testSuccessful(t, "1.0 + f", record, zfloat64(3.5)) testSuccessful(t, "f - 1.0", record, zfloat64(1.5)) testSuccessful(t, "0.0 - f", record, zfloat64(-2.5)) testSuccessful(t, "f * 1.5", record, zfloat64(3.75)) testSuccessful(t, "1.5 * f", record, zfloat64(3.75)) testSuccessful(t, "f / 1.25", record, zfloat64(2.0)) testSuccessful(t, "5.0 / f", record, zfloat64(2.0)) width := func(id int) int { switch id { case zed.IDInt8, zed.IDUint8: return 8 case zed.IDInt16, zed.IDUint16: return 16 case zed.IDInt32, zed.IDUint32: return 32 case zed.IDInt64, zed.IDUint64: return 64 } panic("width") } signed := func(width int) zed.Type { switch width { case 8: return zed.TypeInt8 case 16: return zed.TypeInt16 case 32: return zed.TypeInt32 case 64: return zed.TypeInt64 } panic("signed") } unsigned := func(width int) zed.Type { switch width { case 8: return zed.TypeUint8 case 16: return zed.TypeUint16 case 32: return zed.TypeUint32 case 64: return zed.TypeUint64 } panic("signed") } // Test arithmetic between integer types iresult := func(t1, t2 string, v uint64) zed.Value { typ1 := zed.LookupPrimitive(t1) typ2 := zed.LookupPrimitive(t2) id1 := typ1.ID() id2 := typ2.ID() sign1 := 
zed.IsSigned(id1) sign2 := zed.IsSigned(id2) sign := true if sign1 == sign2 { sign = sign1 } w := width(id1) if w2 := width(id2); w2 > w { w = w2 } if sign { return zed.Value{signed(w), zed.AppendInt(nil, int64(v))} } return zed.Value{unsigned(w), zed.AppendUint(nil, v)} } var intTypes = []string{"int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64"} for _, t1 := range intTypes { for _, t2 := range intTypes { record := fmt.Sprintf("{a:4 (%s),b:2 (%s)} (=0)", t1, t2) testSuccessful(t, "a + b", record, iresult(t1, t2, 6)) testSuccessful(t, "b + a", record, iresult(t1, t2, 6)) testSuccessful(t, "a - b", record, iresult(t1, t2, 2)) testSuccessful(t, "a * b", record, iresult(t1, t2, 8)) testSuccessful(t, "b * a", record, iresult(t1, t2, 8)) testSuccessful(t, "a / b", record, iresult(t1, t2, 2)) testSuccessful(t, "b / a", record, iresult(t1, t2, 0)) } // Test arithmetic mixing float + int record = fmt.Sprintf("{x:10 (%s),f:2.5} (=0)", t1) testSuccessful(t, "f + 5", record, zfloat64(7.5)) testSuccessful(t, "5 + f", record, zfloat64(7.5)) testSuccessful(t, "f + x", record, zfloat64(12.5)) testSuccessful(t, "x + f", record, zfloat64(12.5)) testSuccessful(t, "x - f", record, zfloat64(7.5)) testSuccessful(t, "f - x", record, zfloat64(-7.5)) testSuccessful(t, "x*f", record, zfloat64(25.0)) testSuccessful(t, "f*x", record, zfloat64(25.0)) testSuccessful(t, "x/f", record, zfloat64(4.0)) testSuccessful(t, "f/x", record, zfloat64(0.25)) } // Test string concatenation testSuccessful(t, `"hello" + " world"`, record, zstring("hello world")) // Test string arithmetic other than + fails testSuccessful(t, `"hello" - " world"`, record, ZSON(`error("type string incompatible with '-' operator")`)) testSuccessful(t, `"hello" * " world"`, record, ZSON(`error("type string incompatible with '*' operator")`)) testSuccessful(t, `"hello" / " world"`, record, ZSON(`error("type string incompatible with '/' operator")`)) // Test that addition fails on an unsupported type 
testSuccessful(t, "10.1.1.1 + 1", record, ZSON(`error("incompatible types")`)) testSuccessful(t, "10.1.1.1 + 3.14159", record, ZSON(`error("incompatible types")`)) testSuccessful(t, `10.1.1.1 + "foo"`, record, ZSON(`error("incompatible types")`)) }
explode_data.jsonl/2311
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1953 }
[ 2830, 3393, 6953, 25922, 1155, 353, 8840, 836, 8, 341, 71952, 1669, 13868, 87, 25, 16, 15, 320, 396, 18, 17, 701, 69, 25, 17, 13, 20, 92, 38738, 15, 52294, 197, 322, 3393, 7546, 34784, 198, 18185, 36374, 1155, 11, 330, 16, 15, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestIssue2542Deadlock(t *testing.T) { db := newTestDB(t, "people") closeDB(t, db) for i := 0; i < 2; i++ { _, err := db.Query("SELECT|people|age,name|") if err == nil { t.Fatalf("expected error") } } }
explode_data.jsonl/15985
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 100 }
[ 2830, 3393, 42006, 17, 20, 19, 17, 28320, 1023, 1155, 353, 8840, 836, 8, 341, 20939, 1669, 501, 2271, 3506, 1155, 11, 330, 16069, 1138, 27873, 3506, 1155, 11, 2927, 340, 2023, 600, 1669, 220, 15, 26, 600, 366, 220, 17, 26, 600, 10...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRegionTLD(t *testing.T) { for _, tt := range []struct { in, out string ok bool }{ {"EH", "EH", true}, {"FR", "FR", true}, {"TL", "TL", true}, // In ccTLD before in ISO. {"GG", "GG", true}, // Non-standard assignment of ccTLD to ISO code. {"GB", "UK", true}, // Exceptionally reserved in ISO and valid ccTLD. {"UK", "UK", true}, {"AC", "AC", true}, {"EU", "EU", true}, {"SU", "SU", true}, // Exceptionally reserved in ISO and invalid ccTLD. {"CP", "ZZ", false}, {"DG", "ZZ", false}, {"EA", "ZZ", false}, {"FX", "ZZ", false}, {"IC", "ZZ", false}, {"TA", "ZZ", false}, // Transitionally reserved in ISO (e.g. deprecated) but valid ccTLD as // it is still being phased out. {"AN", "AN", true}, {"TP", "TP", true}, // Transitionally reserved in ISO (e.g. deprecated) and invalid ccTLD. // Defined in package language as it has a mapping in CLDR. {"BU", "ZZ", false}, {"CS", "ZZ", false}, {"NT", "ZZ", false}, {"YU", "ZZ", false}, {"ZR", "ZZ", false}, // Not defined in package: SF. // Indeterminately reserved in ISO. // Defined in package language as it has a legacy mapping in CLDR. {"DY", "ZZ", false}, {"RH", "ZZ", false}, {"VD", "ZZ", false}, // Not defined in package: EW, FL, JA, LF, PI, RA, RB, RC, RI, RL, RM, // RN, RP, WG, WL, WV, and YV. // Not assigned in ISO, but legacy definitions in CLDR. {"DD", "ZZ", false}, {"YD", "ZZ", false}, // Normal mappings but somewhat special status in ccTLD. {"BL", "BL", true}, {"MF", "MF", true}, {"BV", "BV", true}, {"SJ", "SJ", true}, // Have values when normalized, but not as is. {"QU", "ZZ", false}, // ISO Private Use. {"AA", "ZZ", false}, {"QM", "ZZ", false}, {"QO", "ZZ", false}, {"XA", "ZZ", false}, {"XK", "ZZ", false}, // Sometimes used for Kosovo, but invalid ccTLD. 
} { if tt.in == "" { continue } r := MustParseRegion(tt.in) var want Region if tt.out != "ZZ" { want = MustParseRegion(tt.out) } tld, err := r.TLD() if got := err == nil; got != tt.ok { t.Errorf("error(%v): got %v; want %v", r, got, tt.ok) } if tld != want { t.Errorf("TLD(%v): got %v; want %v", r, tld, want) } } }
explode_data.jsonl/15843
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 943 }
[ 2830, 3393, 14091, 51, 12335, 1155, 353, 8840, 836, 8, 341, 2023, 8358, 17853, 1669, 2088, 3056, 1235, 341, 197, 17430, 11, 700, 914, 198, 197, 59268, 414, 1807, 198, 197, 59403, 197, 197, 4913, 45589, 497, 330, 45589, 497, 830, 1583,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
6
func TestOpenCollection(t *testing.T) { cleanup := setup.FakeGCPDefaultCredentials(t) defer cleanup() tests := []struct { URL string WantErr bool }{ // OK. {"firestore://myproject/mycoll?name_field=_id", false}, // OK, hierarchical collection. {"firestore://myproject/mycoll/mydoc/subcoll?name_field=_id", false}, // Missing project ID. {"firestore:///mycoll?name_field=_id", true}, // Empty collection. {"firestore://myproject/", true}, // Missing name field. {"firestore://myproject/mycoll", true}, // Invalid param. {"firestore://myproject/mycoll?name_field=_id&param=value", true}, } ctx := context.Background() for _, test := range tests { _, err := docstore.OpenCollection(ctx, test.URL) if (err != nil) != test.WantErr { t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr) } } }
explode_data.jsonl/38990
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 330 }
[ 2830, 3393, 5002, 6482, 1155, 353, 8840, 836, 8, 341, 1444, 60639, 1669, 6505, 991, 726, 38, 7123, 3675, 27025, 1155, 340, 16867, 21290, 2822, 78216, 1669, 3056, 1235, 341, 197, 79055, 257, 914, 198, 197, 17300, 517, 7747, 1807, 198, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestCommandsExactlyOneArgument(t *testing.T) { commands := []commandWithFunction{ {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, {"USER", func(args []string) error { return user(nil, args, nil, "") }}, {"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}} for _, command := range commands { err := command.function([]string{}) if err == nil { t.Fatalf("Error should be present for %s command", command.name) } expectedError := errExactlyOneArgument(command.name) if err.Error() != expectedError.Error() { t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) } } }
explode_data.jsonl/28268
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 309 }
[ 2830, 3393, 30479, 65357, 3966, 9171, 1155, 353, 8840, 836, 8, 341, 197, 24270, 1669, 3056, 5631, 2354, 5152, 515, 197, 197, 4913, 4835, 3221, 34521, 497, 2915, 7356, 3056, 917, 8, 1465, 314, 470, 4981, 1743, 27907, 11, 2827, 11, 2092...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestParsePlainText(t *testing.T) { sqlFile := filepath.Join(t.TempDir(), "tmp.sql") err := ioutil.WriteFile(sqlFile, []byte("plain text"), 0o777) assert.Nil(t, err) _, err = Parse(sqlFile, "go_zero") assert.NotNil(t, err) }
explode_data.jsonl/25572
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 101 }
[ 2830, 3393, 14463, 73248, 1155, 353, 8840, 836, 8, 341, 30633, 1703, 1669, 26054, 22363, 1155, 65009, 6184, 1507, 330, 5173, 10045, 1138, 9859, 1669, 43144, 4073, 1703, 13148, 1703, 11, 3056, 3782, 445, 20772, 1467, 3975, 220, 15, 78, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestPerStmtTaskID(t *testing.T) { store, clean := realtikvtest.CreateMockStoreAndSetup(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table task_id (v int)") tk.MustExec("begin") tk.MustExec("select * from task_id where v > 10") taskID1 := tk.Session().GetSessionVars().StmtCtx.TaskID tk.MustExec("select * from task_id where v < 5") taskID2 := tk.Session().GetSessionVars().StmtCtx.TaskID tk.MustExec("commit") require.NotEqual(t, taskID1, taskID2) }
explode_data.jsonl/5755
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 204 }
[ 2830, 3393, 3889, 31063, 6262, 915, 1155, 353, 8840, 836, 8, 341, 57279, 11, 4240, 1669, 1931, 83, 1579, 85, 1944, 7251, 11571, 6093, 3036, 21821, 1155, 340, 16867, 4240, 2822, 3244, 74, 1669, 1273, 8226, 7121, 2271, 7695, 1155, 11, 3...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMuxMiddlewareStack(t *testing.T) { var stdmwInit, stdmwHandler uint64 stdmw := func(next Handler) Handler { stdmwInit++ return HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { stdmwHandler++ next.ServeHTTP(ctx, rc) }) } _ = stdmw var ctxmwInit, ctxmwHandler uint64 ctxmw := func(next Handler) Handler { ctxmwInit++ return HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { ctxmwHandler++ ctx = context.WithValue(ctx, ctxKey{"count.ctxmwHandler"}, ctxmwHandler) next.ServeHTTP(ctx, rc) }) } var inCtxmwInit, inCtxmwHandler uint64 inCtxmw := func(next Handler) Handler { inCtxmwInit++ return HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { inCtxmwHandler++ next.ServeHTTP(ctx, rc) }) } r := NewRouter() r.Use(stdmw) r.Use(ctxmw) r.Use(func(next Handler) Handler { return HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { if string(rc.Request.URI().Path()) == "/ping" { rc.Write([]byte("pong")) return } next.ServeHTTP(ctx, rc) }) }) var handlerCount uint64 r.With(inCtxmw).Get("/", HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { handlerCount++ ctxmwHandlerCount := ctx.Value(ctxKey{"count.ctxmwHandler"}).(uint64) rc.Write([]byte(fmt.Sprintf("inits:%d reqs:%d ctxValue:%d", ctxmwInit, handlerCount, ctxmwHandlerCount))) })) r.Get("/hi", HandlerFunc(func(ctx context.Context, rc *fasthttp.RequestCtx) { rc.Write([]byte("wooot")) })) ts := NewTestServer(r) defer ts.Close() testRequest(t, ts, "GET", "/", nil) testRequest(t, ts, "GET", "/", nil) var body string _, body = testRequest(t, ts, "GET", "/", nil) if body != "inits:1 reqs:3 ctxValue:3" { t.Fatalf("got: '%s'", body) } _, body = testRequest(t, ts, "GET", "/ping", nil) if body != "pong" { t.Fatalf("got: '%s'", body) } }
explode_data.jsonl/47949
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 827 }
[ 2830, 3393, 44, 2200, 24684, 4336, 1155, 353, 8840, 836, 8, 341, 2405, 1460, 44128, 3803, 11, 1460, 44128, 3050, 2622, 21, 19, 198, 6736, 44128, 1669, 2915, 16913, 19954, 8, 19954, 341, 197, 6736, 44128, 3803, 22940, 197, 853, 19954, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestListProcessor(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) mo := mock_owner.NewMockOwner(ctrl) cp := capture.NewCapture4Test(mo) router := newRouter(cp, newStatusProvider()) // test list processor succeeded api := testCase{url: "/api/v1/processors", method: "GET"} w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(context.Background(), api.method, api.url, nil) router.ServeHTTP(w, req) require.Equal(t, 200, w.Code) var resp []model.ProcessorCommonInfo err := json.NewDecoder(w.Body).Decode(&resp) require.Nil(t, err) require.Equal(t, changeFeedID, resp[0].CfID) }
explode_data.jsonl/75120
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 237 }
[ 2830, 3393, 852, 22946, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 84381, 1669, 342, 316, 1176, 7121, 2051, 1155, 340, 2109, 78, 1669, 7860, 29027, 7121, 11571, 13801, 62100, 340, 52018, 1669, 12322, 7121, 27429, 19, 2271, 12...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChannelBadBlocks(t *testing.T) { t.Parallel() receivedMessages := make(chan *proto.SignedGossipMessage, 1) cs := &cryptoService{} cs.On("VerifyBlock", mock.Anything).Return(nil) adapter := new(gossipAdapterMock) configureAdapter(adapter, discovery.NetworkMember{PKIid: pkiIDInOrg1}) adapter.On("Gossip", mock.Anything) adapter.On("Forward", mock.Anything) gc := NewGossipChannel(pkiIDInOrg1, orgInChannelA, cs, channelA, adapter, &joinChanMsg{}, disabledMetrics) adapter.On("DeMultiplex", mock.Anything).Run(func(args mock.Arguments) { receivedMessages <- args.Get(0).(*proto.SignedGossipMessage) }) // Send a valid block gc.HandleMessage(&receivedMsg{msg: createDataMsg(1, channelA), PKIID: pkiIDInOrg1}) assert.Len(t, receivedMessages, 1) <-receivedMessages // drain // Send a block with wrong channel gc.HandleMessage(&receivedMsg{msg: createDataMsg(2, common.ChainID("B")), PKIID: pkiIDInOrg1}) assert.Len(t, receivedMessages, 0) // Send a block with empty payload dataMsg := createDataMsg(3, channelA) dataMsg.GetDataMsg().Payload = nil gc.HandleMessage(&receivedMsg{msg: dataMsg, PKIID: pkiIDInOrg1}) assert.Len(t, receivedMessages, 0) // Send a block with a bad signature cs.Mock = mock.Mock{} cs.On("VerifyBlock", mock.Anything).Return(errors.New("Bad signature")) gc.HandleMessage(&receivedMsg{msg: createDataMsg(4, channelA), PKIID: pkiIDInOrg1}) assert.Len(t, receivedMessages, 0) }
explode_data.jsonl/66323
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 516 }
[ 2830, 3393, 9629, 17082, 29804, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 741, 17200, 8771, 15820, 1669, 1281, 35190, 353, 15110, 808, 1542, 38, 41473, 2052, 11, 220, 16, 340, 71899, 1669, 609, 35772, 1860, 16094, 71899, 8071, 44...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestBuildDeferredAnotherBuilding(t *testing.T) { log.Printf("In TestBuildDeferredAnotherBuilding()") var bucketName = "default" var index1 = "id_company" var index2 = "id_age" var index3 = "id_age1" e := secondaryindex.DropAllSecondaryIndexes(indexManagementAddress) FailTestIfError(e, "Error in DropAllSecondaryIndexes", t) docsToCreate := generateDocs(200000, "users.prod") UpdateKVDocs(docsToCreate, docs) log.Printf("Setting JSON docs in KV") kvutility.SetKeyValues(docsToCreate, "default", "", clusterconfig.KVAddress) err := secondaryindex.CreateSecondaryIndexAsync(index1, bucketName, indexManagementAddress, "", []string{"company"}, false, []byte("{\"defer_build\": true}"), true, nil) FailTestIfError(err, "Error in creating the index", t) err = secondaryindex.CreateSecondaryIndexAsync(index2, bucketName, indexManagementAddress, "", []string{"age"}, false, []byte("{\"defer_build\": true}"), true, nil) FailTestIfError(err, "Error in creating the index", t) err = secondaryindex.CreateSecondaryIndexAsync(index3, bucketName, indexManagementAddress, "", []string{"age"}, false, []byte("{\"defer_build\": true}"), true, nil) FailTestIfError(err, "Error in creating the index", t) err = secondaryindex.BuildIndex(index3, bucketName, indexManagementAddress, defaultIndexActiveTimeout) if err != nil { FailTestIfError(e, "Error in TestBuildDeferredAnotherBuilding", t) } client, err := secondaryindex.GetOrCreateClient(indexManagementAddress, "test7client") FailTestIfError(err, "Error in TestBuildDeferredAnotherBuilding while creating client", t) defn1, _ := secondaryindex.GetDefnID(client, bucketName, index1) err = secondaryindex.BuildIndexesAsync([]uint64{defn1}, indexManagementAddress, defaultIndexActiveTimeout) FailTestIfError(err, "Error from BuildIndexesAsync of index1", t) time.Sleep(100 * time.Millisecond) err = secondaryindex.BuildIndex(index2, bucketName, indexManagementAddress, defaultIndexActiveTimeout) if err == nil { e := errors.New("Error excpected when build 
index while another build is in progress") FailTestIfError(e, "Error in TestBuildDeferredAnotherBuilding", t) } else { if strings.Contains(err.Error(), "retry building in the background") { log.Printf("Build index failed as expected: %v", err.Error()) } else { log.Printf("Build index did not fail with expected error, instead failed with %v", err) e := errors.New("Build index did not fail") FailTestIfError(e, "Error in TestBuildDeferredAnotherBuilding", t) } } defnID, _ := secondaryindex.GetDefnID(client, bucketName, index1) e = secondaryindex.WaitTillIndexActive(defnID, client, defaultIndexActiveTimeout) if e != nil { FailTestIfError(e, "Error in WaitTillIndexActive", t) } // comment out this test since it depends on timing on when indexer will retry rebuilding index //time.Sleep(1 * time.Second) //err = secondaryindex.BuildIndex(index2, bucketName, indexManagementAddress, defaultIndexActiveTimeout) //FailTestIfNoError(err, "Index2 is expected to build in background. Expected failure when trying to build index2 explicitly, but no failure returned.", t) defnID2, _ := secondaryindex.GetDefnID(client, bucketName, index2) secondaryindex.WaitTillIndexActive(defnID2, client, defaultIndexActiveTimeout) docScanResults := datautility.ExpectedScanResponse_string(docs, "company", "M", "V", 2) scanResults, err := secondaryindex.Range(index1, bucketName, indexScanAddress, []interface{}{"M"}, []interface{}{"V"}, 2, false, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan", t) err = tv.Validate(docScanResults, scanResults) FailTestIfError(err, "Error in scan result validation", t) docScanResults = datautility.ExpectedScanResponse_int64(docs, "age", 30, 50, 1) scanResults, err = secondaryindex.Range(index2, bucketName, indexScanAddress, []interface{}{30}, []interface{}{50}, 1, false, defaultlimit, c.SessionConsistency, nil) FailTestIfError(err, "Error in scan", t) err = tv.Validate(docScanResults, scanResults) FailTestIfError(err, "Error in scan result 
validation", t) }
explode_data.jsonl/59131
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1274 }
[ 2830, 3393, 11066, 88417, 14037, 30133, 1155, 353, 8840, 836, 8, 341, 6725, 19367, 445, 641, 3393, 11066, 88417, 14037, 30133, 368, 5130, 2405, 15621, 675, 284, 330, 2258, 698, 2405, 1922, 16, 284, 330, 307, 33403, 698, 2405, 1922, 17, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestConfigureLocator(t *testing.T) { d := initTestSpheroDriver() d.ConfigureLocator(DefaultLocatorConfig()) data := <-d.packetChannel buf := new(bytes.Buffer) binary.Write(buf, binary.BigEndian, DefaultLocatorConfig()) gobottest.Assert(t, data.body, buf.Bytes()) ret := d.Command("ConfigureLocator")( map[string]interface{}{ "Flags": 1.0, "X": 100.0, "Y": 100.0, "YawTare": 0.0, }, ) gobottest.Assert(t, ret, nil) data = <-d.packetChannel lconfig := LocatorConfig{Flags: 1, X: 100, Y: 100, YawTare: 0} buf = new(bytes.Buffer) binary.Write(buf, binary.BigEndian, lconfig) gobottest.Assert(t, data.body, buf.Bytes()) }
explode_data.jsonl/7292
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 286 }
[ 2830, 3393, 28560, 33831, 1155, 353, 8840, 836, 8, 341, 2698, 1669, 2930, 2271, 50, 759, 2328, 11349, 741, 2698, 78281, 33831, 87874, 33831, 2648, 2398, 8924, 1669, 9119, 67, 67139, 9629, 271, 26398, 1669, 501, 23158, 22622, 340, 2233, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestGetBoolean(t *testing.T) { config := &Config{Object{ "a": Boolean(true), "b": Boolean(false), "c": String("true"), "d": String("yes"), "e": String("on"), "f": String("false"), "g": String("no"), "h": String("off"), "i": String("aa"), "j": Array{Int(5)}, }} t.Run("return zero value(false) for a non-existing boolean", func(t *testing.T) { assertEquals(t, config.GetBoolean("z"), false) }) t.Run("panic if the value is a string that can not be converted to boolean", func(t *testing.T) { assertPanic(t, func() { config.GetBoolean("i") }) }) t.Run("panic if the value is not a boolean or string", func(t *testing.T) { assertPanic(t, func() { config.GetBoolean("j") }) }) var booleanTestCases = []struct { path string expected bool }{ {"a", true}, {"b", false}, {"c", true}, {"d", true}, {"e", true}, {"f", false}, {"g", false}, {"h", false}, } for _, tc := range booleanTestCases { t.Run(tc.path, func(t *testing.T) { assertEquals(t, config.GetBoolean(tc.path), tc.expected) }) } }
explode_data.jsonl/4125
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 443 }
[ 2830, 3393, 1949, 6890, 1155, 353, 8840, 836, 8, 341, 25873, 1669, 609, 2648, 90, 1190, 515, 197, 197, 56693, 788, 6992, 3715, 1326, 197, 197, 1, 65, 788, 6992, 3576, 1326, 197, 197, 96946, 788, 923, 445, 1866, 4461, 197, 197, 44917...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTrunc(t *testing.T) { tpl := `{{ "foooooo" | trunc 3 }}` if err := runt(tpl, "foo"); err != nil { t.Error(err) } }
explode_data.jsonl/63871
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 61 }
[ 2830, 3393, 1282, 1347, 1155, 353, 8840, 836, 8, 341, 3244, 500, 1669, 1565, 2979, 330, 824, 25761, 78, 1, 760, 62850, 220, 18, 3869, 3989, 743, 1848, 1669, 1598, 83, 1155, 500, 11, 330, 7975, 5038, 1848, 961, 2092, 341, 197, 3244, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestProposerSelection3(t *testing.T) { vset := NewValidatorSet([]*Validator{ newValidator([]byte("a"), 1), newValidator([]byte("b"), 1), newValidator([]byte("c"), 1), newValidator([]byte("d"), 1), }) proposerOrder := make([]*Validator, 4) for i := 0; i < 4; i++ { proposerOrder[i] = vset.GetProposer() vset.IncrementProposerPriority(1) } // i for the loop // j for the times // we should go in order for ever, despite some IncrementProposerPriority with times > 1 var i, j int for ; i < 10000; i++ { got := vset.GetProposer().Address expected := proposerOrder[j%4].Address if !bytes.Equal(got, expected) { t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) } // serialize, deserialize, check proposer b := vset.toBytes() vset.fromBytes(b) computed := vset.GetProposer() // findGetProposer() if i != 0 { if !bytes.Equal(got, computed.Address) { t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) } } // times is usually 1 times := 1 mod := (cmn.RandInt() % 5) + 1 if cmn.RandInt()%mod > 0 { // sometimes its up to 5 times = (cmn.RandInt() % 4) + 1 } vset.IncrementProposerPriority(times) j += times } }
explode_data.jsonl/28317
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 547 }
[ 2830, 3393, 2008, 23438, 11177, 18, 1155, 353, 8840, 836, 8, 341, 5195, 746, 1669, 1532, 14256, 1649, 85288, 14256, 515, 197, 8638, 14256, 10556, 3782, 445, 64, 3975, 220, 16, 1326, 197, 8638, 14256, 10556, 3782, 445, 65, 3975, 220, 1...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestUsys(t *testing.T) { usys.Call(usys.USYS_FUNC, 1) usys.Call(usys.USYS_FUNC, 1,2) usys.Call(usys.USYS_FUNC, 1,2,3) usys.Call(usys.USYS_FUNC, 1,2,3,4) usys.Call(usys.USYS_FUNC, 1,2,3,4,5) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6,7) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6,7,8) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6,7,8,9) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6,7,8,9,10) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6,7,8,9,10,11) usys.Call(usys.USYS_FUNC, 1,2,3,4,5,6,7,8,9,10,11,12) ret := usys.Call(usys.USYS_FUNC, 1, 2, 3, 4, 5, 6, 7) expect := int64(0x0007060504030201) if ret != expect { t.Errorf("usys.Call returned the wrong value for its arguments got %x, expected %x", ret, expect) } }
explode_data.jsonl/69709
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 453 }
[ 2830, 3393, 3558, 1047, 1155, 353, 8840, 836, 8, 341, 70175, 1047, 27017, 80644, 1047, 67672, 9394, 20875, 11, 220, 16, 340, 70175, 1047, 27017, 80644, 1047, 67672, 9394, 20875, 11, 220, 16, 11, 17, 340, 70175, 1047, 27017, 80644, 1047,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestResolveError(t *testing.T) { cli := &fakeClient{ nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") }, } idResolver := New(cli, false) _, err := idResolver.Resolve(context.Background(), struct{}{}, "nodeID") assert.EqualError(t, err, "unsupported type") }
explode_data.jsonl/9037
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 134 }
[ 2830, 3393, 56808, 1454, 1155, 353, 8840, 836, 8, 341, 86448, 1669, 609, 30570, 2959, 515, 197, 20831, 58533, 9626, 25, 2915, 6958, 915, 914, 8, 320, 2280, 2178, 21714, 11, 3056, 3782, 11, 1465, 8, 341, 298, 853, 60841, 21714, 22655, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChannelArbitratorAlreadyForceClosed(t *testing.T) { t.Parallel() // We'll create the arbitrator and its backing log to signal that it's // already in the process of being force closed. log := &mockArbitratorLog{ state: StateCommitmentBroadcasted, } chanArbCtx, err := createTestChannelArbitrator(t, log) if err != nil { t.Fatalf("unable to create ChannelArbitrator: %v", err) } chanArb := chanArbCtx.chanArb if err := chanArb.Start(); err != nil { t.Fatalf("unable to start ChannelArbitrator: %v", err) } defer chanArb.Stop() // Then, we'll create a request to signal a force close request to the // channel arbitrator. errChan := make(chan error, 1) respChan := make(chan *wire.MsgTx, 1) select { case chanArb.forceCloseReqs <- &forceCloseReq{ closeTx: respChan, errResp: errChan, }: case <-chanArb.quit: } // Finally, we should ensure that we are not able to do so by seeing // the expected errAlreadyForceClosed error. select { case err = <-errChan: if err != errAlreadyForceClosed { t.Fatalf("expected errAlreadyForceClosed, got %v", err) } case <-time.After(time.Second): t.Fatal("expected to receive error response") } }
explode_data.jsonl/3701
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 426 }
[ 2830, 3393, 9629, 6953, 4489, 81, 850, 38370, 18573, 26884, 1155, 353, 8840, 836, 8, 341, 3244, 41288, 7957, 2822, 197, 322, 1205, 3278, 1855, 279, 57957, 850, 323, 1181, 24668, 1487, 311, 8286, 429, 432, 594, 198, 197, 322, 2669, 304...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
8
func TestUnknownVersion(t *testing.T) { // prepare ctx := context.WithValue(context.Background(), opentelemetry.ContextLogger, logf.Log) nsn := types.NamespacedName{Name: "my-instance"} existing := &v1alpha1.OpenTelemetryCollector{ ObjectMeta: metav1.ObjectMeta{ Name: nsn.Name, Namespace: nsn.Namespace, }, } existing.Status.Version = "0.0.0" // we don't know how to upgrade from 0.0.0 objs := []runtime.Object{existing} s := scheme.Scheme s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.OpenTelemetryCollector{}, &v1alpha1.OpenTelemetryCollectorList{}, ) cl := fake.NewFakeClient(objs...) // test assert.NoError(t, ManagedInstances(ctx, cl)) // verify persisted := &v1alpha1.OpenTelemetryCollector{} assert.NoError(t, cl.Get(context.Background(), nsn, persisted)) assert.Equal(t, "0.0.0", persisted.Status.Version) }
explode_data.jsonl/44844
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 332 }
[ 2830, 3393, 13790, 5637, 1155, 353, 8840, 836, 8, 341, 197, 322, 10549, 198, 20985, 1669, 2266, 26124, 1130, 5378, 19047, 1507, 1179, 6817, 35958, 9328, 7395, 11, 1487, 69, 5247, 340, 84041, 77, 1669, 4494, 98932, 68552, 675, 63121, 25,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestChunkStoreRandom(t *testing.T) { ctx := context.Background() for _, schema := range schemas { t.Run(schema, func(t *testing.T) { store := newTestChunkStore(t, schema) defer store.Stop() // put 100 chunks from 0 to 99 const chunkLen = 2 * 3600 // in seconds for i := 0; i < 100; i++ { ts := model.TimeFromUnix(int64(i * chunkLen)) ch := encoding.New() nc, err := ch.Add(model.SamplePair{ Timestamp: ts, Value: model.SampleValue(float64(i)), }) require.NoError(t, err) require.Nil(t, nc) chunk := NewChunk( userID, model.Fingerprint(1), labels.Labels{ {Name: labels.MetricName, Value: "foo"}, {Name: "bar", Value: "baz"}, }, ch, ts, ts.Add(chunkLen*time.Second).Add(-1*time.Second), ) err = chunk.Encode() require.NoError(t, err) err = store.Put(ctx, []Chunk{chunk}) require.NoError(t, err) } // pick two random numbers and do a query for i := 0; i < 100; i++ { start := rand.Int63n(99 * chunkLen) end := start + 1 + rand.Int63n((99*chunkLen)-start) assert.True(t, start < end) startTime := model.TimeFromUnix(start) endTime := model.TimeFromUnix(end) matchers := []*labels.Matcher{ mustNewLabelMatcher(labels.MatchEqual, labels.MetricName, "foo"), mustNewLabelMatcher(labels.MatchEqual, "bar", "baz"), } chunks, err := store.Get(ctx, userID, startTime, endTime, matchers...) require.NoError(t, err) // We need to check that each chunk is in the time range for _, chunk := range chunks { assert.False(t, chunk.From.After(endTime)) assert.False(t, chunk.Through.Before(startTime)) samples, err := chunk.Samples(chunk.From, chunk.Through) assert.NoError(t, err) assert.Equal(t, 1, len(samples)) // TODO verify chunk contents } // And check we got all the chunks we want numChunks := (end / chunkLen) - (start / chunkLen) + 1 assert.Equal(t, int(numChunks), len(chunks)) } }) } }
explode_data.jsonl/43821
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 911 }
[ 2830, 3393, 28304, 6093, 13999, 1155, 353, 8840, 836, 8, 341, 20985, 1669, 2266, 19047, 2822, 2023, 8358, 10802, 1669, 2088, 61800, 341, 197, 3244, 16708, 42735, 11, 2915, 1155, 353, 8840, 836, 8, 341, 298, 57279, 1669, 501, 2271, 28304...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestFileCopyBadPath(t *testing.T) { eng := New() err := eng.Execute(`$file.copy('test', '/blah/test1');`) if err == nil { t.Fail() } }
explode_data.jsonl/36286
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 65 }
[ 2830, 3393, 1703, 12106, 17082, 1820, 1155, 353, 8840, 836, 8, 341, 197, 826, 1669, 1532, 741, 9859, 1669, 2922, 13827, 5809, 3, 1192, 12232, 492, 1944, 516, 3353, 70614, 12697, 16, 4667, 24183, 743, 1848, 621, 2092, 341, 197, 3244, 5...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
2
func TestToAntreaServices(t *testing.T) { tcpProto := v1.ProtocolTCP portNum := int32(80) tables := []struct { ports []networkingv1.NetworkPolicyPort expValues []networkpolicy.Service }{ { getK8sNetworkPolicyPorts(tcpProto), []networkpolicy.Service{ { Protocol: toAntreaProtocol(&tcpProto), Port: &portNum, }, }, }, } for _, table := range tables { services := toAntreaServices(table.ports) service := services[0] expValue := table.expValues[0] if *service.Protocol != *expValue.Protocol { t.Errorf("Unexpected Antrea Protocol in Antrea Service. Expected %v, got %v", *expValue.Protocol, *service.Protocol) } if *service.Port != *expValue.Port { t.Errorf("Unexpected Antrea Port in Antrea Service. Expected %v, got %v", *expValue.Port, *service.Port) } } }
explode_data.jsonl/82440
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 339 }
[ 2830, 3393, 1249, 17117, 5213, 11025, 1155, 353, 8840, 836, 8, 341, 3244, 4672, 31549, 1669, 348, 16, 54096, 49896, 198, 52257, 4651, 1669, 526, 18, 17, 7, 23, 15, 340, 26481, 82, 1669, 3056, 1235, 341, 197, 197, 3394, 257, 3056, 17...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
4
func TestRouteSimple(t *testing.T) { route1 := &Route{ AppID: id.New().String(), Path: "/some", Image: "foo", Memory: 128, CPUs: 100, Type: "sync", Format: "http", Timeout: 10, IdleTimeout: 10, } err := route1.Validate() if err != nil { t.Fatal("should not have failed, got: ", err) } route2 := &Route{ AppID: id.New().String(), Path: "/some", Image: "foo", Memory: 128, CPUs: 100, Type: "sync", Format: "nonsense", Timeout: 10, IdleTimeout: 10, } err = route2.Validate() if err == nil { t.Fatalf("should have failed route: %#v", route2) } }
explode_data.jsonl/56639
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 359 }
[ 2830, 3393, 4899, 16374, 1155, 353, 8840, 836, 8, 1476, 7000, 2133, 16, 1669, 609, 4899, 515, 197, 59557, 915, 25, 981, 877, 7121, 1005, 703, 3148, 197, 69640, 25, 286, 3521, 14689, 756, 197, 53397, 25, 981, 330, 7975, 756, 197, 920...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestPluginStream(t *testing.T) { grpcClientMock, teardown := setupPluginDriverTests(t) defer teardown(t) plug := plugin.NewPlugin(plugin.Config{ Cmd: "fake-plugin-command", }) defer plug.Close() data := []struct { in driver.StreamInput out driver.StreamOutput outErr error }{ { in: driver.StreamInput{}, out: driver.StreamOutput{}, }, { in: driver.StreamInput{}, out: driver.StreamOutput{ Error: &driver.Error{ Message: "", }, }, outErr: errors.New(""), }, } for _, tt := range data { grpcClientMock.On("Stream", tt.in).Return(tt.out, tt.outErr).Once() out := plug.Stream(tt.in) assert.Equal(t, tt.out, out) } grpcClientMock.AssertNumberOfCalls(t, "Stream", len(data)) }
explode_data.jsonl/37058
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 334 }
[ 2830, 3393, 11546, 3027, 1155, 353, 8840, 836, 8, 341, 197, 56585, 2959, 11571, 11, 49304, 1669, 6505, 11546, 11349, 18200, 1155, 340, 16867, 49304, 1155, 340, 197, 47474, 1669, 9006, 7121, 11546, 46801, 10753, 515, 197, 6258, 2277, 25, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
func TestGetStore(t *testing.T) { // Given store := &mocksStore.StoreInterface{} codec := New(store) // When - Then assert.Equal(t, store, codec.GetStore()) }
explode_data.jsonl/29045
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 62 }
[ 2830, 3393, 1949, 6093, 1155, 353, 8840, 836, 8, 341, 197, 322, 16246, 198, 57279, 1669, 609, 16712, 82, 6093, 38047, 5051, 31483, 43343, 66, 1669, 1532, 31200, 692, 197, 322, 3197, 481, 5005, 198, 6948, 12808, 1155, 11, 3553, 11, 346...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
func TestRequiredL7PolicyCreateOpts(t *testing.T) { // no param specified. res := l7policies.Create(fake.ServiceClient(), l7policies.CreateOpts{}) if res.Err == nil { t.Fatalf("Expected error, got none") } // Action is invalid. res = l7policies.Create(fake.ServiceClient(), l7policies.CreateOpts{ ListenerID: "023f2e34-7806-443b-bfae-16c324569a3d", Action: l7policies.Action("invalid"), }) if res.Err == nil { t.Fatalf("Expected error, but got none") } }
explode_data.jsonl/79632
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 199 }
[ 2830, 3393, 8164, 43, 22, 13825, 4021, 43451, 1155, 353, 8840, 836, 8, 341, 197, 322, 902, 1685, 5189, 624, 10202, 1669, 326, 22, 79, 42038, 7251, 74138, 13860, 2959, 1507, 326, 22, 79, 42038, 7251, 43451, 37790, 743, 592, 27862, 621,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestRemovePod(t *testing.T) { // Enable volumesOnNodeForBalancing to do balanced resource allocation utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) nodeName := "node" basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) tests := []struct { pod *v1.Pod wNodeInfo *NodeInfo }{{ pod: basePod, wNodeInfo: &NodeInfo{ requestedResource: &Resource{ MilliCPU: 100, Memory: 500, }, nonzeroRequest: &Resource{ MilliCPU: 100, Memory: 500, }, TransientInfo: newTransientSchedulerInfo(), allocatableResource: &Resource{}, pods: []*v1.Pod{basePod}, usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), imageStates: make(map[string]*ImageStateSummary), }, }} for i, tt := range tests { cache := newSchedulerCache(time.Second, time.Second, nil) if err := cache.AddPod(tt.pod); err != nil { t.Fatalf("AddPod failed: %v", err) } n := cache.nodes[nodeName] deepEqualWithoutGeneration(t, i, n, tt.wNodeInfo) if err := cache.RemovePod(tt.pod); err != nil { t.Fatalf("RemovePod failed: %v", err) } n = cache.nodes[nodeName] if n != nil { t.Errorf("#%d: expecting pod deleted and nil node info, get=%s", i, n) } } }
explode_data.jsonl/19652
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 605 }
[ 2830, 3393, 13021, 23527, 1155, 353, 8840, 836, 8, 341, 197, 322, 18567, 26282, 1925, 1955, 2461, 37889, 8974, 311, 653, 23831, 5101, 23757, 198, 79138, 12753, 13275, 13859, 42318, 4202, 28197, 17305, 4430, 82, 11265, 497, 4419, 1785, 497...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5
func TestFloat64ArrayScanNil(t *testing.T) { arr := Float64Array{5, 5, 5} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } }
explode_data.jsonl/5320
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 100 }
[ 2830, 3393, 5442, 21, 19, 1857, 26570, 19064, 1155, 353, 8840, 836, 8, 341, 36511, 1669, 13001, 21, 19, 1857, 90, 20, 11, 220, 20, 11, 220, 20, 532, 9859, 1669, 2890, 54874, 27907, 692, 743, 1848, 961, 2092, 341, 197, 3244, 30762, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
3
func TestDuplicateTagsWithNoMatchFuncV6(t *testing.T) { matchFunc := MatchesFunc(nil) tree := NewTreeV6() wasAdded, count, err := tree.Add(ipv6FromString("2001:db8:0:0:0:0:2:1/128", 128), "FOO", matchFunc) assert.True(t, wasAdded) assert.Equal(t, 1, count) assert.NoError(t, err) wasAdded, count, err = tree.Add(ipv6FromString("2001:db8:0:0:0:0:2:2/128", 128), "BAR", matchFunc) assert.True(t, wasAdded) assert.Equal(t, 1, count) assert.NoError(t, err) // add another at previous node wasAdded, count, err = tree.Add(ipv6FromString("2001:db8:0:0:0:0:2:2/128", 128), "FOOBAR", matchFunc) assert.True(t, wasAdded) assert.Equal(t, 2, count) assert.NoError(t, err) // add a dupe to the previous node - will be fine since match is nil wasAdded, count, err = tree.Add(ipv6FromString("2001:db8:0:0:0:0:2:2/128", 128), "BAR", matchFunc) assert.True(t, wasAdded) assert.Equal(t, 3, count) assert.NoError(t, err) }
explode_data.jsonl/79273
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 404 }
[ 2830, 3393, 53979, 15930, 2354, 2753, 8331, 9626, 53, 21, 1155, 353, 8840, 836, 8, 341, 47706, 9626, 1669, 61254, 9626, 27907, 692, 51968, 1669, 1532, 6533, 53, 21, 2822, 6692, 300, 19337, 11, 1760, 11, 1848, 1669, 4916, 1904, 23443, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestTypedef(t *testing.T) { nodes := map[string]Node{ `0x7f84d10dc1d0 '__darwin_ssize_t'`: &Typedef{ Addr: 0x7f84d10dc1d0, Type: "__darwin_ssize_t", ChildNodes: []Node{}, }, } runNodeTests(t, nodes) }
explode_data.jsonl/34281
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 135 }
[ 2830, 3393, 12834, 4219, 1155, 353, 8840, 836, 8, 341, 79756, 1669, 2415, 14032, 60, 1955, 515, 197, 197, 63, 15, 87, 22, 69, 23, 19, 67, 16, 15, 7628, 16, 67, 15, 12112, 98765, 643, 2141, 528, 6, 44622, 609, 12834, 4219, 515, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestMonitor_StoreStatistics(t *testing.T) { done := make(chan struct{}) defer close(done) ch := make(chan models.Points) var mc MetaClient mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { if got, want := name, monitor.DefaultStoreDatabase; got != want { t.Errorf("unexpected database: got=%q want=%q", got, want) } if got, want := spec.Name, monitor.MonitorRetentionPolicy; got != want { t.Errorf("unexpected retention policy: got=%q want=%q", got, want) } if spec.Duration != nil { if got, want := *spec.Duration, monitor.MonitorRetentionPolicyDuration; got != want { t.Errorf("unexpected duration: got=%q want=%q", got, want) } } else { t.Error("expected duration in retention policy spec") } if spec.ReplicaN != nil { if got, want := *spec.ReplicaN, monitor.MonitorRetentionPolicyReplicaN; got != want { t.Errorf("unexpected replica number: got=%q want=%q", got, want) } } else { t.Error("expected replica number in retention policy spec") } return &meta.DatabaseInfo{Name: name}, nil } var pw PointsWriter pw.WritePointsFn = func(database, policy string, points models.Points) error { // Verify that we are attempting to write to the correct database. if got, want := database, monitor.DefaultStoreDatabase; got != want { t.Errorf("unexpected database: got=%q want=%q", got, want) } if got, want := policy, monitor.MonitorRetentionPolicy; got != want { t.Errorf("unexpected retention policy: got=%q want=%q", got, want) } // Attempt to write the points to the main goroutine. select { case <-done: case ch <- points: } return nil } config := monitor.NewConfig() config.StoreInterval = toml.Duration(10 * time.Millisecond) s := monitor.New(nil, config) s.MetaClient = &mc s.PointsWriter = &pw if err := s.Open(); err != nil { t.Fatalf("unexpected error: %s", err) } defer s.Close() timer := time.NewTimer(100 * time.Millisecond) select { case points := <-ch: timer.Stop() // Search for the runtime statistic. 
found := false for _, pt := range points { if !bytes.Equal(pt.Name(), []byte("runtime")) { continue } // There should be a hostname. if got := pt.Tags().GetString("hostname"); len(got) == 0 { t.Errorf("expected hostname tag") } // This should write on an exact interval of 10 milliseconds. if got, want := pt.Time(), pt.Time().Truncate(10*time.Millisecond); got != want { t.Errorf("unexpected time: got=%q want=%q", got, want) } found = true break } if !found { t.Error("unable to find runtime statistic") } case <-timer.C: t.Errorf("timeout while waiting for statistics to be written") } }
explode_data.jsonl/7524
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1003 }
[ 2830, 3393, 30098, 92684, 38599, 1155, 353, 8840, 836, 8, 341, 40495, 1669, 1281, 35190, 2036, 37790, 16867, 3265, 34232, 340, 23049, 1669, 1281, 35190, 4119, 89270, 692, 2405, 19223, 15819, 2959, 198, 97662, 7251, 5988, 2354, 12020, 78165,...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
7
func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) { m := Metric() b := setup(NewBuffer("test", 5)) b.Add(m, m, m, m, m) batch := b.Batch(5) b.Add(m, m, m, m, m) b.Reject(batch) require.Equal(t, int64(5), b.MetricsDropped.Get()) require.Equal(t, int64(0), b.MetricsWritten.Get()) }
explode_data.jsonl/17686
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 141 }
[ 2830, 3393, 4095, 1668, 754, 78413, 35, 3702, 1918, 25569, 21074, 1155, 353, 8840, 836, 8, 341, 2109, 1669, 52458, 741, 2233, 1669, 6505, 35063, 4095, 445, 1944, 497, 220, 20, 4390, 2233, 1904, 1255, 11, 296, 11, 296, 11, 296, 11, 2...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestUnsub(t *testing.T) { ps := New(1) defer ps.Shutdown() ch := ps.Sub("t1") ps.Pub("hi", "t1") ps.Unsub(ch, "t1") checkContents(t, ch, []string{"hi"}) }
explode_data.jsonl/44251
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 82 }
[ 2830, 3393, 1806, 1966, 1155, 353, 8840, 836, 8, 341, 35009, 1669, 1532, 7, 16, 340, 16867, 4726, 10849, 18452, 2822, 23049, 1669, 4726, 12391, 445, 83, 16, 5130, 35009, 1069, 392, 445, 6023, 497, 330, 83, 16, 1138, 35009, 10616, 1966...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
1
// TestChaincodeInvokeOrQuery_waitForEvent exercises ChaincodeInvokeOrQuery
// with waitForEvent enabled, covering: success when deliver clients report the
// expected txid; success when a client first emits unrelated blocks; failure
// when a deliver client errors, when the transaction commits with a
// non-success validation code, when deliver returns a status instead of a
// block, and when no client reports the txid before the timeout.
func TestChaincodeInvokeOrQuery_waitForEvent(t *testing.T) {
	// Package-level flags are mutated below; restore them when done.
	defer resetFlags()

	waitForEvent = true
	mockCF, err := getMockChaincodeCmdFactory()
	assert.NoError(t, err)
	peerAddresses = []string{"peer0", "peer1"}
	channelID := "testchannel"
	txID := "txid0"

	t.Run("success - deliver clients returns event with expected txid", func(t *testing.T) {
		_, err = ChaincodeInvokeOrQuery(
			&pb.ChaincodeSpec{},
			channelID,
			txID,
			true,
			mockCF.Signer,
			mockCF.Certificate,
			mockCF.EndorserClients,
			mockCF.DeliverClients,
			mockCF.BroadcastClient,
		)
		assert.NoError(t, err)
	})

	t.Run("success - one deliver client first receives block without txid and then one with txid", func(t *testing.T) {
		// First client streams two blocks: one with unrelated txids, then one
		// containing the expected txid. Second client reports the txid directly.
		filteredBlocks := []*pb.FilteredBlock{
			createFilteredBlock(pb.TxValidationCode_VALID, "theseare", "notthetxidsyouarelookingfor"),
			createFilteredBlock(pb.TxValidationCode_VALID, "txid0"),
		}
		mockDCTwoBlocks := getMockDeliverClientRespondsWithFilteredBlocks(filteredBlocks)
		mockDC := getMockDeliverClientResponseWithTxStatusAndID(pb.TxValidationCode_VALID, "txid0")
		mockDeliverClients := []pb.DeliverClient{mockDCTwoBlocks, mockDC}

		_, err = ChaincodeInvokeOrQuery(
			&pb.ChaincodeSpec{},
			channelID,
			txID,
			true,
			mockCF.Signer,
			mockCF.Certificate,
			mockCF.EndorserClients,
			mockDeliverClients,
			mockCF.BroadcastClient,
		)
		assert.NoError(t, err)
	})

	t.Run("failure - one of the deliver clients returns error", func(t *testing.T) {
		// Any deliver client error should surface through the returned error.
		mockDCErr := getMockDeliverClientWithErr("moist")
		mockDC := getMockDeliverClientResponseWithTxStatusAndID(pb.TxValidationCode_VALID, "txid0")
		mockDeliverClients := []pb.DeliverClient{mockDCErr, mockDC}

		_, err = ChaincodeInvokeOrQuery(
			&pb.ChaincodeSpec{},
			channelID,
			txID,
			true,
			mockCF.Signer,
			mockCF.Certificate,
			mockCF.EndorserClients,
			mockDeliverClients,
			mockCF.BroadcastClient,
		)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "moist")
	})

	t.Run("failure - transaction committed with non-success validation code", func(t *testing.T) {
		// One client reports VALID, the other ENDORSEMENT_POLICY_FAILURE; the
		// failure must win and produce an error.
		mockDC := getMockDeliverClientResponseWithTxStatusAndID(pb.TxValidationCode_VALID, "txid0")
		mockDCFail := getMockDeliverClientResponseWithTxStatusAndID(pb.TxValidationCode_ENDORSEMENT_POLICY_FAILURE, "txid0")
		mockDeliverClients := []pb.DeliverClient{mockDCFail, mockDC}

		_, err = ChaincodeInvokeOrQuery(
			&pb.ChaincodeSpec{},
			channelID,
			txID,
			true,
			mockCF.Signer,
			mockCF.Certificate,
			mockCF.EndorserClients,
			mockDeliverClients,
			mockCF.BroadcastClient,
		)
		assert.Error(t, err)
		assert.Equal(t, err.Error(), "transaction invalidated with status (ENDORSEMENT_POLICY_FAILURE)")
	})

	t.Run("failure - deliver returns response status instead of block", func(t *testing.T) {
		// Deliver ends the stream with a FORBIDDEN status before any block
		// carrying the txid arrives.
		mockDC := &mock.PeerDeliverClient{}
		mockDF := &mock.Deliver{}
		resp := &pb.DeliverResponse{
			Type: &pb.DeliverResponse_Status{
				Status: cb.Status_FORBIDDEN,
			},
		}
		mockDF.RecvReturns(resp, nil)
		mockDC.DeliverFilteredReturns(mockDF, nil)
		mockDeliverClients := []pb.DeliverClient{mockDC}

		_, err = ChaincodeInvokeOrQuery(
			&pb.ChaincodeSpec{},
			channelID,
			txID,
			true,
			mockCF.Signer,
			mockCF.Certificate,
			mockCF.EndorserClients,
			mockDeliverClients,
			mockCF.BroadcastClient,
		)
		assert.Error(t, err)
		assert.Equal(t, err.Error(), "deliver completed with status (FORBIDDEN) before txid received")
	})

	t.Run(" failure - timeout occurs - both deliver clients don't return an event with the expected txid before timeout", func(t *testing.T) {
		// Both clients hold their response until delayChan is closed, which
		// happens only after the (10ms) waitForEventTimeout has expired.
		delayChan := make(chan struct{})
		mockDCDelay := getMockDeliverClientRespondAfterDelay(delayChan, pb.TxValidationCode_VALID, "txid0")
		mockDeliverClients := []pb.DeliverClient{mockDCDelay, mockDCDelay}
		waitForEventTimeout = 10 * time.Millisecond

		_, err = ChaincodeInvokeOrQuery(
			&pb.ChaincodeSpec{},
			channelID,
			txID,
			true,
			mockCF.Signer,
			mockCF.Certificate,
			mockCF.EndorserClients,
			mockDeliverClients,
			mockCF.BroadcastClient,
		)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "timed out")
		close(delayChan)
	})
}
explode_data.jsonl/46379
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 1718 }
[ 2830, 3393, 18837, 1851, 17604, 2195, 2859, 18760, 2461, 1556, 1155, 353, 8840, 836, 8, 341, 16867, 7585, 9195, 2822, 48750, 2461, 1556, 284, 830, 198, 77333, 9650, 11, 1848, 1669, 633, 11571, 18837, 1851, 15613, 4153, 741, 6948, 35699, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestConstExpr_error(t *testing.T) { env := map[string]interface{}{ "divide": func(a, b int) int { return a / b }, } _, err := expr.Compile( `1 + divide(1, 0)`, expr.Env(env), expr.ConstExpr("divide"), ) require.Error(t, err) require.Equal(t, "compile error: integer divide by zero (1:5)\n | 1 + divide(1, 0)\n | ....^", err.Error()) }
explode_data.jsonl/36909
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 153 }
[ 2830, 3393, 19167, 16041, 4096, 1155, 353, 8840, 836, 8, 341, 57538, 1669, 2415, 14032, 31344, 67066, 197, 197, 1, 59394, 788, 2915, 2877, 11, 293, 526, 8, 526, 314, 470, 264, 608, 293, 1153, 197, 630, 197, 6878, 1848, 1669, 15169, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
1
func TestVPNFirewallRemote(t *testing.T) { RegisterTestingT(t) if testing.Short() { t.Skip("Skip, please run without -short") return } testVPN(t, ptNum, 2, map[string]int{ "vppagent-firewall-nse-1": 1, "vppagent-passthrough-nse": 0, "vpn-gateway-nse-1": 0, "vpn-gateway-nsc-1": 0, }, false) }
explode_data.jsonl/30709
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 161 }
[ 2830, 3393, 55662, 16697, 16431, 24703, 1155, 353, 8840, 836, 8, 341, 79096, 16451, 51, 1155, 692, 743, 7497, 55958, 368, 341, 197, 3244, 57776, 445, 35134, 11, 4486, 1598, 2041, 481, 8676, 1138, 197, 853, 198, 197, 630, 18185, 55662, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
2
// TestSuccessfulFindCommitsRequest exercises the FindCommits RPC against a
// seeded test repository, covering revision selection with limits, default
// limits, path filtering, before/after timestamps, merge skipping, rename
// following, and the all-refs mode. Expected commit SHAs are fixtures from
// the test repository.
func TestSuccessfulFindCommitsRequest(t *testing.T) {
	server, serverSocketPath := startTestServices(t)
	defer server.Stop()

	client, conn := newCommitServiceClient(t, serverSocketPath)
	defer conn.Close()

	testRepo, _, cleanupFn := testhelper.NewTestRepo(t)
	defer cleanupFn()

	testCases := []struct {
		desc    string
		request *gitalypb.FindCommitsRequest
		// Use 'ids' if you know the exact commit IDs that should be returned.
		ids []string
		// Use minCommits if you don't know the exact commit IDs.
		minCommits int
	}{
		{
			desc: "only revision, limit commits",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Revision:   []byte("0031876facac3f2b2702a0e53a26e89939a42209"),
				Limit:      3,
			},
			ids: []string{
				"0031876facac3f2b2702a0e53a26e89939a42209",
				"bf6e164cac2dc32b1f391ca4290badcbe4ffc5fb",
				"48ca272b947f49eee601639d743784a176574a09",
			},
		},
		{
			desc: "revision, default commit limit",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Revision:   []byte("0031876facac3f2b2702a0e53a26e89939a42209"),
			},
		},
		{
			desc: "revision, default commit limit, bypassing rugged walk",
			request: &gitalypb.FindCommitsRequest{
				Repository:  testRepo,
				Revision:    []byte("0031876facac3f2b2702a0e53a26e89939a42209"),
				DisableWalk: true,
			},
		},
		{
			desc: "revision and paths",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Revision:   []byte("0031876facac3f2b2702a0e53a26e89939a42209"),
				Paths:      [][]byte{[]byte("LICENSE")},
				Limit:      10,
			},
			ids: []string{"1a0b36b3cdad1d2ee32457c102a8c0b7056fa863"},
		},
		{
			desc: "empty revision",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Limit:      35,
			},
			minCommits: 35,
		},
		{
			desc: "before and after",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Before:     &timestamp.Timestamp{Seconds: 1483225200},
				After:      &timestamp.Timestamp{Seconds: 1472680800},
				Limit:      10,
			},
			ids: []string{
				"b83d6e391c22777fca1ed3012fce84f633d7fed0",
				"498214de67004b1da3d820901307bed2a68a8ef6",
			},
		},
		{
			desc: "no merges",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Revision:   []byte("e63f41fe459e62e1228fcef60d7189127aeba95a"),
				SkipMerges: true,
				Limit:      10,
			},
			ids: []string{
				"4a24d82dbca5c11c61556f3b35ca472b7463187e",
				"498214de67004b1da3d820901307bed2a68a8ef6",
				"38008cb17ce1466d8fec2dfa6f6ab8dcfe5cf49e",
				"c347ca2e140aa667b968e51ed0ffe055501fe4f4",
				"d59c60028b053793cecfb4022de34602e1a9218e",
				"a5391128b0ef5d21df5dd23d98557f4ef12fae20",
				"54fcc214b94e78d7a41a9a8fe6d87a5e59500e51",
				"048721d90c449b244b7b4c53a9186b04330174ec",
				"5f923865dde3436854e9ceb9cdb7815618d4e849",
				"2ea1f3dec713d940208fb5ce4a38765ecb5d3f73",
			},
		},
		{
			desc: "following renames",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				Revision:   []byte("94bb47ca1297b7b3731ff2a36923640991e9236f"),
				Paths:      [][]byte{[]byte("CHANGELOG.md")},
				Follow:     true,
				Limit:      10,
			},
			ids: []string{
				"94bb47ca1297b7b3731ff2a36923640991e9236f",
				"5f923865dde3436854e9ceb9cdb7815618d4e849",
				"913c66a37b4a45b9769037c55c2d238bd0942d2e",
			},
		},
		{
			desc: "all refs",
			request: &gitalypb.FindCommitsRequest{
				Repository: testRepo,
				All:        true,
				Limit:      90,
			},
			minCommits: 90,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := testhelper.Context()
			defer cancel()

			stream, err := client.FindCommits(ctx, tc.request)
			require.NoError(t, err)

			// Drain the response stream, collecting commit IDs until the
			// server terminates it (expected terminal error is io.EOF).
			var ids []string
			for err == nil {
				var resp *gitalypb.FindCommitsResponse
				resp, err = stream.Recv()
				for _, c := range resp.GetCommits() {
					ids = append(ids, c.Id)
				}
			}
			require.Equal(t, io.EOF, err)

			// Cases with minCommits only assert a lower bound on the count.
			if tc.minCommits > 0 {
				require.True(t, len(ids) >= tc.minCommits, "expected at least %d commits, got %d", tc.minCommits, len(ids))
				return
			}

			require.Equal(t, len(tc.ids), len(ids))
			for i, id := range tc.ids {
				require.Equal(t, id, ids[i])
			}
		})
	}
}
explode_data.jsonl/26118
{ "file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl", "token_count": 2159 }
[ 2830, 3393, 36374, 9885, 17977, 1199, 1900, 1155, 353, 8840, 836, 8, 341, 41057, 11, 3538, 10286, 1820, 1669, 1191, 2271, 11025, 1155, 340, 16867, 3538, 30213, 2822, 25291, 11, 4534, 1669, 501, 33441, 1860, 2959, 1155, 11, 3538, 10286, ...
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
5