text stringlengths 93 16.4k | id stringlengths 20 40 | metadata dict | input_ids listlengths 45 2.05k | attention_mask listlengths 45 2.05k | complexity int64 1 9 |
|---|---|---|---|---|---|
func TestGetOrderbook(t *testing.T) {
t.Parallel()
_, err := b.GetOrderbook("BTC", "AUD")
if err != nil {
t.Error("Test failed - GetOrderbook() error", err)
}
} | explode_data.jsonl/48988 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 67
} | [
2830,
3393,
1949,
4431,
2190,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
197,
6878,
1848,
1669,
293,
2234,
4431,
2190,
445,
59118,
497,
330,
61278,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
6141,
445,
2271,
4641,
481,
212... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestValidCIDR(t *testing.T) {
var tests = []validationTest{
{
value: "0.0.0.0/0",
shouldErr: false,
},
{
value: "1.1.1.1/32",
shouldErr: false,
},
{
value: "192.168.0.0/16",
shouldErr: false,
},
{
value: "255.255.255.255/1",
shouldErr: false,
},
{
value: "8.8.8.8/33",
shouldErr: true,
},
{
value: "12.1",
shouldErr: true,
},
{
value: "1",
shouldErr: true,
},
{
value: "a string!",
shouldErr: true,
},
{
value: "192.168.1.1/8/",
shouldErr: true,
},
}
runValidations(t, tests, "cidr", IsValidCIDR)
} | explode_data.jsonl/77930 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 381
} | [
2830,
3393,
4088,
54146,
49,
1155,
353,
8840,
836,
8,
341,
2405,
7032,
284,
3056,
12284,
2271,
515,
197,
197,
515,
298,
16309,
25,
257,
330,
15,
13,
15,
13,
15,
13,
15,
14,
15,
756,
298,
197,
5445,
7747,
25,
895,
345,
197,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSwitch(t *testing.T) {
files := []int{1, 2, 3}
for i, _ := range files {
switch {
case i == 0:
log.Println("json")
case i == 1:
log.Println("html")
case i == 2:
log.Println("pdf")
default:
log.Println("error")
}
}
} | explode_data.jsonl/67075 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 137
} | [
2830,
3393,
16837,
1155,
353,
8840,
836,
8,
972,
74075,
1669,
3056,
396,
90,
16,
11,
220,
17,
11,
220,
18,
2570,
2023,
600,
11,
716,
1669,
2088,
3542,
972,
197,
8961,
972,
197,
2722,
600,
621,
220,
15,
2834,
298,
6725,
12419,
445,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestSANEmptyName(t *testing.T) {
inputPath := "SANEmptyName.pem"
expected := lint.Error
out := test.TestLint("e_ext_san_empty_name", inputPath)
if out.Status != expected {
t.Errorf("%s: expected %s, got %s", inputPath, expected, out.Status)
}
} | explode_data.jsonl/75679 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 99
} | [
2830,
3393,
68691,
3522,
675,
1155,
353,
8840,
836,
8,
341,
22427,
1820,
1669,
330,
68691,
3522,
675,
49373,
698,
42400,
1669,
57920,
6141,
198,
13967,
1669,
1273,
8787,
47556,
445,
68,
9927,
87866,
15124,
1269,
497,
1946,
1820,
340,
74... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestUpdateChannelRoles(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
const CHANNEL_ADMIN = "channel_admin channel_user"
const CHANNEL_MEMBER = "channel_user"
// User 1 creates a channel, making them channel admin by default.
channel := th.CreatePublicChannel()
// Adds User 2 to the channel, making them a channel member by default.
th.App.AddUserToChannel(th.BasicUser2, channel)
// User 1 promotes User 2
pass, resp := Client.UpdateChannelRoles(channel.Id, th.BasicUser2.Id, CHANNEL_ADMIN)
CheckNoError(t, resp)
if !pass {
t.Fatal("should have passed")
}
member, resp := Client.GetChannelMember(channel.Id, th.BasicUser2.Id, "")
CheckNoError(t, resp)
if member.Roles != CHANNEL_ADMIN {
t.Fatal("roles don't match")
}
// User 1 demotes User 2
_, resp = Client.UpdateChannelRoles(channel.Id, th.BasicUser2.Id, CHANNEL_MEMBER)
CheckNoError(t, resp)
th.LoginBasic2()
// User 2 cannot demote User 1
_, resp = Client.UpdateChannelRoles(channel.Id, th.BasicUser.Id, CHANNEL_MEMBER)
CheckForbiddenStatus(t, resp)
// User 2 cannot promote self
_, resp = Client.UpdateChannelRoles(channel.Id, th.BasicUser2.Id, CHANNEL_ADMIN)
CheckForbiddenStatus(t, resp)
th.LoginBasic()
// User 1 demotes self
_, resp = Client.UpdateChannelRoles(channel.Id, th.BasicUser.Id, CHANNEL_MEMBER)
CheckNoError(t, resp)
// System Admin promotes User 1
_, resp = th.SystemAdminClient.UpdateChannelRoles(channel.Id, th.BasicUser.Id, CHANNEL_ADMIN)
CheckNoError(t, resp)
// System Admin demotes User 1
_, resp = th.SystemAdminClient.UpdateChannelRoles(channel.Id, th.BasicUser.Id, CHANNEL_MEMBER)
CheckNoError(t, resp)
// System Admin promotes User 1
pass, resp = th.SystemAdminClient.UpdateChannelRoles(channel.Id, th.BasicUser.Id, CHANNEL_ADMIN)
CheckNoError(t, resp)
th.LoginBasic()
_, resp = Client.UpdateChannelRoles(channel.Id, th.BasicUser.Id, "junk")
CheckBadRequestStatus(t, resp)
_, resp = Client.UpdateChannelRoles(channel.Id, "junk", CHANNEL_MEMBER)
CheckBadRequestStatus(t, resp)
_, resp = Client.UpdateChannelRoles("junk", th.BasicUser.Id, CHANNEL_MEMBER)
CheckBadRequestStatus(t, resp)
_, resp = Client.UpdateChannelRoles(channel.Id, model.NewId(), CHANNEL_MEMBER)
CheckNotFoundStatus(t, resp)
_, resp = Client.UpdateChannelRoles(model.NewId(), th.BasicUser.Id, CHANNEL_MEMBER)
CheckForbiddenStatus(t, resp)
} | explode_data.jsonl/65665 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 868
} | [
2830,
3393,
4289,
9629,
25116,
1155,
353,
8840,
836,
8,
341,
70479,
1669,
18626,
1005,
3803,
15944,
1005,
3803,
2320,
7210,
741,
16867,
270,
836,
682,
4454,
741,
71724,
1669,
270,
11716,
271,
4777,
58756,
30460,
284,
330,
10119,
12207,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestBSTree_LevelOrderTraverse(t *testing.T) {
assert := internal.NewAssert(t, "TestBSTree_LevelOrderTraverse")
bstree := NewBSTree(6, &intComparator{})
bstree.InsertNode(7)
bstree.InsertNode(5)
bstree.InsertNode(2)
bstree.InsertNode(4)
bstree.Print()
acturl := bstree.LevelOrderTraverse()
t.Log(acturl)
assert.Equal([]int{6, 5, 7, 2, 4}, acturl)
} | explode_data.jsonl/41663 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
61006,
765,
78861,
4431,
1282,
22439,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
5306,
7121,
8534,
1155,
11,
330,
2271,
61006,
765,
78861,
4431,
1282,
22439,
5130,
2233,
267,
765,
1669,
1532,
61006,
765,
7,
21,
11,
609,
396... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRecoverEvaluatorError(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()}
var err error
e := errors.New("custom error")
defer func() {
require.EqualError(t, err, e.Error())
}()
defer ev.recover(nil, &err)
panic(e)
} | explode_data.jsonl/35558 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 104
} | [
2830,
3393,
693,
3688,
89042,
1454,
1155,
353,
8840,
836,
8,
341,
74837,
1669,
609,
14170,
45162,
90,
9786,
25,
1487,
7121,
45,
453,
7395,
23509,
2405,
1848,
1465,
271,
7727,
1669,
5975,
7121,
445,
9163,
1465,
5130,
16867,
2915,
368,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPatchDevice(t *testing.T) {
expectedRequestId := ExampleUUID
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
testReq := buildTestUpdateDeviceRequest()
dsModels := models.Device{
Id: *testReq.Device.Id,
Name: *testReq.Device.Name,
Description: *testReq.Device.Description,
Labels: testReq.Device.Labels,
AdminState: models.AdminState(*testReq.Device.AdminState),
OperatingState: models.OperatingState(*testReq.Device.OperatingState),
LastConnected: *testReq.Device.LastConnected,
LastReported: *testReq.Device.LastReported,
Location: testReq.Device.Location,
ServiceName: *testReq.Device.ServiceName,
ProfileName: *testReq.Device.ProfileName,
AutoEvents: dtos.ToAutoEventModels(testReq.Device.AutoEvents),
Protocols: dtos.ToProtocolModels(testReq.Device.Protocols),
Notify: *testReq.Device.Notify,
}
valid := testReq
dbClientMock.On("DeviceServiceNameExists", *valid.Device.ServiceName).Return(true, nil)
dbClientMock.On("DeviceProfileNameExists", *valid.Device.ProfileName).Return(true, nil)
dbClientMock.On("DeviceById", *valid.Device.Id).Return(dsModels, nil)
dbClientMock.On("UpdateDevice", mock.Anything).Return(nil)
dbClientMock.On("DeviceServiceByName", *valid.Device.ServiceName).Return(models.DeviceService{BaseAddress: testBaseAddress}, nil)
validWithNoReqID := testReq
validWithNoReqID.RequestId = ""
validWithNoId := testReq
validWithNoId.Device.Id = nil
dbClientMock.On("DeviceByName", *validWithNoId.Device.Name).Return(dsModels, nil)
validWithNoName := testReq
validWithNoName.Device.Name = nil
invalidId := testReq
invalidUUID := "invalidUUID"
invalidId.Device.Id = &invalidUUID
emptyString := ""
emptyId := testReq
emptyId.Device.Id = &emptyString
emptyId.Device.Name = nil
emptyName := testReq
emptyName.Device.Id = nil
emptyName.Device.Name = &emptyString
invalidNoIdAndName := testReq
invalidNoIdAndName.Device.Id = nil
invalidNoIdAndName.Device.Name = nil
invalidNotFoundId := testReq
invalidNotFoundId.Device.Name = nil
notFoundId := "12345678-1111-1234-5678-de9dac3fb9bc"
invalidNotFoundId.Device.Id = ¬FoundId
notFoundIdError := errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, fmt.Sprintf("%s doesn't exist in the database", notFoundId), nil)
dbClientMock.On("DeviceById", *invalidNotFoundId.Device.Id).Return(dsModels, notFoundIdError)
invalidNotFoundName := testReq
invalidNotFoundName.Device.Id = nil
notFoundName := "notFoundName"
invalidNotFoundName.Device.Name = ¬FoundName
notFoundNameError := errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, fmt.Sprintf("%s doesn't exist in the database", notFoundName), nil)
dbClientMock.On("DeviceByName", *invalidNotFoundName.Device.Name).Return(dsModels, notFoundNameError)
notFountServiceName := "notFoundService"
notFoundService := testReq
notFoundService.Device.ServiceName = ¬FountServiceName
dbClientMock.On("DeviceServiceNameExists", *notFoundService.Device.ServiceName).Return(false, nil)
notFountProfileName := "notFoundProfile"
notFoundProfile := testReq
notFoundProfile.Device.ProfileName = ¬FountProfileName
dbClientMock.On("DeviceProfileNameExists", *notFoundProfile.Device.ProfileName).Return(false, nil)
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
require.NotNil(t, controller)
tests := []struct {
name string
request []requests.UpdateDeviceRequest
expectedStatusCode int
expectedResponseCode int
}{
{"Valid", []requests.UpdateDeviceRequest{valid}, http.StatusMultiStatus, http.StatusOK},
{"Valid - no requestId", []requests.UpdateDeviceRequest{validWithNoReqID}, http.StatusMultiStatus, http.StatusOK},
{"Valid - no id", []requests.UpdateDeviceRequest{validWithNoId}, http.StatusMultiStatus, http.StatusOK},
{"Valid - no name", []requests.UpdateDeviceRequest{validWithNoName}, http.StatusMultiStatus, http.StatusOK},
{"Invalid - invalid id", []requests.UpdateDeviceRequest{invalidId}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - empty id", []requests.UpdateDeviceRequest{emptyId}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - empty name", []requests.UpdateDeviceRequest{emptyName}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - not found id", []requests.UpdateDeviceRequest{invalidNotFoundId}, http.StatusMultiStatus, http.StatusNotFound},
{"Invalid - not found name", []requests.UpdateDeviceRequest{invalidNotFoundName}, http.StatusMultiStatus, http.StatusNotFound},
{"Invalid - no id and name", []requests.UpdateDeviceRequest{invalidNoIdAndName}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - not found service", []requests.UpdateDeviceRequest{notFoundService}, http.StatusMultiStatus, http.StatusNotFound},
{"Invalid - not found profile", []requests.UpdateDeviceRequest{notFoundProfile}, http.StatusMultiStatus, http.StatusNotFound},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
jsonData, err := json.Marshal(testCase.request)
require.NoError(t, err)
reader := strings.NewReader(string(jsonData))
req, err := http.NewRequest(http.MethodPatch, common.ApiDeviceRoute, reader)
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.PatchDevice)
handler.ServeHTTP(recorder, req)
if testCase.expectedStatusCode == http.StatusMultiStatus {
var res []commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, http.StatusMultiStatus, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, common.ApiVersion, res[0].ApiVersion, "API Version not as expected")
if res[0].RequestId != "" {
assert.Equal(t, expectedRequestId, res[0].RequestId, "RequestID not as expected")
}
assert.Equal(t, testCase.expectedResponseCode, res[0].StatusCode, "BaseResponse status code not as expected")
if testCase.expectedResponseCode == http.StatusOK {
assert.Empty(t, res[0].Message, "Message should be empty when it is successful")
} else {
assert.NotEmpty(t, res[0].Message, "Response message doesn't contain the error message")
}
} else {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedResponseCode, res.StatusCode, "BaseResponse status code not as expected")
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
}
})
}
} | explode_data.jsonl/9305 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2436
} | [
2830,
3393,
43622,
6985,
1155,
353,
8840,
836,
8,
341,
42400,
61774,
1669,
13383,
24754,
198,
2698,
292,
1669,
7860,
44545,
741,
20939,
2959,
11571,
1669,
609,
1999,
11571,
22537,
2959,
16094,
18185,
27234,
1669,
1936,
2271,
4289,
6985,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCheckAlphaNumericOrUTF8Present(t *testing.T) {
testCases := []struct {
name string
input string
result bool
}{
{
name: "ascii lowercase",
input: "aa",
result: true,
},
{
name: "ascii uppercase",
input: "AA",
result: true,
},
{
name: "ascii numbers",
input: "123",
result: true,
},
{
name: "ascii start with metadata",
input: "-- abc3",
result: true,
},
{
name: "ascii end with metadata",
input: "abc3 ..",
result: true,
},
{
name: "UTF8",
input: "テスト",
result: true,
},
{
name: "UTF8 start with metadata",
input: "?? テスト",
result: true,
},
{
name: "UTF8 end with metadata",
input: "テスト ??",
result: true,
},
{
name: "-",
input: "-",
result: false,
},
{
name: "**",
input: "**",
result: false,
},
{
name: "...",
input: "...",
result: false,
},
{
name: "- -",
input: "- -",
result: false,
},
{
name: " -",
input: " -",
result: false,
},
{
name: " ",
input: " ",
result: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := checkAlphaNumericOrUTF8Present(tc.input)
if result != tc.result {
t.Errorf("expected check to be %v, got %v", tc.result, result)
}
})
}
} | explode_data.jsonl/34622 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 710
} | [
2830,
3393,
3973,
19384,
36296,
2195,
8561,
23,
21195,
1155,
353,
8840,
836,
8,
341,
18185,
37302,
1669,
3056,
1235,
341,
197,
11609,
256,
914,
198,
197,
22427,
220,
914,
198,
197,
9559,
1807,
198,
197,
59403,
197,
197,
515,
298,
1160... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestConsulUpdateGetSvcsErr(t *testing.T) {
err := errors.New("asonteuh")
testConsulGetClustersCase{err, nil, nil}.run(t)
testConsulGetClustersCase{}.run(t)
} | explode_data.jsonl/71883 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 68
} | [
2830,
3393,
15220,
360,
4289,
1949,
50,
85,
4837,
7747,
1155,
353,
8840,
836,
8,
341,
9859,
1669,
5975,
7121,
445,
300,
52277,
12540,
1138,
18185,
15220,
360,
1949,
94992,
4207,
90,
615,
11,
2092,
11,
2092,
7810,
6108,
1155,
340,
1818... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestRetryDoNoAttempts(t *testing.T) {
t.Parallel()
r := New(
WithMaxAttempts(0),
)
assert.Error(t, r.Do(context.Background(), func(context.Context) error {
return nil
}))
} | explode_data.jsonl/82059 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 75
} | [
2830,
3393,
51560,
5404,
2753,
81517,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
7000,
1669,
1532,
1006,
197,
197,
2354,
5974,
81517,
7,
15,
1326,
197,
692,
6948,
6141,
1155,
11,
435,
33596,
5378,
19047,
1507,
2915,
5378,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestNoIPAddressesInSNI(t *testing.T) {
for _, ipLiteral := range []string{"1.2.3.4", "::1"} {
c, s := net.Pipe()
go func() {
client := Client(c, &Config{ServerName: ipLiteral})
client.Handshake()
}()
var header [5]byte
if _, err := io.ReadFull(s, header[:]); err != nil {
t.Fatal(err)
}
recordLen := int(header[3])<<8 | int(header[4])
record := make([]byte, recordLen)
if _, err := io.ReadFull(s, record[:]); err != nil {
t.Fatal(err)
}
s.Close()
if bytes.Index(record, []byte(ipLiteral)) != -1 {
t.Errorf("IP literal %q found in ClientHello: %x", ipLiteral, record)
}
}
} | explode_data.jsonl/19958 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 283
} | [
2830,
3393,
2753,
3298,
52290,
641,
50,
14912,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
5997,
17350,
1669,
2088,
3056,
917,
4913,
16,
13,
17,
13,
18,
13,
19,
497,
70154,
16,
9207,
341,
197,
1444,
11,
274,
1669,
4179,
1069,
3444,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestLastReparentTooRecent_Buffering(t *testing.T) {
resetVariables()
flag.Set("enable_buffer", "true")
// Enable the buffer (no explicit whitelist i.e. it applies to everything).
defer resetFlagsForTesting()
now := time.Now()
b := newWithNow(func() time.Time { return now })
// Simulate that the old master notified us about its reparented timestamp
// very recently (time.Now()).
// vtgate should see this immediately after the start.
b.StatsUpdate(&discovery.TabletStats{
Tablet: oldMaster,
Target: &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: topodatapb.TabletType_MASTER},
TabletExternallyReparentedTimestamp: now.Unix(),
})
// Failover to new master. Do not issue any requests before or after i.e.
// there was 0 QPS traffic and no buffering was started.
now = now.Add(1 * time.Second)
b.StatsUpdate(&discovery.TabletStats{
Tablet: newMaster,
Target: &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: topodatapb.TabletType_MASTER},
TabletExternallyReparentedTimestamp: now.Unix(),
})
// After we're past the --buffer_min_time_between_failovers threshold, go
// through a failover with non-zero QPS.
now = now.Add(*minTimeBetweenFailovers)
// We're seeing errors first.
stopped := issueRequest(context.Background(), t, b, failoverErr)
if err := waitForRequestsInFlight(b, 1); err != nil {
t.Fatal(err)
}
// And then the failover end.
b.StatsUpdate(&discovery.TabletStats{
Tablet: newMaster,
Target: &querypb.Target{Keyspace: keyspace, Shard: shard, TabletType: topodatapb.TabletType_MASTER},
TabletExternallyReparentedTimestamp: now.Unix(),
})
// Check that the drain is successful.
if err := <-stopped; err != nil {
t.Fatalf("request should have been buffered and not returned an error: %v", err)
}
// Drain will reset the state to "idle" eventually.
if err := waitForState(b, stateIdle); err != nil {
t.Fatal(err)
}
if got, want := requestsSkipped.Counts()[statsKeyJoinedLastReparentTooRecent], int64(0); got != want {
t.Fatalf("request should not have been skipped: got = %v, want = %v", got, want)
}
if got, want := requestsBuffered.Counts()[statsKeyJoined], int64(1); got != want {
t.Fatalf("request should have been tracked as buffered: got = %v, want = %v", got, want)
}
} | explode_data.jsonl/55857 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 777
} | [
2830,
3393,
5842,
693,
3765,
31246,
25140,
55040,
287,
1155,
353,
8840,
836,
8,
341,
70343,
22678,
2822,
30589,
4202,
445,
12552,
7776,
497,
330,
1866,
1138,
197,
322,
18567,
279,
4147,
320,
2152,
11464,
67727,
600,
1734,
13,
432,
16790... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBind(t *testing.T) {
table := []struct {
binding *v1.Binding
}{
{binding: &v1.Binding{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: "foo",
},
Target: v1.ObjectReference{
Name: "foohost.kubernetes.mydomain.com",
},
}},
}
for _, item := range table {
handler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
b := binder{client}
if err := b.Bind(item.binding); err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
expectedBody := runtime.EncodeOrDie(util.Test.Codec(), item.binding)
handler.ValidateRequest(t,
util.Test.SubResourcePath(string(v1.ResourcePods), metav1.NamespaceDefault, "foo", "binding"),
"POST", &expectedBody)
}
} | explode_data.jsonl/13325 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 427
} | [
2830,
3393,
9950,
1155,
353,
8840,
836,
8,
341,
26481,
1669,
3056,
1235,
341,
197,
2233,
3961,
353,
85,
16,
36022,
198,
197,
59403,
197,
197,
90,
31036,
25,
609,
85,
16,
36022,
515,
298,
23816,
12175,
25,
77520,
16,
80222,
515,
571,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestEnvVarValue(t *testing.T) {
d, err := ioutil.TempDir("", "secrets")
assert.NoError(t, err)
defer os.RemoveAll(d)
secretsManager := createSecrets(t, d)
tests := []struct {
name string
envVar v1.EnvVar
options CtrSpecGenOptions
succeed bool
expected string
}{
{
"ConfigMapExists",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "myvar",
},
},
},
CtrSpecGenOptions{
ConfigMaps: configMapList,
},
true,
"foo",
},
{
"ContainerKeyDoesNotExistInConfigMap",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "doesnotexist",
},
},
},
CtrSpecGenOptions{
ConfigMaps: configMapList,
},
false,
"",
},
{
"OptionalContainerKeyDoesNotExistInConfigMap",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "doesnotexist",
Optional: &optional,
},
},
},
CtrSpecGenOptions{
ConfigMaps: configMapList,
},
true,
"",
},
{
"ConfigMapDoesNotExist",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "doesnotexist",
},
Key: "myvar",
},
},
},
CtrSpecGenOptions{
ConfigMaps: configMapList,
},
false,
"",
},
{
"OptionalConfigMapDoesNotExist",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "doesnotexist",
},
Key: "myvar",
Optional: &optional,
},
},
},
CtrSpecGenOptions{
ConfigMaps: configMapList,
},
true,
"",
},
{
"EmptyConfigMapList",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "myvar",
},
},
},
CtrSpecGenOptions{
ConfigMaps: []v1.ConfigMap{},
},
false,
"",
},
{
"OptionalEmptyConfigMapList",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "myvar",
Optional: &optional,
},
},
},
CtrSpecGenOptions{
ConfigMaps: []v1.ConfigMap{},
},
true,
"",
},
{
"SecretExists",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "myvar",
},
},
},
CtrSpecGenOptions{
SecretsManager: secretsManager,
},
true,
"foo",
},
{
"ContainerKeyDoesNotExistInSecret",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "doesnotexist",
},
},
},
CtrSpecGenOptions{
SecretsManager: secretsManager,
},
false,
"",
},
{
"OptionalContainerKeyDoesNotExistInSecret",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "doesnotexist",
Optional: &optional,
},
},
},
CtrSpecGenOptions{
SecretsManager: secretsManager,
},
true,
"",
},
{
"SecretDoesNotExist",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "doesnotexist",
},
Key: "myvar",
},
},
},
CtrSpecGenOptions{
SecretsManager: secretsManager,
},
false,
"",
},
{
"OptionalSecretDoesNotExist",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "doesnotexist",
},
Key: "myvar",
Optional: &optional,
},
},
},
CtrSpecGenOptions{
SecretsManager: secretsManager,
},
true,
"",
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
result, err := envVarValue(test.envVar, &test.options)
assert.Equal(t, err == nil, test.succeed)
assert.Equal(t, test.expected, result)
})
}
} | explode_data.jsonl/2873 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 2600
} | [
2830,
3393,
14359,
3962,
1130,
1155,
353,
8840,
836,
8,
341,
2698,
11,
1848,
1669,
43144,
65009,
6184,
19814,
330,
325,
52710,
1138,
6948,
35699,
1155,
11,
1848,
340,
16867,
2643,
84427,
1500,
340,
84686,
52710,
2043,
1669,
1855,
19773,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMissingAuthenticatorForLoginHandler(t *testing.T) {
authMiddleware, err := New(&GinJWTMiddleware{
Realm: "test zone",
Key: key,
Timeout: time.Hour,
MaxRefresh: time.Hour * 24,
})
assert.NoError(t, err)
handler := ginHandler(authMiddleware)
r := gofight.New()
r.POST("/login").
SetJSON(gofight.D{
"username": "admin",
"password": "admin",
}).
Run(handler, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {
message := gjson.Get(r.Body.String(), "message")
assert.Equal(t, ErrMissingAuthenticatorFunc.Error(), message.String())
assert.Equal(t, http.StatusInternalServerError, r.Code)
})
} | explode_data.jsonl/64434 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 268
} | [
2830,
3393,
25080,
5087,
61393,
2461,
6231,
3050,
1155,
353,
8840,
836,
8,
1476,
78011,
24684,
11,
1848,
1669,
1532,
2099,
38,
258,
55172,
24684,
515,
197,
197,
64290,
25,
414,
330,
1944,
10143,
756,
197,
55242,
25,
286,
1376,
345,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRightArrow(t *testing.T) {
d1 := initDrawing(Point{90, 90}, Point{160, 90})
c1 := Connector{0, 1}
d1 = AddConnector(d1, c1)
expectedP1 := Point{110, 100}
expectedP2 := Point{139, 100}
expectedSlope := 0.0
actualP1 := connectorP1(d1, c1)
actualP2 := connectorP2(d1, c1)
actualSlope := connectorSlope(d1, c1)
if actualP1 != expectedP1 {
t.Log("invalid P1")
t.Log(actualP1)
t.Fail()
}
if actualP2 != expectedP2 {
t.Log("invalid P2")
t.Log(actualP2)
t.Fail()
}
if actualSlope != expectedSlope {
t.Log("invalid Slope")
t.Log(actualSlope)
t.Fail()
}
} | explode_data.jsonl/54942 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 276
} | [
2830,
3393,
5979,
25914,
1155,
353,
8840,
836,
8,
1476,
2698,
16,
1669,
2930,
37437,
32737,
90,
24,
15,
11,
220,
24,
15,
2137,
5126,
90,
16,
21,
15,
11,
220,
24,
15,
8824,
1444,
16,
1669,
54814,
90,
15,
11,
220,
16,
532,
2698,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestResolveReference(t *testing.T) {
mustParse := func(url string) *URL {
u, err := Parse(url)
if err != nil {
t.Fatalf("Parse(%q) got err %v", url, err)
}
return u
}
opaque := &URL{Scheme: "scheme", Opaque: "opaque"}
for _, test := range resolveReferenceTests {
base := mustParse(test.base)
rel := mustParse(test.rel)
url := base.ResolveReference(rel)
if got := url.String(); got != test.expected {
t.Errorf("URL(%q).ResolveReference(%q)\ngot %q\nwant %q", test.base, test.rel, got, test.expected)
}
// Ensure that new instances are returned.
if base == url {
t.Errorf("Expected URL.ResolveReference to return new URL instance.")
}
// Test the convenience wrapper too.
url, err := base.Parse(test.rel)
if err != nil {
t.Errorf("URL(%q).Parse(%q) failed: %v", test.base, test.rel, err)
} else if got := url.String(); got != test.expected {
t.Errorf("URL(%q).Parse(%q)\ngot %q\nwant %q", test.base, test.rel, got, test.expected)
} else if base == url {
// Ensure that new instances are returned for the wrapper too.
t.Errorf("Expected URL.Parse to return new URL instance.")
}
// Ensure Opaque resets the URL.
url = base.ResolveReference(opaque)
if *url != *opaque {
t.Errorf("ResolveReference failed to resolve opaque URL:\ngot %#v\nwant %#v", url, opaque)
}
// Test the convenience wrapper with an opaque URL too.
url, err = base.Parse("scheme:opaque")
if err != nil {
t.Errorf(`URL(%q).Parse("scheme:opaque") failed: %v`, test.base, err)
} else if *url != *opaque {
t.Errorf("Parse failed to resolve opaque URL:\ngot %#v\nwant %#v", opaque, url)
} else if base == url {
// Ensure that new instances are returned, again.
t.Errorf("Expected URL.Parse to return new URL instance.")
}
}
} | explode_data.jsonl/71726 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 703
} | [
2830,
3393,
56808,
8856,
1155,
353,
8840,
836,
8,
341,
2109,
590,
14463,
1669,
2915,
6522,
914,
8,
353,
3144,
341,
197,
10676,
11,
1848,
1669,
14775,
6522,
340,
197,
743,
1848,
961,
2092,
341,
298,
3244,
30762,
445,
14463,
15238,
80,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestJsonEncodeIndent(t *testing.T) {
testOnce.Do(testInitAll)
v := TestSimplish{
Ii: -794,
Ss: `A Man is
after the new line
after new line and tab
`,
}
v2 := v
v.Mm = make(map[string]*TestSimplish)
for i := 0; i < len(v.Ar); i++ {
v3 := v2
v3.Ii += (i * 4)
v3.Ss = fmt.Sprintf("%d - %s", v3.Ii, v3.Ss)
if i%2 == 0 {
v.Ar[i] = &v3
}
// v3 = v2
v.Sl = append(v.Sl, &v3)
v.Mm[strconv.FormatInt(int64(i), 10)] = &v3
}
oldcan := testJsonH.Canonical
oldIndent := testJsonH.Indent
oldS2A := testJsonH.StructToArray
defer func() {
testJsonH.Canonical = oldcan
testJsonH.Indent = oldIndent
testJsonH.StructToArray = oldS2A
}()
testJsonH.Canonical = true
testJsonH.Indent = -1
testJsonH.StructToArray = false
var bs []byte
NewEncoderBytes(&bs, testJsonH).MustEncode(&v)
txt1Tab := string(bs)
bs = nil
testJsonH.Indent = 120
NewEncoderBytes(&bs, testJsonH).MustEncode(&v)
txtSpaces := string(bs)
// fmt.Printf("\n-----------\n%s\n------------\n%s\n-------------\n", txt1Tab, txtSpaces)
goldenResultTab := `{
"Ar": [
{
"Ar": [
null,
null
],
"Ii": -794,
"Mm": null,
"Sl": null,
"Ss": "-794 - A Man is\nafter the new line\n\tafter new line and tab\n"
},
null
],
"Ii": -794,
"Mm": {
"0": {
"Ar": [
null,
null
],
"Ii": -794,
"Mm": null,
"Sl": null,
"Ss": "-794 - A Man is\nafter the new line\n\tafter new line and tab\n"
},
"1": {
"Ar": [
null,
null
],
"Ii": -790,
"Mm": null,
"Sl": null,
"Ss": "-790 - A Man is\nafter the new line\n\tafter new line and tab\n"
}
},
"Sl": [
{
"Ar": [
null,
null
],
"Ii": -794,
"Mm": null,
"Sl": null,
"Ss": "-794 - A Man is\nafter the new line\n\tafter new line and tab\n"
},
{
"Ar": [
null,
null
],
"Ii": -790,
"Mm": null,
"Sl": null,
"Ss": "-790 - A Man is\nafter the new line\n\tafter new line and tab\n"
}
],
"Ss": "A Man is\nafter the new line\n\tafter new line and tab\n"
}`
if txt1Tab != goldenResultTab {
logT(t, "decoded indented with tabs != expected: \nexpected: %s\nencoded: %s", goldenResultTab, txt1Tab)
failT(t)
}
if txtSpaces != strings.Replace(goldenResultTab, "\t", strings.Repeat(" ", 120), -1) {
logT(t, "decoded indented with spaces != expected: \nexpected: %s\nencoded: %s", goldenResultTab, txtSpaces)
failT(t)
}
} | explode_data.jsonl/21002 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1219
} | [
2830,
3393,
5014,
32535,
42729,
1155,
353,
8840,
836,
8,
341,
18185,
12522,
33596,
8623,
3803,
2403,
340,
5195,
1669,
3393,
50,
6383,
812,
515,
197,
24486,
72,
25,
481,
22,
24,
19,
345,
197,
7568,
82,
25,
1565,
32,
2363,
374,
198,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBzip2Decompressor(t *testing.T) {
cases := []TestDecompressCase{
{
"single.bz2",
false,
false,
nil,
"d3b07384d113edec49eaa6238ad5ff00",
nil,
},
{
"single.bz2",
true,
true,
nil,
"",
nil,
},
}
for i, tc := range cases {
cases[i].Input = filepath.Join("./testdata", "decompress-bz2", tc.Input)
}
TestDecompressor(t, new(Bzip2Decompressor), cases)
} | explode_data.jsonl/74746 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 213
} | [
2830,
3393,
33,
9964,
17,
4900,
316,
56220,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
2271,
4900,
316,
1873,
4207,
515,
197,
197,
515,
298,
197,
1,
15338,
81374,
17,
756,
298,
36012,
345,
298,
36012,
345,
298,
84131,
345... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestString_Len(t *testing.T) {
tests := []struct {
name string
e String
want int
}{
{name: "", e: String{""}, want: 0},
{name: "", e: String{"abc"}, want: 3},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.e.Len(); got != tt.want {
t.Errorf("String.Len() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/34776 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 174
} | [
2830,
3393,
703,
2351,
268,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
7727,
262,
923,
198,
197,
50780,
526,
198,
197,
59403,
197,
197,
47006,
25,
7342,
384,
25,
923,
90,
3014,
2137,
1366,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestValueError_Error(t *testing.T) {
type fields struct {
message string
}
tests := []struct {
name string
fields fields
want string
}{
{"", fields{"error"}, "ValueError: error"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := NewValueError(Message(tt.fields.message))
if got := e.Error(); got != tt.want {
t.Errorf("ValueError.Error() = %v, want %v", got, tt.want)
}
})
}
} | explode_data.jsonl/22610 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 186
} | [
2830,
3393,
1130,
1454,
28651,
1155,
353,
8840,
836,
8,
341,
13158,
5043,
2036,
341,
197,
24753,
914,
198,
197,
532,
78216,
1669,
3056,
1235,
341,
197,
11609,
256,
914,
198,
197,
55276,
5043,
198,
197,
50780,
256,
914,
198,
197,
59403... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestStructArg(t *testing.T) {
type padded struct {
B string
C int32
}
var (
gotA padded
gotB uint32
wantA = padded{"3", 4}
wantB = uint32(5)
)
f := func(a padded, b uint32) {
gotA, gotB = a, b
}
ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)})
if gotA != wantA || gotB != wantB {
t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB)
}
} | explode_data.jsonl/29582 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 187
} | [
2830,
3393,
9422,
2735,
1155,
353,
8840,
836,
8,
341,
13158,
43868,
2036,
341,
197,
12791,
914,
198,
197,
6258,
526,
18,
17,
198,
197,
532,
2405,
2399,
197,
3174,
354,
32,
220,
43868,
198,
197,
3174,
354,
33,
220,
2622,
18,
17,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetSecretReference(t *testing.T) {
testcases := map[string]struct {
params map[string]string
snapContentName string
snapshot *crdv1.VolumeSnapshot
expectRef *v1.SecretReference
expectErr bool
}{
"no params": {
params: nil,
expectRef: nil,
},
"empty err": {
params: map[string]string{snapshotterSecretNameKey: "", snapshotterSecretNamespaceKey: ""},
expectErr: true,
},
"[deprecated] name, no namespace": {
params: map[string]string{snapshotterSecretNameKey: "foo"},
expectErr: true,
},
"namespace, no name": {
params: map[string]string{prefixedSnapshotterSecretNamespaceKey: "foo"},
expectErr: true,
},
"simple - valid": {
params: map[string]string{prefixedSnapshotterSecretNameKey: "name", prefixedSnapshotterSecretNamespaceKey: "ns"},
snapshot: &crdv1.VolumeSnapshot{},
expectRef: &v1.SecretReference{Name: "name", Namespace: "ns"},
},
"[deprecated] simple - valid, no pvc": {
params: map[string]string{snapshotterSecretNameKey: "name", snapshotterSecretNamespaceKey: "ns"},
snapshot: nil,
expectRef: &v1.SecretReference{Name: "name", Namespace: "ns"},
},
"simple - invalid name": {
params: map[string]string{prefixedSnapshotterSecretNameKey: "bad name", prefixedSnapshotterSecretNamespaceKey: "ns"},
snapshot: &crdv1.VolumeSnapshot{},
expectRef: nil,
expectErr: true,
},
"[deprecated] simple - invalid namespace": {
params: map[string]string{snapshotterSecretNameKey: "name", snapshotterSecretNamespaceKey: "bad ns"},
snapshot: &crdv1.VolumeSnapshot{},
expectRef: nil,
expectErr: true,
},
"template - invalid": {
params: map[string]string{
prefixedSnapshotterSecretNameKey: "static-${volumesnapshotcontent.name}-${volumesnapshot.namespace}-${volumesnapshot.name}-${volumesnapshot.annotations['akey']}",
prefixedSnapshotterSecretNamespaceKey: "static-${volumesnapshotcontent.name}-${volumesnapshot.namespace}",
},
snapContentName: "snapcontentname",
snapshot: &crdv1.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: "snapshotname",
Namespace: "snapshotnamespace",
Annotations: map[string]string{"akey": "avalue"},
},
},
expectRef: nil,
expectErr: true,
},
"template - invalid namespace tokens": {
params: map[string]string{
snapshotterSecretNameKey: "myname",
snapshotterSecretNamespaceKey: "mynamespace${bar}",
},
snapshot: &crdv1.VolumeSnapshot{},
expectRef: nil,
expectErr: true,
},
"template - invalid name tokens": {
params: map[string]string{
snapshotterSecretNameKey: "myname${foo}",
snapshotterSecretNamespaceKey: "mynamespace",
},
snapshot: &crdv1.VolumeSnapshot{},
expectRef: nil,
expectErr: true,
},
}
for k, tc := range testcases {
t.Run(k, func(t *testing.T) {
ref, err := getSecretReference(tc.params, tc.snapContentName, tc.snapshot)
if err != nil {
if tc.expectErr {
return
}
t.Fatalf("Did not expect error but got: %v", err)
} else {
if tc.expectErr {
t.Fatalf("Expected error but got none")
}
}
if !reflect.DeepEqual(ref, tc.expectRef) {
t.Errorf("Expected %v, got %v", tc.expectRef, ref)
}
})
}
} | explode_data.jsonl/10882 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1408
} | [
2830,
3393,
1949,
19773,
8856,
1155,
353,
8840,
836,
8,
341,
18185,
23910,
1669,
2415,
14032,
60,
1235,
341,
197,
25856,
688,
2415,
14032,
30953,
198,
197,
1903,
6861,
2762,
675,
914,
198,
197,
1903,
9601,
286,
353,
5082,
37261,
16,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestSamplingMultiplePolicies(t *testing.T) {
const maxSize = 100
const decisionWaitSeconds = 5
// For this test explicitly control the timer calls and batcher, and set a mock
// sampling policy evaluator.
msp := new(consumertest.TracesSink)
mpe1 := &mockPolicyEvaluator{}
mpe2 := &mockPolicyEvaluator{}
mtt := &manualTTicker{}
tsp := &tailSamplingSpanProcessor{
ctx: context.Background(),
nextConsumer: msp,
maxNumTraces: maxSize,
logger: zap.NewNop(),
decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
policies: []*Policy{
{
Name: "policy-1", Evaluator: mpe1, ctx: context.TODO(),
},
{
Name: "policy-2", Evaluator: mpe2, ctx: context.TODO(),
}},
deleteChan: make(chan pdata.TraceID, maxSize),
policyTicker: mtt,
}
_, batches := generateIdsAndBatches(210)
currItem := 0
numSpansPerBatchWindow := 10
// First evaluations shouldn't have anything to evaluate, until decision wait time passed.
for evalNum := 0; evalNum < decisionWaitSeconds; evalNum++ {
for ; currItem < numSpansPerBatchWindow*(evalNum+1); currItem++ {
tsp.ConsumeTraces(context.Background(), batches[currItem])
require.True(t, mtt.Started, "Time ticker was expected to have started")
}
tsp.samplingPolicyOnTick()
require.False(
t,
msp.SpansCount() != 0 || mpe1.EvaluationCount != 0 || mpe2.EvaluationCount != 0,
"policy for initial items was evaluated before decision wait period",
)
}
// Both policies will decide to sample
mpe1.NextDecision = sampling.Sampled
mpe2.NextDecision = sampling.Sampled
tsp.samplingPolicyOnTick()
require.False(
t,
msp.SpansCount() == 0 || mpe1.EvaluationCount == 0 || mpe2.EvaluationCount == 0,
"policy should have been evaluated totalspans == %d and evaluationcount(1) == %d and evaluationcount(2) == %d",
msp.SpansCount(),
mpe1.EvaluationCount,
mpe2.EvaluationCount,
)
require.Equal(t, numSpansPerBatchWindow, msp.SpansCount(), "nextConsumer should've been called with exactly 1 batch of spans")
// Late span of a sampled trace should be sent directly down the pipeline exporter
tsp.ConsumeTraces(context.Background(), batches[0])
expectedNumWithLateSpan := numSpansPerBatchWindow + 1
require.Equal(t, expectedNumWithLateSpan, msp.SpansCount(), "late span was not accounted for")
require.Equal(t, 1, mpe1.LateArrivingSpansCount, "1st policy was not notified of the late span")
require.Equal(t, 0, mpe2.LateArrivingSpansCount, "2nd policy should not have been notified of the late span")
} | explode_data.jsonl/31674 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 947
} | [
2830,
3393,
98622,
32089,
47,
42038,
1155,
353,
8840,
836,
8,
341,
4777,
61935,
284,
220,
16,
15,
15,
198,
4777,
5480,
14190,
15343,
284,
220,
20,
198,
197,
322,
1752,
419,
1273,
20975,
2524,
279,
9021,
6738,
323,
7162,
261,
11,
323... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestSelectWithUnionAll(t *testing.T) {
executor, sbc1, sbc2, _ := createLegacyExecutorEnv()
executor.normalize = true
sql := "select id from user where id in (1, 2, 3) union all select id from user where id in (1, 2, 3)"
bv, _ := sqltypes.BuildBindVariable([]int64{1, 2, 3})
bv1, _ := sqltypes.BuildBindVariable([]int64{1, 2})
bv2, _ := sqltypes.BuildBindVariable([]int64{3})
sbc1WantQueries := []*querypb.BoundQuery{{
Sql: "select id from user where id in ::__vals",
BindVariables: map[string]*querypb.BindVariable{
"__vals": bv1,
"vtg1": bv,
"vtg2": bv,
},
}, {
Sql: "select id from user where id in ::__vals",
BindVariables: map[string]*querypb.BindVariable{
"__vals": bv1,
"vtg1": bv,
"vtg2": bv,
},
}}
sbc2WantQueries := []*querypb.BoundQuery{{
Sql: "select id from user where id in ::__vals",
BindVariables: map[string]*querypb.BindVariable{
"__vals": bv2,
"vtg1": bv,
"vtg2": bv,
},
}, {
Sql: "select id from user where id in ::__vals",
BindVariables: map[string]*querypb.BindVariable{
"__vals": bv2,
"vtg1": bv,
"vtg2": bv,
},
}}
_, err := executorExec(executor, sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "sbc1")
utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "sbc2")
// Reset
sbc1.Queries = nil
sbc2.Queries = nil
_, err = executorStream(executor, sql)
require.NoError(t, err)
utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "sbc1")
utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "sbc2")
} | explode_data.jsonl/67435 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 719
} | [
2830,
3393,
3379,
2354,
32658,
2403,
1155,
353,
8840,
836,
8,
341,
67328,
4831,
11,
7898,
66,
16,
11,
7898,
66,
17,
11,
716,
1669,
1855,
77415,
25255,
14359,
741,
67328,
4831,
44657,
284,
830,
198,
30633,
1669,
330,
1742,
877,
504,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDNSProvider_PresentFailed(t *testing.T) {
provider, mux := setupTest(t)
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodGet, r.Method, "method")
_, err := fmt.Fprintf(w, `{"data":"record_already_exists_remove_first","result":"error"}`)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
})
err := provider.Present("example.com", "", fakeChallengeToken)
require.EqualError(t, err, "dreamhost: add TXT record failed: record_already_exists_remove_first")
} | explode_data.jsonl/52235 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 210
} | [
2830,
3393,
61088,
5179,
1088,
2695,
9408,
1155,
353,
8840,
836,
8,
341,
197,
19979,
11,
59807,
1669,
6505,
2271,
1155,
692,
2109,
2200,
63623,
35460,
2915,
3622,
1758,
37508,
11,
435,
353,
1254,
9659,
8,
341,
197,
6948,
12808,
1155,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMustGather(t *testing.T) {
expErr := errors.New("failed to gather")
g := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
return nil, expErr
})
ft := new(fakeT)
_ = promtest.MustGather(ft, g)
if !ft.failed {
t.Fatal("MustGather should have failed but didn't")
}
logged := ft.logBuf.String()
if !strings.HasPrefix(logged, "error while gathering metrics:") || !strings.Contains(logged, expErr.Error()) {
t.Fatalf("did not log the expected error message: %s", logged)
}
expMF := []*dto.MetricFamily{} // Use a non-nil, zero-length slice for a simple-ish check.
g = prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
return expMF, nil
})
ft = new(fakeT)
gotMF := promtest.MustGather(ft, g)
if ft.failed {
t.Fatalf("MustGather should not have failed")
}
if gotMF == nil || len(gotMF) != 0 {
t.Fatalf("exp: %v, got: %v", expMF, gotMF)
}
} | explode_data.jsonl/49061 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 362
} | [
2830,
3393,
31776,
38,
1856,
1155,
353,
8840,
836,
8,
341,
48558,
7747,
1669,
5975,
7121,
445,
16091,
311,
9567,
1138,
3174,
1669,
2706,
39705,
1224,
1856,
261,
9626,
18552,
368,
34923,
9,
58978,
1321,
16340,
15192,
11,
1465,
8,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAllocCancelDoesntStarve(t *testing.T) {
sem := semaphore.NewWeighted(10)
// Block off a portion of the semaphore so that Acquire(_, 10) can eventually succeed.
sem.Acquire(context.Background(), 1)
// In the background, Acquire(_, 10).
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
sem.Acquire(ctx, 10)
}()
// Wait until the Acquire(_, 10) call blocks.
for sem.TryAcquire(1) {
sem.Release(1)
runtime.Gosched()
}
// Now try to grab a read lock, and simultaneously unblock the Acquire(_, 10) call.
// Both Acquire calls should unblock and return, in either order.
go cancel()
err := sem.Acquire(context.Background(), 1)
if err != nil {
t.Fatalf("Acquire(_, 1) failed unexpectedly: %v", err)
}
sem.Release(1)
} | explode_data.jsonl/56022 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 277
} | [
2830,
3393,
25154,
9269,
21468,
406,
12699,
586,
1155,
353,
8840,
836,
8,
341,
89527,
1669,
55918,
7121,
8295,
291,
7,
16,
15,
692,
197,
322,
8362,
1007,
264,
13348,
315,
279,
55918,
773,
429,
6381,
984,
41117,
220,
16,
15,
8,
646,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestParallelBlockValidation(t *testing.T) {
viper.Set("peer.fileSystemPath", "/tmp/fabric/txvalidatortest")
ledgermgmt.InitializeTestEnv()
defer ledgermgmt.CleanupTestEnv()
gb, _ := test.MakeGenesisBlock("TestLedger")
gbHash := protoutil.BlockHeaderHash(gb.Header)
ledger, _ := ledgermgmt.CreateLedger(gb)
defer ledger.Close()
// here we test validation of a block with 128 txes
testValidationWithNTXes(t, ledger, gbHash, 128)
} | explode_data.jsonl/5162 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
16547,
4713,
13799,
1155,
353,
8840,
836,
8,
341,
5195,
12858,
4202,
445,
16537,
9715,
2320,
1820,
497,
3521,
5173,
6663,
28897,
14,
3998,
1891,
266,
371,
477,
1138,
197,
50704,
12311,
2501,
45829,
2271,
14359,
741,
16867,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInteg_00_ListPipelines(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
cfg := integration.ConfigWithDefaultRegion("us-west-2")
svc := codepipeline.New(cfg)
params := &codepipeline.ListPipelinesInput{}
req := svc.ListPipelinesRequest(params)
req.Handlers.Validate.Remove(defaults.ValidateParametersHandler)
_, err := req.Send(ctx)
if err != nil {
t.Errorf("expect no error, got %v", err)
}
} | explode_data.jsonl/4927 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 175
} | [
2830,
3393,
1072,
791,
62,
15,
15,
27104,
47,
93997,
1155,
353,
8840,
836,
8,
341,
20985,
11,
9121,
24911,
1669,
2266,
26124,
7636,
5378,
19047,
1507,
220,
20,
77053,
32435,
340,
16867,
9121,
24911,
2822,
50286,
1669,
17590,
10753,
2354... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestErrDisallowedFields(t *testing.T) {
expected := cli.FieldErrors{
&field.Error{
Type: field.ErrorTypeForbidden,
Field: rifftesting.TestField,
BadValue: "",
Detail: "",
},
}
actual := cli.ErrDisallowedFields(rifftesting.TestField, "")
if diff := cmp.Diff(expected, actual); diff != "" {
t.Errorf("(-expected, +actual): %s", diff)
}
} | explode_data.jsonl/13216 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 162
} | [
2830,
3393,
7747,
4839,
20967,
8941,
1155,
353,
8840,
836,
8,
341,
42400,
1669,
21348,
17087,
13877,
515,
197,
197,
5,
2566,
6141,
515,
298,
27725,
25,
257,
2070,
6141,
929,
69115,
345,
298,
94478,
25,
262,
36924,
723,
59855,
8787,
18... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestNullArray(t *testing.T) {
n := len(Get(`{"data":null}`, "data").Array())
if n != 0 {
t.Fatalf("expected '%v', got '%v'", 0, n)
}
n = len(Get(`{}`, "data").Array())
if n != 0 {
t.Fatalf("expected '%v', got '%v'", 0, n)
}
n = len(Get(`{"data":[]}`, "data").Array())
if n != 0 {
t.Fatalf("expected '%v', got '%v'", 0, n)
}
n = len(Get(`{"data":[null]}`, "data").Array())
if n != 1 {
t.Fatalf("expected '%v', got '%v'", 1, n)
}
} | explode_data.jsonl/43448 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 222
} | [
2830,
3393,
3280,
1857,
1155,
353,
8840,
836,
8,
341,
9038,
1669,
2422,
24460,
5809,
4913,
691,
788,
2921,
28350,
330,
691,
1827,
1857,
2398,
743,
308,
961,
220,
15,
341,
197,
3244,
30762,
445,
7325,
7677,
85,
516,
2684,
7677,
85,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func Test_RandomString(t *testing.T) {
tcases := []int{1, 8, 13, 96, 512, 1024}
for _, tc := range tcases {
rnd := certutil.RandomString(tc)
assert.Equal(t, tc, len(rnd))
assert.NotContains(t, rnd, "=")
assert.NotContains(t, rnd, "/")
}
} | explode_data.jsonl/55779 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 113
} | [
2830,
3393,
2568,
2206,
703,
1155,
353,
8840,
836,
8,
341,
3244,
23910,
1669,
3056,
396,
90,
16,
11,
220,
23,
11,
220,
16,
18,
11,
220,
24,
21,
11,
220,
20,
16,
17,
11,
220,
16,
15,
17,
19,
532,
2023,
8358,
17130,
1669,
2088,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDetermineFeePerKw(t *testing.T) {
t.Parallel()
defaultFee := chainfee.SatPerKWeight(999)
relayFee := chainfee.SatPerKWeight(300)
feeEstimator := newMockFeeEstimator(defaultFee, relayFee)
// We'll populate two items in the internal map which is used to query
// a fee based on a confirmation target: the default conf target, and
// an arbitrary conf target. We'll ensure below that both of these are
// properly
feeEstimator.blocksToFee[50] = 300
feeEstimator.blocksToFee[defaultNumBlocksEstimate] = 1000
testCases := []struct {
// feePref is the target fee preference for this case.
feePref FeePreference
// fee is the value the DetermineFeePerKw should return given
// the FeePreference above
fee chainfee.SatPerKWeight
// fail determines if this test case should fail or not.
fail bool
}{
// A fee rate below the fee rate floor should output the floor.
{
feePref: FeePreference{
FeeRate: chainfee.SatPerKWeight(99),
},
fee: chainfee.FeePerKwFloor,
},
// A fee rate above the floor, should pass through and return
// the target fee rate.
{
feePref: FeePreference{
FeeRate: 900,
},
fee: 900,
},
// A specified confirmation target should cause the function to
// query the estimator which will return our value specified
// above.
{
feePref: FeePreference{
ConfTarget: 50,
},
fee: 300,
},
// If the caller doesn't specify any values at all, then we
// should query for the default conf target.
{
feePref: FeePreference{},
fee: 1000,
},
// Both conf target and fee rate are set, we should return with
// an error.
{
feePref: FeePreference{
ConfTarget: 50,
FeeRate: 90000,
},
fee: 300,
fail: true,
},
}
for i, testCase := range testCases {
targetFee, err := DetermineFeePerKw(
feeEstimator, testCase.feePref,
)
switch {
case testCase.fail && err != nil:
continue
case testCase.fail && err == nil:
t.Fatalf("expected failure for #%v", i)
case !testCase.fail && err != nil:
t.Fatalf("unable to estimate fee; %v", err)
}
if targetFee != testCase.fee {
t.Fatalf("#%v: wrong fee: expected %v got %v", i,
testCase.fee, targetFee)
}
}
} | explode_data.jsonl/78819 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 843
} | [
2830,
3393,
35,
24308,
41941,
3889,
42,
86,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
11940,
41941,
1669,
8781,
30017,
808,
266,
3889,
42,
8295,
7,
24,
24,
24,
340,
197,
90891,
41941,
1669,
8781,
30017,
808,
266,
3889,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestSetTrustLineFlags(t *testing.T) {
asset := CreditAsset{"ABCD", "GAEJJMDDCRYF752PKIJICUVL7MROJBNXDV2ZB455T7BAFHU2LCLSE2LW"}
source := "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU"
trustor := "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z"
for _, testcase := range []struct {
name string
op SetTrustLineFlags
}{
{
name: "Both set and clear",
op: SetTrustLineFlags{
Trustor: trustor,
Asset: asset,
SetFlags: []TrustLineFlag{TrustLineClawbackEnabled},
ClearFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities},
SourceAccount: source,
},
},
{
name: "Both set and clear 2",
op: SetTrustLineFlags{
Trustor: trustor,
Asset: asset,
SetFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities},
ClearFlags: []TrustLineFlag{TrustLineClawbackEnabled},
SourceAccount: source,
},
},
{
name: "Only set",
op: SetTrustLineFlags{
Trustor: trustor,
Asset: asset,
SetFlags: []TrustLineFlag{TrustLineClawbackEnabled},
ClearFlags: nil,
SourceAccount: source,
},
},
{
name: "Only clear",
op: SetTrustLineFlags{
Trustor: trustor,
Asset: asset,
SetFlags: nil,
ClearFlags: []TrustLineFlag{TrustLineClawbackEnabled},
SourceAccount: source,
},
},
{
name: "No set nor clear",
op: SetTrustLineFlags{
Trustor: trustor,
Asset: asset,
SetFlags: nil,
ClearFlags: nil,
SourceAccount: source,
},
},
{
name: "No source",
op: SetTrustLineFlags{
Trustor: trustor,
Asset: asset,
SetFlags: []TrustLineFlag{TrustLineClawbackEnabled},
ClearFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities},
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
op := testcase.op
assert.NoError(t, op.Validate())
xdrOp, err := op.BuildXDR()
assert.NoError(t, err)
xdrBin, err := xdrOp.MarshalBinary()
assert.NoError(t, err)
var xdrOp2 xdr.Operation
assert.NoError(t, xdr.SafeUnmarshal(xdrBin, &xdrOp2))
var op2 SetTrustLineFlags
assert.NoError(t, op2.FromXDR(xdrOp2))
assert.Equal(t, op, op2)
testOperationsMarshallingRoundtrip(t, []Operation{&testcase.op})
})
}
} | explode_data.jsonl/57467 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1144
} | [
2830,
3393,
1649,
45548,
2460,
9195,
1155,
353,
8840,
836,
8,
341,
197,
9852,
1669,
16267,
16604,
4913,
1867,
6484,
497,
330,
38,
13669,
63391,
44,
4103,
35462,
37,
22,
20,
17,
22242,
76686,
1317,
22246,
43,
22,
44,
1285,
41,
15594,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGetOrderbook(t *testing.T) {
_, err := h.GetOrderbook("BTCUSD", 50)
if err != nil {
t.Error("Test faild - HitBTC GetOrderbook() error", err)
}
} | explode_data.jsonl/9481 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 63
} | [
2830,
3393,
1949,
4431,
2190,
1155,
353,
8840,
836,
8,
341,
197,
6878,
1848,
1669,
305,
2234,
4431,
2190,
445,
59118,
26749,
497,
220,
20,
15,
340,
743,
1848,
961,
2092,
341,
197,
3244,
6141,
445,
2271,
3690,
67,
481,
15882,
59118,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 2 |
func TestDel(t *testing.T) {
s, err := Run()
ok(t, err)
defer s.Close()
c, err := proto.Dial(s.Addr())
ok(t, err)
defer c.Close()
t.Run("simple", func(t *testing.T) {
s.Set("foo", "bar")
s.HSet("aap", "noot", "mies")
s.Set("one", "two")
s.SetTTL("one", time.Second*1234)
s.Set("three", "four")
mustDo(t, c,
"DEL", "one", "aap", "nosuch",
proto.Int(2),
)
equals(t, time.Duration(0), s.TTL("one"))
})
t.Run("failure cases", func(t *testing.T) {
mustDo(t, c,
"DEL",
proto.Error("ERR wrong number of arguments for 'del' command"),
)
})
t.Run("direct", func(t *testing.T) {
s.Set("foo", "bar")
s.Del("foo")
got, err := s.Get("foo")
equals(t, ErrKeyNotFound, err)
equals(t, "", got)
})
} | explode_data.jsonl/44814 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 372
} | [
2830,
3393,
16532,
1155,
353,
8840,
836,
8,
341,
1903,
11,
1848,
1669,
6452,
741,
59268,
1155,
11,
1848,
340,
16867,
274,
10421,
741,
1444,
11,
1848,
1669,
18433,
98462,
1141,
93626,
2398,
59268,
1155,
11,
1848,
340,
16867,
272,
10421,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestExpirePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
nodeName := "node"
testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
}
now := time.Now()
ttl := 10 * time.Second
tests := []struct {
pods []*testExpirePodStruct
cleanupTime time.Time
wNodeInfo *NodeInfo
}{{ // assumed pod would expires
pods: []*testExpirePodStruct{
{pod: testPods[0], assumedTime: now},
},
cleanupTime: now.Add(2 * ttl),
wNodeInfo: nil,
}, { // first one would expire, second one would not.
pods: []*testExpirePodStruct{
{pod: testPods[0], assumedTime: now},
{pod: testPods[1], assumedTime: now.Add(3 * ttl / 2)},
},
cleanupTime: now.Add(2 * ttl),
wNodeInfo: &NodeInfo{
requestedResource: &Resource{
MilliCPU: 200,
Memory: 1024,
},
nonzeroRequest: &Resource{
MilliCPU: 200,
Memory: 1024,
},
TransientInfo: newTransientSchedulerInfo(),
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[1]},
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
imageStates: make(map[string]*ImageStateSummary),
},
}}
for i, tt := range tests {
cache := newSchedulerCache(ttl, time.Second, nil)
for _, pod := range tt.pods {
if err := assumeAndFinishBinding(cache, pod.pod, pod.assumedTime); err != nil {
t.Fatalf("assumePod failed: %v", err)
}
}
// pods that have assumedTime + ttl < cleanupTime will get expired and removed
cache.cleanupAssumedPods(tt.cleanupTime)
n := cache.nodes[nodeName]
deepEqualWithoutGeneration(t, i, n, tt.wNodeInfo)
}
} | explode_data.jsonl/19643 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 847
} | [
2830,
3393,
8033,
554,
23527,
1155,
353,
8840,
836,
8,
341,
197,
322,
18567,
26282,
1925,
1955,
2461,
37889,
8974,
311,
653,
23831,
5101,
23757,
198,
79138,
12753,
13275,
13859,
42318,
4202,
28197,
17305,
4430,
82,
11265,
497,
4419,
1785,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestParseNamedStats(t *testing.T) {
t.Parallel()
s := &IllumosNfsServer{
Fields: []string{"read", "write", "remove", "create"},
NfsVersions: []string{"v4"},
}
testData := helpers.FromFixture("nfs--0--rfsproccnt_v4.kstat")
fields := parseNamedStats(s, testData)
require.Equal(
t,
fields,
map[string]interface{}{
"read": float64(902),
"write": float64(1310),
"remove": float64(94),
"create": float64(6),
},
)
} | explode_data.jsonl/45328 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 206
} | [
2830,
3393,
14463,
15810,
16635,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
1903,
1669,
609,
40,
5448,
436,
45,
3848,
5475,
515,
197,
197,
8941,
25,
414,
3056,
917,
4913,
878,
497,
330,
4934,
497,
330,
5399,
497,
330,
31... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestFunctionality(t *testing.T) {
var w Websocket
if w.FormatFunctionality() != NoWebsocketSupportText {
t.Fatalf("Test Failed - FormatFunctionality error expected %s but received %s",
NoWebsocketSupportText, w.FormatFunctionality())
}
w.Functionality = 1 << 31
if w.FormatFunctionality() != UnknownWebsocketFunctionality+"[1<<31]" {
t.Fatal("Test Failed - GetFunctionality error incorrect error returned")
}
w.Functionality = WebsocketOrderbookSupported
if w.GetFunctionality() != WebsocketOrderbookSupported {
t.Fatal("Test Failed - GetFunctionality error incorrect bitmask returned")
}
if !w.SupportsFunctionality(WebsocketOrderbookSupported) {
t.Fatal("Test Failed - SupportsFunctionality error should be true")
}
} | explode_data.jsonl/29307 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 227
} | [
2830,
3393,
5152,
2719,
1155,
353,
8840,
836,
8,
341,
2405,
289,
4895,
9556,
271,
743,
289,
9978,
5152,
2719,
368,
961,
2308,
5981,
9556,
7916,
1178,
341,
197,
3244,
30762,
445,
2271,
21379,
481,
15042,
5152,
2719,
1465,
3601,
1018,
8... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestGCPOpts_Run(t *testing.T) {
ctrl := gomock.NewController(t)
mockStore := mocks.NewMockGCPPeeringConnectionCreator(ctrl)
defer ctrl.Finish()
opts := &GCPOpts{
store: mockStore,
network: "test",
}
t.Run("container exists", func(t *testing.T) {
containers := []mongodbatlas.Container{
{
ID: "containerID",
AtlasCIDRBlock: opts.atlasCIDRBlock,
},
}
mockStore.
EXPECT().
GCPContainers(opts.ProjectID).
Return(containers, nil).
Times(1)
request := opts.newPeer(containers[0].ID)
mockStore.
EXPECT().
CreatePeeringConnection(opts.ProjectID, request).
Return(&mongodbatlas.Peer{}, nil).
Times(1)
if err := opts.Run(); err != nil {
t.Fatalf("Run() unexpected error: %v", err)
}
})
t.Run("container does not exist", func(t *testing.T) {
mockStore.
EXPECT().
GCPContainers(opts.ProjectID).
Return(nil, nil).
Times(1)
containerRequest := opts.newContainer()
mockStore.
EXPECT().
CreateContainer(opts.ProjectID, containerRequest).
Return(&mongodbatlas.Container{ID: "ID"}, nil).
Times(1)
request := opts.newPeer("ID")
mockStore.
EXPECT().
CreatePeeringConnection(opts.ProjectID, request).
Return(&mongodbatlas.Peer{}, nil).
Times(1)
if err := opts.Run(); err != nil {
t.Fatalf("Run() unexpected error: %v", err)
}
})
} | explode_data.jsonl/1543 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 597
} | [
2830,
3393,
22863,
2045,
12754,
84158,
1155,
353,
8840,
836,
8,
341,
84381,
1669,
342,
316,
1176,
7121,
2051,
1155,
340,
77333,
6093,
1669,
68909,
7121,
11571,
38,
15855,
68,
4671,
4526,
31865,
62100,
340,
16867,
23743,
991,
18176,
2822,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestVarAfterMain2(t *testing.T) {
gopClTest(t, `
package main
func main() {
println(i)
}
var i = 100
`, `package main
import fmt "fmt"
func main() {
fmt.Println(i)
}
var i = 100
`)
} | explode_data.jsonl/73598 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 89
} | [
2830,
3393,
3962,
6025,
6202,
17,
1155,
353,
8840,
836,
8,
341,
3174,
453,
5066,
2271,
1155,
11,
22074,
1722,
1887,
271,
2830,
1887,
368,
341,
81168,
1956,
340,
630,
947,
600,
284,
220,
16,
15,
15,
198,
7808,
1565,
1722,
1887,
271,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestCopyWithTarInvalidSrc(t *testing.T) {
tempFolder, err := os.MkdirTemp("", "docker-archive-test")
if err != nil {
t.Fatal(nil)
}
destFolder := filepath.Join(tempFolder, "dest")
invalidSrc := filepath.Join(tempFolder, "doesnotexists")
err = os.MkdirAll(destFolder, 0740)
if err != nil {
t.Fatal(err)
}
err = defaultCopyWithTar(invalidSrc, destFolder)
if err == nil {
t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
}
} | explode_data.jsonl/79241 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 184
} | [
2830,
3393,
12106,
2354,
62733,
7928,
20360,
1155,
353,
8840,
836,
8,
341,
16280,
13682,
11,
1848,
1669,
2643,
1321,
12438,
12151,
19814,
330,
28648,
95100,
16839,
1138,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
27907,
340,
197,
532,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestGetSeenInboundMessages(t *testing.T) {
maxWait := 5 * time.Second
s := NewTestServer()
go s.Start()
api := slack.New("ABCDEFG", slack.OptionAPIURL(s.GetAPIURL()))
rtm := api.NewRTM()
go rtm.ManageConnection()
rtm.SendMessage(&slack.OutgoingMessage{
Channel: "foo",
Text: "should see this inbound message",
})
time.Sleep(maxWait)
seenInbound := s.GetSeenInboundMessages()
assert.True(t, len(seenInbound) > 0)
hadMessage := false
for _, msg := range seenInbound {
var m = slack.Message{}
jerr := json.Unmarshal([]byte(msg), &m)
assert.NoError(t, jerr, "messages should decode as slack.Message")
if m.Text == "should see this inbound message" {
hadMessage = true
break
}
}
assert.True(t, hadMessage, "did not see my sent message")
assert.True(t, s.SawMessage("should see this inbound message"))
} | explode_data.jsonl/7549 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 324
} | [
2830,
3393,
1949,
85675,
641,
10891,
15820,
1155,
353,
8840,
836,
8,
341,
22543,
14190,
1669,
220,
20,
353,
882,
32435,
198,
1903,
1669,
1532,
2271,
5475,
741,
30680,
274,
12101,
2822,
54299,
1669,
45619,
7121,
445,
67004,
497,
45619,
5... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestPause(t *testing.T) {
ss, cmdr, sm, done := setup(t, "test-pause")
pause := func() {
if err := cmdr.Send("test-pause", PauseMessage()); err != nil {
t.Fatalf("Error sending pause command to test-pause: %v", err)
}
newstate := <-ss.Stored
if newstate.State.Code != Paused {
t.Fatalf("Expected paused state but found: %s", newstate.State)
}
if state, err := ss.Load(task("test-pause")); err != nil || state.Code != Paused {
t.Fatalf("Failed to load expected pause state for task: state=%s err=%v", state, err)
}
// Task should not be Done; pausing doesn't exit the statemachine
select {
case <-done:
t.Fatal("Task exited unexpectedly.")
case <-time.After(100 * time.Millisecond):
}
}
// Pause the work
pause()
// Should be able to resume paused work
if err := cmdr.Send("test-pause", RunMessage()); err != nil {
t.Fatalf("Error sending run command to test-pause: %v", err)
}
newstate := <-ss.Stored
if newstate.State.Code != Runnable {
t.Fatalf("Expected runnable state but found: %s", newstate.State)
}
if state, err := ss.Load(task("test-pause")); err != nil || state.Code != Runnable {
t.Fatalf("Failed to load expected runnable state for task: state=%s err=%v", state, err)
}
// Re-pause the work
pause()
// Pausing paused work is silly but fine
pause()
// Releasing paused work should make it exit but leave it in the paused state
sm.Stop()
newstate = <-ss.Stored
if newstate.State.Code != Paused {
t.Fatalf("Releasing should not have changed paused state but stored: %s", newstate.State)
}
select {
case d := <-done:
if d {
t.Fatal("Releasing task should not have marked it as done.")
}
case <-time.After(100 * time.Millisecond):
t.Fatal("Releasing paused task should have exited the statemachine, but didn't.")
}
// Ensure task is stored with the paused state
if state, err := ss.Load(task("test-pause")); err != nil || state.Code != Paused {
t.Fatalf("Failed to load expected paused state for task: state=%s err=%v", state, err)
}
} | explode_data.jsonl/23106 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 725
} | [
2830,
3393,
28391,
1155,
353,
8840,
836,
8,
341,
34472,
11,
5439,
81,
11,
1525,
11,
2814,
1669,
6505,
1155,
11,
330,
1944,
2268,
3454,
5130,
3223,
3454,
1669,
2915,
368,
341,
197,
743,
1848,
1669,
5439,
81,
20176,
445,
1944,
2268,
3... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestReconcileProbeError(t *testing.T) {
theError := errors.New("this is the error")
table := TableTest{{
Name: "first reconcile basic ingress",
Key: "ns/name",
WantErr: true,
Objects: append([]runtime.Object{
ing("name", "ns", withBasicSpec, withContour),
}, servicesAndEndpoints...),
WantCreates: mustMakeProxies(t, ing("name", "ns", withBasicSpec, withContour)),
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: ing("name", "ns", withBasicSpec, withContour, func(i *v1alpha1.Ingress) {
// These are the things we expect to change in status.
i.Status.InitializeConditions()
i.Status.MarkNetworkConfigured()
}),
}},
WantDeleteCollections: []clientgotesting.DeleteCollectionActionImpl{{
ListRestrictions: clientgotesting.ListRestrictions{
Labels: deleteSelector(t, 0),
Fields: fields.Everything(),
},
}},
WantEvents: []string{
Eventf(corev1.EventTypeWarning, "InternalError", fmt.Sprintf("failed to probe Ingress ns/name: %v", theError)),
},
}}
table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler {
r := &Reconciler{
contourClient: fakecontourclient.Get(ctx),
contourLister: listers.GetHTTPProxyLister(),
serviceLister: listers.GetK8sServiceLister(),
endpointsLister: listers.GetEndpointsLister(),
tracker: &NullTracker{},
statusManager: &fakeStatusManager{
FakeIsReady: func(context.Context, *v1alpha1.Ingress) (bool, error) {
return false, theError
},
},
}
return ingressreconciler.NewReconciler(ctx, logging.FromContext(ctx), servingclient.Get(ctx),
listers.GetIngressLister(), controller.GetEventRecorder(ctx), r,
controller.Options{
ConfigStore: &testConfigStore{
config: defaultConfig,
}})
}))
} | explode_data.jsonl/72442 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 702
} | [
2830,
3393,
693,
40446,
457,
81426,
1454,
1155,
353,
8840,
836,
8,
341,
32088,
1454,
1669,
5975,
7121,
445,
574,
374,
279,
1465,
5130,
26481,
1669,
6633,
2271,
90,
515,
197,
21297,
25,
262,
330,
3896,
63408,
6770,
78559,
756,
197,
552... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestNewConfigNoAuthType(t *testing.T) {
os.Setenv("AVP_TYPE", "vault")
viper := viper.New()
_, err := config.New(viper, &config.Options{})
expectedError := "Must provide a supported Authentication Type"
if err.Error() != expectedError {
t.Errorf("expected error %s to be thrown, got %s", expectedError, err)
}
os.Unsetenv("AVP_TYPE")
} | explode_data.jsonl/54093 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 127
} | [
2830,
3393,
3564,
2648,
2753,
5087,
929,
1155,
353,
8840,
836,
8,
341,
25078,
4202,
3160,
445,
8093,
47,
4189,
497,
330,
82983,
1138,
5195,
12858,
1669,
95132,
7121,
741,
197,
6878,
1848,
1669,
2193,
7121,
3747,
12858,
11,
609,
1676,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestServerShutdown(t *testing.T) {
const ncalls = 5
var (
ctx = context.Background()
server = mustServer(t)(NewServer())
addr, listener = newTestListener(t)
shutdownStarted = make(chan struct{})
shutdownFinished = make(chan struct{})
handlersStarted = make(chan struct{})
handlersStartedCloseOnce sync.Once
proceed = make(chan struct{})
serveErrs = make(chan error, 1)
callwg sync.WaitGroup
callErrs = make(chan error, ncalls)
shutdownErrs = make(chan error, 1)
client, cleanup = newTestClient(t, addr)
_, cleanup2 = newTestClient(t, addr) // secondary connection
)
defer cleanup()
defer cleanup2()
// register a service that takes until we tell it to stop
server.Register(serviceName, map[string]Method{
"Test": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
var req internal.TestPayload
if err := unmarshal(&req); err != nil {
return nil, err
}
handlersStartedCloseOnce.Do(func() { close(handlersStarted) })
<-proceed
return &internal.TestPayload{Foo: "waited"}, nil
},
})
go func() {
serveErrs <- server.Serve(ctx, listener)
}()
// send a series of requests that will get blocked
for i := 0; i < 5; i++ {
callwg.Add(1)
go func(i int) {
callwg.Done()
tp := internal.TestPayload{Foo: "half" + fmt.Sprint(i)}
callErrs <- client.Call(ctx, serviceName, "Test", &tp, &tp)
}(i)
}
<-handlersStarted
go func() {
close(shutdownStarted)
shutdownErrs <- server.Shutdown(ctx)
// server.Close()
close(shutdownFinished)
}()
<-shutdownStarted
close(proceed)
<-shutdownFinished
for i := 0; i < ncalls; i++ {
if err := <-callErrs; err != nil && err != ErrClosed {
t.Fatal(err)
}
}
if err := <-shutdownErrs; err != nil {
t.Fatal(err)
}
if err := <-serveErrs; err != ErrServerClosed {
t.Fatal(err)
}
checkServerShutdown(t, server)
} | explode_data.jsonl/41078 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 924
} | [
2830,
3393,
5475,
62004,
1155,
353,
8840,
836,
8,
341,
4777,
25126,
5583,
284,
220,
20,
198,
2405,
2399,
197,
20985,
2549,
284,
2266,
19047,
741,
197,
41057,
4293,
284,
1969,
5475,
1155,
2376,
3564,
5475,
2398,
197,
53183,
11,
11446,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestExtractNotNullCols(t *testing.T) {
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
testData := []struct {
c string
e []opt.ColumnID
}{
{ // 0
c: "/1: [/2 - ]",
e: []opt.ColumnID{1},
},
{ // 1
c: "/1: [ - /2]",
e: []opt.ColumnID{},
},
{ // 2
c: "/1: [/NULL - /4]",
e: []opt.ColumnID{},
},
{ // 3
c: "/1: (/NULL - /4]",
e: []opt.ColumnID{1},
},
{ // 4
c: "/-1: [ - /2]",
e: []opt.ColumnID{1},
},
{ // 5
c: "/-1: [/2 - ]",
e: []opt.ColumnID{},
},
{ // 6
c: "/-1: [/4 - /NULL]",
e: []opt.ColumnID{},
},
{ // 7
c: "/-1: [/4 - /NULL)",
e: []opt.ColumnID{1},
},
{ // 8
c: "/1/2/3: [/1/1/1 - /1/1/2] [/3/3/3 - /3/3/4]",
e: []opt.ColumnID{1, 2, 3},
},
{ // 9
c: "/1/2/3/4: [/1/1/1/1 - /1/1/2/1] [/3/3/3/1 - /3/3/4/1]",
e: []opt.ColumnID{1, 2, 3},
},
{ // 10
c: "/1/2/3: [/1/1 - /1/1/2] [/3/3/3 - /3/3/4]",
e: []opt.ColumnID{1, 2},
},
{ // 11
c: "/1/-2/-3: [/1/1/2 - /1/1] [/3/3/4 - /3/3/3]",
e: []opt.ColumnID{1, 2},
},
{ // 12
c: "/1/2/3: [/1/1/1 - /1/1/2] [/3/3/3 - /3/3/4] [/4/4/1 - /5]",
e: []opt.ColumnID{1},
},
{ // 13
c: "/1/2/3: [/1/1/NULL - /1/1/2] [/3/3/3 - /3/3/4]",
e: []opt.ColumnID{1, 2},
},
{ // 13
c: "/1/2/3: [/1/1/1 - /1/1/1] [/2/NULL/2 - /2/NULL/3]",
e: []opt.ColumnID{1, 3},
},
}
for i, tc := range testData {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
c := ParseConstraint(&evalCtx, tc.c)
cols := c.ExtractNotNullCols(&evalCtx)
if exp := opt.MakeColSet(tc.e...); !cols.Equals(exp) {
t.Errorf("expected %s; got %s", exp, cols)
}
})
}
} | explode_data.jsonl/59312 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1022
} | [
2830,
3393,
28959,
11005,
37567,
1155,
353,
8840,
836,
8,
341,
18388,
1669,
10652,
50133,
16451,
28678,
6086,
741,
93413,
23684,
1669,
4916,
50133,
16451,
54469,
1972,
5895,
692,
18185,
1043,
1669,
3056,
1235,
341,
197,
1444,
914,
198,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestAccStateChangeRollback(t *testing.T) {
cleanAndPrepare()
randVar := rand.New(rand.NewSource(time.Now().Unix()))
var testSize uint32
testSize = 1000
var accs []*protocol.AccTx
//Store accs that are to be changed and rolled back in a accTx slice
nullAddress := [64]byte{}
loopMax := int(randVar.Uint32()%testSize) + 1
for i := 0; i < loopMax; i++ {
tx, _, _ := protocol.ConstrAccTx(0, randVar.Uint64()%1000, nullAddress, PrivKeyRoot, nil, nil)
accs = append(accs, tx)
}
accStateChange(accs)
for _, acc := range accs {
accHash := protocol.SerializeHashContent(acc.PubKey)
acc := storage.State[accHash]
if acc == nil {
t.Errorf("Account State failed to update for the following account: %v\n", acc)
}
}
accStateChangeRollback(accs)
for _, acc := range accs {
accHash := protocol.SerializeHashContent(acc.PubKey)
acc := storage.State[accHash]
if acc != nil {
t.Errorf("Account State failed to rollback the following account: %v\n", acc)
}
}
} | explode_data.jsonl/41097 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 374
} | [
2830,
3393,
14603,
1397,
4072,
32355,
1419,
1155,
353,
8840,
836,
8,
341,
1444,
2675,
3036,
50590,
2822,
7000,
437,
3962,
1669,
10382,
7121,
37595,
7121,
3608,
9730,
13244,
1005,
55832,
49962,
2405,
1273,
1695,
2622,
18,
17,
198,
18185,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestHandler(t *testing.T) {
c, _ := newTestClient()
_, w, r, _, _ := tu.NewTestInstance("", c.DefaultPathConfigs, 200, "{}", nil, "rpc", "/health", "debug")
c.Handler(w, r)
if r.Header.Get("Test") == "" {
t.Error("expected non-empty header")
}
} | explode_data.jsonl/22514 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 106
} | [
2830,
3393,
3050,
1155,
353,
8840,
836,
8,
341,
1444,
11,
716,
1669,
501,
2271,
2959,
741,
197,
6878,
289,
11,
435,
11,
8358,
716,
1669,
9765,
7121,
2271,
2523,
19814,
272,
13275,
1820,
84905,
11,
220,
17,
15,
15,
11,
35503,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestExecuteInstall(t *testing.T) {
scc := New(NewMockProvider(), mockAclProvider)
assert.NotNil(t, scc)
stub := shim.NewMockStub("lscc", scc)
res := stub.MockInit("1", nil)
assert.Equal(t, int32(shim.OK), res.Status, res.Message)
err := scc.executeInstall(stub, []byte("barf"))
assert.Error(t, err)
} | explode_data.jsonl/46563 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 133
} | [
2830,
3393,
17174,
24690,
1155,
353,
8840,
836,
8,
341,
1903,
638,
1669,
1532,
35063,
11571,
5179,
1507,
7860,
32,
564,
5179,
340,
6948,
93882,
1155,
11,
274,
638,
340,
18388,
392,
1669,
62132,
7121,
11571,
33838,
445,
4730,
638,
497,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRuntimeBreakpoint(t *testing.T) {
withTestProcess("testruntimebreakpoint", t, func(p *proc.Target, fixture protest.Fixture) {
err := p.Continue()
if err != nil {
t.Fatal(err)
}
regs, err := p.CurrentThread().Registers()
assertNoError(err, t, "Registers")
pc := regs.PC()
f, l, _ := p.BinInfo().PCToLine(pc)
if l != 10 {
t.Fatalf("did not respect breakpoint %s:%d", f, l)
}
})
} | explode_data.jsonl/56211 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 176
} | [
2830,
3393,
15123,
22524,
2768,
1155,
353,
8840,
836,
8,
341,
46948,
2271,
7423,
445,
1944,
22255,
8960,
2768,
497,
259,
11,
2915,
1295,
353,
15782,
35016,
11,
12507,
8665,
991,
12735,
8,
341,
197,
9859,
1669,
281,
2451,
6232,
741,
19... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestFilterTracing(t *testing.T) {
for _, tc := range []struct {
name string
operation string
filters []string
params *OpenTracingParams
expectLogs string
}{
{
name: "enable log filter events",
operation: "request_filters",
filters: []string{"f1", "f2"},
params: &OpenTracingParams{LogFilterEvents: true},
expectLogs: "f1: start, f1: end, f2: start, f2: end",
},
{
name: "disable log filter events",
operation: "request_filters",
filters: []string{"f1", "f2"},
params: &OpenTracingParams{LogFilterEvents: false},
expectLogs: "",
},
{
name: "disable filter span (ignores log events)",
operation: "request_filters",
filters: []string{"f1", "f2"},
params: &OpenTracingParams{DisableFilterSpans: true, LogFilterEvents: true},
},
} {
t.Run(tc.name, func(t *testing.T) {
tracer := mocktracer.New()
tc.params.Tracer = tracer
tracing := newProxyTracing(tc.params)
ctx := &context{request: &http.Request{}}
ft := tracing.startFilterTracing(tc.operation, ctx)
for _, f := range tc.filters {
ft.logStart(f)
ft.logEnd(f)
}
ft.finish()
spans := tracer.FinishedSpans()
if tc.params.DisableFilterSpans {
assert.Nil(t, ctx.parentSpan)
assert.Len(t, spans, 0)
return
}
require.Len(t, spans, 1)
span := spans[0]
assert.Equal(t, span, ctx.parentSpan)
assert.Equal(t, tc.operation, span.OperationName)
assert.Equal(t, tc.expectLogs, spanLogs(span))
})
}
} | explode_data.jsonl/50652 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 697
} | [
2830,
3393,
5632,
1282,
4527,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
17130,
1669,
2088,
3056,
1235,
341,
197,
11609,
981,
914,
198,
197,
197,
9262,
220,
914,
198,
197,
1166,
8612,
262,
3056,
917,
198,
197,
25856,
257,
353,
5002,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestBrokerInitializeConditions(t *testing.T) {
tests := []struct {
name string
bs *BrokerStatus
want *BrokerStatus
}{{
name: "empty",
bs: &BrokerStatus{},
want: &BrokerStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: BrokerConditionAddressable,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionFilter,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionIngress,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionTriggerChannel,
Status: corev1.ConditionUnknown,
}},
},
},
}, {
name: "one false",
bs: &BrokerStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: BrokerConditionTriggerChannel,
Status: corev1.ConditionFalse,
}},
},
},
want: &BrokerStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: BrokerConditionAddressable,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionFilter,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionIngress,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionTriggerChannel,
Status: corev1.ConditionFalse,
}},
},
},
}, {
name: "one true",
bs: &BrokerStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: BrokerConditionFilter,
Status: corev1.ConditionTrue,
}},
},
},
want: &BrokerStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: BrokerConditionAddressable,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionFilter,
Status: corev1.ConditionTrue,
}, {
Type: BrokerConditionIngress,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: BrokerConditionTriggerChannel,
Status: corev1.ConditionUnknown,
}},
},
}},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
test.bs.InitializeConditions()
if diff := cmp.Diff(test.want, test.bs, ignoreAllButTypeAndStatus); diff != "" {
t.Error("unexpected conditions (-want, +got) =", diff)
}
})
}
} | explode_data.jsonl/46025 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1075
} | [
2830,
3393,
65545,
9928,
35435,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
93801,
256,
353,
65545,
2522,
198,
197,
50780,
353,
65545,
2522,
198,
197,
15170,
515,
197,
11609,
25,
330,
3194,
7... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestGetContainerInfoForMirrorPods(t *testing.T) {
// pods contain one static and one mirror pod with the same name but
// different UIDs.
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "1234",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "5678",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
}
containerID := "ab2cdf"
containerPath := fmt.Sprintf("/docker/%v", containerID)
containerInfo := cadvisorapi.ContainerInfo{
ContainerReference: cadvisorapi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
kubelet := testKubelet.kubelet
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
}
kubelet.podManager.SetPods(pods)
// Use the mirror pod UID to retrieve the stats.
stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if stats == nil {
t.Fatalf("stats should not be nil")
}
mockCadvisor.AssertExpectations(t)
} | explode_data.jsonl/43340 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 835
} | [
2830,
3393,
1949,
4502,
1731,
2461,
54216,
23527,
82,
1155,
353,
8840,
836,
8,
341,
197,
322,
54587,
6644,
825,
1099,
323,
825,
17846,
7509,
448,
279,
1852,
829,
714,
198,
197,
322,
2155,
547,
30466,
624,
3223,
29697,
1669,
29838,
206... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestSelectHost(t *testing.T) {
scheduler := genericScheduler{}
tests := []struct {
name string
list framework.NodeScoreList
possibleHosts sets.String
expectsErr bool
}{
{
name: "unique properly ordered scores",
list: []framework.NodeScore{
{Name: "machine1.1", Score: 1},
{Name: "machine2.1", Score: 2},
},
possibleHosts: sets.NewString("machine2.1"),
expectsErr: false,
},
{
name: "equal scores",
list: []framework.NodeScore{
{Name: "machine1.1", Score: 1},
{Name: "machine1.2", Score: 2},
{Name: "machine1.3", Score: 2},
{Name: "machine2.1", Score: 2},
},
possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
expectsErr: false,
},
{
name: "out of order scores",
list: []framework.NodeScore{
{Name: "machine1.1", Score: 3},
{Name: "machine1.2", Score: 3},
{Name: "machine2.1", Score: 2},
{Name: "machine3.1", Score: 1},
{Name: "machine1.3", Score: 3},
},
possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
expectsErr: false,
},
{
name: "empty priority list",
list: []framework.NodeScore{},
possibleHosts: sets.NewString(),
expectsErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// increase the randomness
for i := 0; i < 10; i++ {
got, err := scheduler.selectHost(test.list)
if test.expectsErr {
if err == nil {
t.Error("Unexpected non-error")
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !test.possibleHosts.Has(got) {
t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
}
}
}
})
}
} | explode_data.jsonl/2388 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 847
} | [
2830,
3393,
3379,
9296,
1155,
353,
8840,
836,
8,
341,
1903,
15222,
1669,
13954,
38878,
16094,
78216,
1669,
3056,
1235,
341,
197,
11609,
688,
914,
198,
197,
14440,
688,
12626,
21714,
10570,
852,
198,
197,
197,
10031,
9296,
82,
7289,
6431... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestCambpellTime(t *testing.T) {
for _, test := range timeTest {
mytime, err := time.Parse(time.RFC3339, test.in)
if err != nil {
log.Fatal(err)
}
year_rtm, day_rtm, hourminute := CampbellTime(mytime)
assert.Equal(t, test.year_rtm, year_rtm)
assert.Equal(t, test.day_rtm, day_rtm)
assert.Equal(t, test.hourminute, hourminute)
}
} | explode_data.jsonl/10751 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 160
} | [
2830,
3393,
91496,
37522,
1462,
1155,
353,
8840,
836,
8,
341,
2023,
8358,
1273,
1669,
2088,
882,
2271,
341,
197,
13624,
1678,
11,
1848,
1669,
882,
8937,
9730,
2013,
6754,
18,
18,
18,
24,
11,
1273,
1858,
340,
197,
743,
1848,
961,
209... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestFuncDecodeArgs(t *testing.T) {
t.Parallel()
tests := []struct {
Func core.Func
Input []byte
Args []interface{}
WantArgs []interface{}
}{
{
Func: MustNewFunc("test(address)", ""),
Input: B("0xffffffff000000000000000000000000000000000000000000000000000000000000c0fe"),
Args: []interface{}{new(common.Address)},
WantArgs: []interface{}{APtr("0x000000000000000000000000000000000000c0Fe")},
},
{
Func: MustNewFunc("test(uint256)", ""),
Input: B("0xffffffff000000000000000000000000000000000000000000000000000000000000002a"),
Args: []interface{}{new(big.Int)},
WantArgs: []interface{}{big.NewInt(42)},
},
{
Func: MustNewFunc("test(bool)", ""),
Input: B("0xffffffff0000000000000000000000000000000000000000000000000000000000000001"),
Args: []interface{}{boolPtr(false)},
WantArgs: []interface{}{boolPtr(true)},
},
{
Func: MustNewFunc("test(bytes32)", ""),
Input: B("0xffffffff0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
Args: []interface{}{&[32]byte{}},
WantArgs: []interface{}{&[32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}},
},
{
Func: MustNewFunc("test(bytes32)", ""),
Input: B("0xffffffff0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
Args: []interface{}{new(common.Hash)},
WantArgs: []interface{}{hashPtr(H("0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"))},
},
{
Func: MustNewFunc("test(bytes)", ""),
Input: B("0xffffffff000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000030102030000000000000000000000000000000000000000000000000000000000"),
Args: []interface{}{&[]byte{}},
WantArgs: []interface{}{&[]byte{1, 2, 3}},
},
{
Func: MustNewFunc("test((address arg0, uint256 arg1))", ""),
Input: B("0xffffffff000000000000000000000000000000000000000000000000000000000000c0fe000000000000000000000000000000000000000000000000000000000000002a"),
Args: []interface{}{new(tuple)},
WantArgs: []interface{}{&tuple{
Arg0: A("0x000000000000000000000000000000000000c0Fe"),
Arg1: big.NewInt(42),
}},
},
{
Func: MustNewFunc("test((address arg0, uint256 arg1))", ""),
Input: B("0xffffffff000000000000000000000000000000000000000000000000000000000000c0fe000000000000000000000000000000000000000000000000000000000000002a"),
Args: []interface{}{new(tupleWithWrongOrder)},
WantArgs: []interface{}{&tupleWithWrongOrder{
Arg1: big.NewInt(42),
Arg0: A("0x000000000000000000000000000000000000c0Fe"),
}},
},
{
Func: MustNewFunc("test((address arg0, uint256 arg1))", ""),
Input: B("0xffffffff000000000000000000000000000000000000000000000000000000000000c0fe000000000000000000000000000000000000000000000000000000000000002a"),
Args: []interface{}{new(tupleWithMoreArgs)},
WantArgs: []interface{}{&tupleWithMoreArgs{
Arg0: A("0x000000000000000000000000000000000000c0Fe"),
Arg1: big.NewInt(42),
}},
},
}
for i, test := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
if err := test.Func.DecodeArgs(test.Input, test.Args...); err != nil {
t.Fatalf("Failed to decode args: %v", err)
}
if diff := cmp.Diff(test.WantArgs, test.Args, cmp.AllowUnexported(big.Int{})); diff != "" {
t.Fatalf("(-want, +got)\n%s", diff)
}
})
}
} | explode_data.jsonl/68055 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1412
} | [
2830,
3393,
9626,
32564,
4117,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
78216,
1669,
3056,
1235,
341,
197,
197,
9626,
257,
6200,
69845,
198,
197,
66588,
262,
3056,
3782,
198,
197,
197,
4117,
257,
3056,
4970,
16094,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestGiveUp(t *testing.T) {
ctx := createSweeperTestContext(t)
resultChan0, err := ctx.sweeper.SweepInput(
spendableInputs[0], defaultFeePref,
)
if err != nil {
t.Fatal(err)
}
ctx.tick()
// We expect a sweep to be published at height 100 (mockChainIOHeight).
ctx.receiveTx()
// Because of MaxSweepAttemps, two more sweeps will be attempted. We
// configured exponential back-off without randomness for the test. The
// second attempt, we expect to happen at 101. The third attempt at 103.
// At that point, the input is expected to be failed.
// Second attempt
ctx.notifier.NotifyEpoch(101)
ctx.tick()
ctx.receiveTx()
// Third attempt
ctx.notifier.NotifyEpoch(103)
ctx.tick()
ctx.receiveTx()
ctx.expectResult(resultChan0, ErrTooManyAttempts)
ctx.backend.mine()
ctx.finish(1)
} | explode_data.jsonl/34232 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 288
} | [
2830,
3393,
35127,
2324,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
1855,
50,
896,
10436,
2271,
1972,
1155,
692,
9559,
46019,
15,
11,
1848,
1669,
5635,
514,
896,
10436,
808,
48542,
2505,
1006,
197,
1903,
3740,
480,
31946,
58,
15,
1125... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestDeployKEDA(t *testing.T) {
out, err := ExecuteCommandWithDir("make deploy", "..")
require.NoErrorf(t, err, "error deploying KEDA - %s", err)
t.Log(string(out))
t.Log("KEDA deployed successfully using 'make deploy' command")
} | explode_data.jsonl/1939 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 86
} | [
2830,
3393,
69464,
42,
86761,
1155,
353,
8840,
836,
8,
341,
13967,
11,
1848,
1669,
20848,
4062,
2354,
6184,
445,
6927,
10517,
497,
32213,
1138,
17957,
35699,
69,
1155,
11,
1848,
11,
330,
841,
60317,
730,
86761,
481,
1018,
82,
497,
184... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestPositionalPointer(t *testing.T) {
var args struct {
Input string `arg:"positional"`
Output []*string `arg:"positional"`
}
err := parse("foo bar baz", &args)
require.NoError(t, err)
assert.Equal(t, "foo", args.Input)
bar := "bar"
baz := "baz"
assert.Equal(t, []*string{&bar, &baz}, args.Output)
} | explode_data.jsonl/13001 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 136
} | [
2830,
3393,
3812,
278,
9084,
1155,
353,
8840,
836,
8,
341,
2405,
2827,
2036,
341,
197,
66588,
220,
914,
262,
1565,
858,
2974,
966,
3005,
8805,
197,
80487,
29838,
917,
1565,
858,
2974,
966,
3005,
8805,
197,
532,
9859,
1669,
4715,
445,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestValidateV1Beta1MissingCharts(t *testing.T) {
manifest := `---
apiVersion: manifests/v1beta1
metadata:
name: test-manifest
spec: {}
`
_, err := Validate(manifest)
if err == nil || !strings.Contains(err.Error(), "manifest validation errors") {
t.Errorf("Didn't get expected error from manifest.TestValidateV1Beta1MissingCharts(), instead got: %s", err)
}
} | explode_data.jsonl/80477 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 130
} | [
2830,
3393,
17926,
53,
16,
64811,
16,
25080,
64878,
1155,
353,
8840,
836,
8,
341,
197,
42315,
1669,
1565,
10952,
2068,
5637,
25,
83232,
5457,
16,
19127,
16,
198,
17637,
510,
220,
829,
25,
1273,
20477,
6962,
198,
9535,
25,
5613,
3989,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestNamespaceIndexHighConcurrentQueriesWithTimeouts(t *testing.T) {
testNamespaceIndexHighConcurrentQueries(t,
testNamespaceIndexHighConcurrentQueriesOptions{
withTimeouts: true,
})
} | explode_data.jsonl/34831 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 65
} | [
2830,
3393,
22699,
1552,
11976,
1109,
3231,
55261,
2354,
7636,
82,
1155,
353,
8840,
836,
8,
341,
18185,
22699,
1552,
11976,
1109,
3231,
55261,
1155,
345,
197,
18185,
22699,
1552,
11976,
1109,
3231,
55261,
3798,
515,
298,
46948,
7636,
82,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestValidateServerPort(t *testing.T) {
tests := []struct {
name string
in *networking.Port
out string
}{
{"empty", &networking.Port{}, "invalid protocol"},
{"empty", &networking.Port{}, "port name"},
{"happy",
&networking.Port{
Protocol: "http",
Number: 1,
Name: "Henry",
},
""},
{"invalid protocol",
&networking.Port{
Protocol: "kafka",
Number: 1,
Name: "Henry",
},
"invalid protocol"},
{"invalid number",
&networking.Port{
Protocol: "http",
Number: uint32(1 << 30),
Name: "http",
},
"port number"},
{"name, no number",
&networking.Port{
Protocol: "http",
Number: 0,
Name: "Henry",
},
""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateServerPort(tt.in)
if err == nil && tt.out != "" {
t.Fatalf("validateServerPort(%v) = nil, wanted %q", tt.in, tt.out)
} else if err != nil && tt.out == "" {
t.Fatalf("validateServerPort(%v) = %v, wanted nil", tt.in, err)
} else if err != nil && !strings.Contains(err.Error(), tt.out) {
t.Fatalf("validateServerPort(%v) = %v, wanted %q", tt.in, err, tt.out)
}
})
}
} | explode_data.jsonl/56907 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 580
} | [
2830,
3393,
17926,
5475,
7084,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
17430,
256,
353,
17511,
287,
43013,
198,
197,
13967,
220,
914,
198,
197,
59403,
197,
197,
4913,
3194,
497,
609,
1751... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestErrorStringRepresentation(t *testing.T) {
e := Error{
Type: ErrorThrottling,
Exception: "ThrottlingException",
AmazonRawType: "dynamodb#ThrottlingBlah",
Message: "FooBar",
}
assert.Equal(t, "dynago.Error(ErrorThrottling): ThrottlingException: FooBar", e.Error())
e.Exception = ""
assert.Equal(t, "dynago.Error(ErrorThrottling): dynamodb#ThrottlingBlah: FooBar", e.Error())
} | explode_data.jsonl/68121 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 178
} | [
2830,
3393,
1454,
703,
55903,
1155,
353,
8840,
836,
8,
341,
7727,
1669,
4600,
515,
197,
27725,
25,
688,
4600,
1001,
46689,
2718,
345,
197,
197,
1354,
25,
257,
330,
1001,
46689,
2718,
1354,
756,
197,
197,
25863,
20015,
929,
25,
330,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAlloc(t *testing.T) {
s := NewScope()
items := []ScopeItem{{550, 250}, {900, 600}, {10, 80}, {100, 400}}
size := uint64(0)
for _, v := range items {
s.Insert(v.Address, v.Size)
size += v.Size
}
//{10, 80}, {100, 400}, {550, 250}, {900, 600},
items = []ScopeItem{{10, 85}, {100, 400}, {550, 250}, {900, 600}}
size += 5
shouldAlloc(t, s, items, size, 5, 90)
size += 5
items = []ScopeItem{{10, 490}, {550, 250}, {900, 600}}
shouldAlloc(t, s, items, size, 5, 95)
size += 90
items = []ScopeItem{{10, 490}, {550, 340}, {900, 600}}
shouldAlloc(t, s, items, size, 90, 800)
size += 1000
items = []ScopeItem{{10, 490}, {550, 340}, {900, 1600}}
shouldAlloc(t, s, items, size, 1000, 1500)
} | explode_data.jsonl/17764 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 301
} | [
2830,
3393,
25154,
1155,
353,
8840,
836,
8,
341,
1903,
1669,
1532,
10803,
741,
46413,
1669,
3056,
10803,
1234,
2979,
20,
20,
15,
11,
220,
17,
20,
15,
2137,
314,
24,
15,
15,
11,
220,
21,
15,
15,
2137,
314,
16,
15,
11,
220,
23,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestCreateChain(t *testing.T) {
domain, err := makeTrivialDomain()
if err != nil {
log.Fatal(err)
}
tmpDir, err := ioutil.TempDir("", domainPath)
if err != nil {
log.Fatal(err)
}
st, err := tao.NewSoftTao(tmpDir, []byte("xxx"))
if err != nil {
t.Fatal(err)
}
numServers := 2
servers := make([]config.Server, numServers)
for i := 0; i < numServers; i++ {
server, err := NewServer(domain.ConfigPath, network, 8000+i, &x509Identity, st)
if err != nil {
log.Fatal(err)
}
go server.ServeForever()
serverCfg := config.Server{
Name: fmt.Sprintf("Test%d", i),
PublicKeyType: "ed25519",
PublicKey: server.publicKey,
Addresses: []config.ServerAddress{config.ServerAddress{network, fmt.Sprintf("localhost:%d", 8000+i)}},
}
servers[i] = serverCfg
}
time.Sleep(time.Second)
quorum := numServers
client, err := NewClient(domain.ConfigPath, network, quorum, servers)
if err != nil {
t.Fatal(err)
}
chain := &config.Chain{}
chain, err = client.Do(chain)
if err != nil {
t.Fatal(err)
}
} | explode_data.jsonl/24100 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 453
} | [
2830,
3393,
4021,
18837,
1155,
353,
8840,
836,
8,
341,
2698,
3121,
11,
1848,
1669,
1281,
1282,
26658,
13636,
741,
743,
1848,
961,
2092,
341,
197,
6725,
26133,
3964,
340,
197,
630,
20082,
6184,
11,
1848,
1669,
43144,
65009,
6184,
19814,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 8 |
func Test_hcsTask_DeleteExec_InitExecID_2ndExec_ExitedState_Success(t *testing.T) {
lt, init, second := setupTestHcsTask(t)
// put the init exec into the exited state
_ = init.Kill(context.TODO(), 0xf)
// put the 2nd exec into the exited state
_ = second.Kill(context.TODO(), 0xf)
// try to delete the init exec
pid, status, at, err := lt.DeleteExec(context.TODO(), "")
if err != nil {
t.Fatalf("expected nil err got: %v", err)
}
verifyDeleteSuccessValues(t, pid, status, at, init)
} | explode_data.jsonl/56383 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 185
} | [
2830,
3393,
1523,
4837,
6262,
57418,
10216,
15644,
10216,
915,
62,
17,
303,
10216,
62531,
1608,
1397,
87161,
1155,
353,
8840,
836,
8,
341,
197,
4832,
11,
2930,
11,
2086,
1669,
6505,
2271,
39,
4837,
6262,
1155,
692,
197,
322,
2182,
279... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestStorageLast(t *testing.T) {
storage := Init(driver.NewMemory())
const name = "angry-bird"
// Set up storage with test releases.
setup := func() {
// release records
rls0 := ReleaseTestData{Name: name, Version: 1, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls1 := ReleaseTestData{Name: name, Version: 2, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls2 := ReleaseTestData{Name: name, Version: 3, Status: rspb.Status_SUPERSEDED}.ToRelease()
rls3 := ReleaseTestData{Name: name, Version: 4, Status: rspb.Status_FAILED}.ToRelease()
// create the release records in the storage
assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
}
setup()
h, err := storage.Last(name)
if err != nil {
t.Fatalf("Failed to query for release history (%q): %s\n", name, err)
}
if h.Version != 4 {
t.Errorf("Expected revision 4, got %d", h.Version)
}
} | explode_data.jsonl/35131 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 446
} | [
2830,
3393,
5793,
5842,
1155,
353,
8840,
836,
8,
341,
197,
16172,
1669,
15690,
24032,
7121,
10642,
12367,
4777,
829,
284,
330,
524,
884,
1455,
2603,
1837,
197,
322,
2573,
705,
5819,
448,
1273,
19232,
624,
84571,
1669,
2915,
368,
341,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func Test_UpdateSecrets_DoesNotAddVolumeIfRequestSecretsIsNil(t *testing.T) {
request := requests.CreateFunctionRequest{
Service: "testfunc",
Secrets: nil,
}
existingSecrets := map[string]*apiv1.Secret{
"pullsecret": {Type: apiv1.SecretTypeDockercfg},
"testsecret": {Type: apiv1.SecretTypeOpaque, Data: map[string][]byte{"filename": []byte("contents")}},
}
deployment := &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: apiv1.PodTemplateSpec{
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{Name: "testfunc", Image: "alpine:latest"},
},
},
},
},
}
err := UpdateSecrets(request, deployment, existingSecrets)
if err != nil {
t.Errorf("unexpected error %s", err.Error())
}
validateEmptySecretVolumesAndMounts(t, deployment)
} | explode_data.jsonl/57940 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 317
} | [
2830,
3393,
47393,
19773,
82,
1557,
7072,
2623,
2212,
18902,
2679,
1900,
19773,
82,
3872,
19064,
1155,
353,
8840,
836,
8,
341,
23555,
1669,
7388,
7251,
5152,
1900,
515,
197,
91619,
25,
330,
1944,
2830,
756,
197,
7568,
50856,
82,
25,
2... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestPEXReactorReceive(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
defer teardownReactor(book)
peer := p2p.CreateRandomPeer(false)
// we have to send a request to receive responses
r.RequestAddrs(peer)
size := book.Size()
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
r.Receive(PexChannel, peer, msg)
assert.Equal(t, size+1, book.Size())
msg = cdc.MustMarshalBinary(&pexRequestMessage{})
r.Receive(PexChannel, peer, msg) // should not panic.
} | explode_data.jsonl/6126 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 209
} | [
2830,
3393,
1740,
55,
693,
5621,
14742,
1155,
353,
8840,
836,
8,
341,
7000,
11,
2311,
1669,
1855,
693,
5621,
2099,
1740,
55,
693,
5621,
2648,
37790,
16867,
49304,
693,
5621,
33130,
692,
197,
16537,
1669,
281,
17,
79,
7251,
13999,
3088... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestInt32_IsSuperset(t *testing.T) {
testcases := []struct {
name string
s Int32
t Int32
expect bool
}{
{
name: "test Int32 IsSuperset, s is empty",
s: Int32{},
t: map[int32]struct{}{2: {}, 9: {}, 4: {}},
expect: false,
},
{
name: "test Int32 IsSuperset, s is empty",
s: map[int32]struct{}{2: {}, 9: {}, 4: {}},
t: Int32{},
expect: true,
},
{
name: "test Int32 IsSuperset, s ⊂ s",
s: map[int32]struct{}{2: {}, 9: {}},
t: map[int32]struct{}{2: {}, 9: {}, 4: {}},
expect: false,
},
{
name: "test Int32 IsSuperset, s ⊃ s",
s: map[int32]struct{}{2: {}, 9: {}, 4: {}},
t: map[int32]struct{}{2: {}, 9: {}},
expect: true,
},
{
name: "test Int32 IsSuperset, s = s",
s: map[int32]struct{}{2: {}, 9: {}, 4: {}},
t: map[int32]struct{}{2: {}, 9: {}, 4: {}},
expect: true,
},
{
name: "test Int32 IsSuperset, s ∩ s = Ø",
s: map[int32]struct{}{1: {}, 4: {}},
t: map[int32]struct{}{2: {}, 6: {}},
expect: false,
},
{
name: "test Int32 IsSuperset, s ∩ s ≠ Ø && s ∩ s ≠ s",
s: map[int32]struct{}{1: {}, 4: {}},
t: map[int32]struct{}{1: {}, 6: {}},
expect: false,
},
}
for _, tc := range testcases {
t.Logf("running scenario: %s", tc.name)
actual := tc.s.IsSuperset(tc.t)
if actual != tc.expect {
t.Errorf("expect retrun: %v, but got: %v", tc.expect, actual)
}
}
} | explode_data.jsonl/62336 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 828
} | [
2830,
3393,
1072,
18,
17,
31879,
10048,
44146,
1155,
353,
8840,
836,
8,
341,
18185,
23910,
1669,
3056,
1235,
341,
197,
11609,
256,
914,
198,
197,
1903,
414,
1333,
18,
17,
198,
197,
3244,
414,
1333,
18,
17,
198,
197,
24952,
1807,
198... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
func TestCaffe(t *testing.T) {
checkNet := func(t *testing.T, net Net) {
img := IMRead("images/space_shuttle.jpg", IMReadColor)
if img.Empty() {
t.Error("Invalid Mat in Caffe test")
}
defer img.Close()
blob := BlobFromImage(img, 1.0, image.Pt(224, 224), NewScalar(104, 117, 123, 0), false, false)
if blob.Empty() {
t.Error("Invalid blob in Caffe test")
}
defer blob.Close()
net.SetInput(blob, "data")
prob := net.Forward("prob")
defer prob.Close()
if prob.Empty() {
t.Error("Invalid prob in Caffe test")
}
probMat := prob.Reshape(1, 1)
defer probMat.Close()
_, maxVal, minLoc, maxLoc := MinMaxLoc(probMat)
if round(float64(maxVal), 0.00005) != 0.99995 {
t.Errorf("Caffe maxVal incorrect: %v\n", round(float64(maxVal), 0.00005))
}
if minLoc.X != 793 || minLoc.Y != 0 {
t.Errorf("Caffe minLoc incorrect: %v\n", minLoc)
}
if maxLoc.X != 812 || maxLoc.Y != 0 {
t.Errorf("Caffe maxLoc incorrect: %v\n", maxLoc)
}
}
path := os.Getenv("GOCV_CAFFE_TEST_FILES")
if path == "" {
t.Skip("Unable to locate Caffe model files for tests")
}
t.Run("net from disk", func(t *testing.T) {
net := ReadNetFromCaffe(path+"/bvlc_googlenet.prototxt", path+"/bvlc_googlenet.caffemodel")
if net.Empty() {
t.Errorf("Unable to load Caffe model")
}
defer net.Close()
checkNet(t, net)
})
t.Run("net from memory", func(t *testing.T) {
bPrototxt, err := ioutil.ReadFile(path + "/bvlc_googlenet.prototxt")
if err != nil {
t.Errorf("Failed to load Caffe prototxt from file: %v", err)
}
bCaffeModel, err := ioutil.ReadFile(path + "/bvlc_googlenet.caffemodel")
if err != nil {
t.Errorf("Failed to load Caffe caffemodel from file: %v", err)
}
net, err := ReadNetFromCaffeBytes(bPrototxt, bCaffeModel)
if err != nil {
t.Errorf("Error reading caffe from bytes: %v", err)
}
if net.Empty() {
t.Errorf("Unable to load Caffe model")
}
defer net.Close()
checkNet(t, net)
})
} | explode_data.jsonl/31265 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 879
} | [
2830,
3393,
34,
37780,
1155,
353,
8840,
836,
8,
341,
25157,
6954,
1669,
2915,
1155,
353,
8840,
836,
11,
4179,
9374,
8,
341,
197,
39162,
1669,
6517,
4418,
445,
3642,
2687,
1306,
3712,
98243,
4819,
497,
6517,
4418,
1636,
340,
197,
743,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 9 |
func TestMakeEmailPrimary(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
email := &EmailAddress{
Email: "user567890@example.com",
}
err := MakeEmailPrimary(email)
assert.Error(t, err)
assert.EqualError(t, err, ErrEmailAddressNotExist{Email: email.Email}.Error())
email = &EmailAddress{
Email: "user11@example.com",
}
err = MakeEmailPrimary(email)
assert.Error(t, err)
assert.EqualError(t, err, ErrEmailNotActivated.Error())
email = &EmailAddress{
Email: "user9999999@example.com",
}
err = MakeEmailPrimary(email)
assert.Error(t, err)
assert.True(t, IsErrUserNotExist(err))
email = &EmailAddress{
Email: "user101@example.com",
}
err = MakeEmailPrimary(email)
assert.NoError(t, err)
user, _ := GetUserByID(int64(10))
assert.Equal(t, "user101@example.com", user.Email)
} | explode_data.jsonl/67890 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 315
} | [
2830,
3393,
8078,
4781,
15972,
1155,
353,
8840,
836,
8,
341,
6948,
35699,
1155,
11,
19905,
28770,
3380,
2271,
5988,
12367,
57549,
1669,
609,
79986,
515,
197,
197,
4781,
25,
330,
872,
20,
21,
22,
23,
24,
15,
35487,
905,
756,
197,
532... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestSessionsController_Destroy(t *testing.T) {
t.Parallel()
app := cltest.NewApplicationEVMDisabled(t)
require.NoError(t, app.Start())
correctSession := sessions.NewSession()
q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig())
mustInsertSession(t, q, &correctSession)
config := app.GetConfig()
client := http.Client{}
tests := []struct {
name, sessionID string
success bool
}{
{"correct cookie", correctSession.ID, true},
{"incorrect cookie", "wrongsessionid", false},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cookie := cltest.MustGenerateSessionCookie(t, test.sessionID)
request, err := http.NewRequest("DELETE", config.ClientNodeURL()+"/sessions", nil)
assert.NoError(t, err)
request.AddCookie(cookie)
resp, err := client.Do(request)
assert.NoError(t, err)
_, err = app.SessionORM().AuthorizedUserWithSession(test.sessionID)
assert.Error(t, err)
if test.success {
assert.Equal(t, http.StatusOK, resp.StatusCode)
} else {
assert.True(t, resp.StatusCode >= 400, "Should get an erroneous status code for deleting a nonexistent session id")
}
})
}
} | explode_data.jsonl/12810 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 442
} | [
2830,
3393,
59062,
2051,
79266,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
28236,
1669,
1185,
1944,
7121,
4988,
36,
11187,
25907,
1155,
340,
17957,
35699,
1155,
11,
906,
12101,
12367,
1444,
27034,
5283,
1669,
15704,
7121,
5283... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestMarkEnd(t *testing.T) {
assert := assert.New(t)
assert.NoError(db.ClearCollections(task.Collection, build.Collection, VersionCollection),
"Error clearing collections")
displayName := "testName"
userName := "testUser"
b := &build.Build{
Id: "buildtest",
Status: evergreen.BuildStarted,
Version: "abc",
}
v := &Version{
Id: b.Version,
Status: evergreen.VersionStarted,
Config: "identifier: sample",
}
testTask := task.Task{
Id: "testone",
DisplayName: displayName,
Activated: true,
BuildId: b.Id,
Project: "sample",
Status: evergreen.TaskStarted,
Version: b.Version,
}
b.Tasks = []build.TaskCache{
{
Id: testTask.Id,
Status: evergreen.TaskStarted,
Activated: true,
},
}
assert.NoError(b.Insert())
assert.NoError(testTask.Insert())
assert.NoError(v.Insert())
updates := StatusChanges{}
details := apimodels.TaskEndDetail{
Status: evergreen.TaskFailed,
}
assert.NoError(MarkEnd(&testTask, userName, time.Now(), &details, false, &updates))
assert.Equal(evergreen.BuildFailed, updates.BuildNewStatus)
Convey("with a task that is part of a display task", t, func() {
p := &Project{
Identifier: "sample",
}
b := &build.Build{
Id: "displayBuild",
Project: p.Identifier,
Version: "version1",
Tasks: []build.TaskCache{
{Id: "displayTask", Activated: true, Status: evergreen.TaskStarted},
},
}
So(b.Insert(), ShouldBeNil)
v := &Version{
Id: b.Version,
Status: evergreen.VersionStarted,
}
So(v.Insert(), ShouldBeNil)
dt := &task.Task{
Id: "displayTask",
Activated: true,
BuildId: b.Id,
Status: evergreen.TaskStarted,
DisplayOnly: true,
ExecutionTasks: []string{"execTask"},
}
So(dt.Insert(), ShouldBeNil)
t1 := &task.Task{
Id: "execTask",
Activated: true,
BuildId: b.Id,
Status: evergreen.TaskStarted,
}
So(t1.Insert(), ShouldBeNil)
detail := &apimodels.TaskEndDetail{
Status: evergreen.TaskSucceeded,
}
So(MarkEnd(t1, "test", time.Now(), detail, false, &updates), ShouldBeNil)
t1FromDb, err := task.FindOne(task.ById(t1.Id))
So(err, ShouldBeNil)
So(t1FromDb.Status, ShouldEqual, evergreen.TaskSucceeded)
dtFromDb, err := task.FindOne(task.ById(dt.Id))
So(err, ShouldBeNil)
So(dtFromDb.Status, ShouldEqual, evergreen.TaskSucceeded)
dbBuild, err := build.FindOne(build.ById(b.Id))
So(err, ShouldBeNil)
So(dbBuild.Tasks[0].Status, ShouldEqual, evergreen.TaskSucceeded)
})
} | explode_data.jsonl/60431 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1128
} | [
2830,
3393,
8949,
3727,
1155,
353,
8840,
836,
8,
341,
6948,
1669,
2060,
7121,
1155,
340,
6948,
35699,
9791,
13524,
52730,
17483,
28629,
11,
1936,
28629,
11,
6079,
6482,
1326,
197,
197,
1,
1454,
32750,
15302,
5130,
31271,
675,
1669,
330,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestGC_TrackDeletedLayers(t *testing.T) {
require.NoError(t, testutil.TruncateAllTables(suite.db))
// disable other triggers that also insert on gc_blob_review_queue so that they don't interfere with this test
enable, err := testutil.GCTrackBlobUploadsTrigger.Disable(suite.db)
require.NoError(t, err)
defer enable()
// create repo
r := randomRepository(t)
rs := datastore.NewRepositoryStore(suite.db)
r, err = rs.CreateByPath(suite.ctx, r.Path)
require.NoError(t, err)
// create layer blob
bs := datastore.NewBlobStore(suite.db)
b := randomBlob(t)
err = bs.Create(suite.ctx, b)
require.NoError(t, err)
err = rs.LinkBlob(suite.ctx, r, b.Digest)
require.NoError(t, err)
// create manifest
ms := datastore.NewManifestStore(suite.db)
m := randomManifest(t, r, nil)
err = ms.Create(suite.ctx, m)
require.NoError(t, err)
// associate layer with manifest
err = ms.AssociateLayerBlob(suite.ctx, m, b)
require.NoError(t, err)
// confirm that the review queue remains empty
brs := datastore.NewGCBlobTaskStore(suite.db)
count, err := brs.Count(suite.ctx)
require.NoError(t, err)
require.Zero(t, count)
// dissociate layer blob
err = ms.DissociateLayerBlob(suite.ctx, m, b)
require.NoError(t, err)
// check that a corresponding task was created for the layer blob and scheduled for 1 day ahead
tt, err := brs.FindAll(suite.ctx)
require.NoError(t, err)
require.Equal(t, 1, len(tt))
require.Equal(t, 0, tt[0].ReviewCount)
require.Equal(t, b.Digest, tt[0].Digest)
// ignore the few milliseconds between blob creation and queueing for review in response to the layer dissociation
require.WithinDuration(t, tt[0].ReviewAfter, b.CreatedAt.Add(24*time.Hour), 200*time.Millisecond)
} | explode_data.jsonl/48567 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 645
} | [
2830,
3393,
22863,
21038,
473,
26039,
40235,
1155,
353,
8840,
836,
8,
341,
17957,
35699,
1155,
11,
1273,
1314,
8240,
26900,
2403,
21670,
89516,
7076,
4390,
197,
322,
11156,
1008,
30754,
429,
1083,
5656,
389,
22122,
45908,
38661,
10841,
77... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestConfirmBlocks_67_33_4(t *testing.T) {
testConfirmBlocks(t, []pos.Weight{11, 11, 11, 67}, 3)
} | explode_data.jsonl/41397 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 45
} | [
2830,
3393,
16728,
29804,
62,
21,
22,
62,
18,
18,
62,
19,
1155,
353,
8840,
836,
8,
341,
18185,
16728,
29804,
1155,
11,
3056,
966,
73791,
90,
16,
16,
11,
220,
16,
16,
11,
220,
16,
16,
11,
220,
21,
22,
2137,
220,
18,
340,
92
] | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1
] | 1 |
func TestServer_Response_Automatic100Continue(t *testing.T) {
const msg = "foo"
const reply = "bar"
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
if v := r.Header.Get("Expect"); v != "" {
t.Errorf("Expect header = %q; want empty", v)
}
buf := make([]byte, len(msg))
// This read should trigger the 100-continue being sent.
if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
}
_, err := io.WriteString(w, reply)
return err
}, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
EndStream: false,
EndHeaders: true,
})
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "100"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Fatalf("Got headers %v; want %v", goth, wanth)
}
// Okay, they sent status 100, so we can send our
// gigantic and/or sensitive "foo" payload now.
st.writeData(1, true, []byte(msg))
st.wantWindowUpdate(0, uint32(len(msg)))
hf = st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("expected data to follow")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth = st.decodeHeader(hf.HeaderBlockFragment())
wanth = [][2]string{
{":status", "200"},
{"content-type", "text/plain; charset=utf-8"},
{"content-length", strconv.Itoa(len(reply))},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
df := st.wantData()
if string(df.Data()) != reply {
t.Errorf("Client read %q; want %q", df.Data(), reply)
}
if !df.StreamEnded() {
t.Errorf("expect data stream end")
}
})
} | explode_data.jsonl/71673 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 866
} | [
2830,
3393,
5475,
65873,
1566,
332,
13487,
16,
15,
15,
23526,
1155,
353,
8840,
836,
8,
341,
4777,
3750,
284,
330,
7975,
698,
4777,
9851,
284,
330,
2257,
698,
18185,
5475,
2582,
1155,
11,
2915,
3622,
1758,
37508,
11,
435,
353,
1254,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestObserverExcludesImagesIntegration(t *testing.T) {
c := container.New(t)
c.StartImage("docker.io/library/nginx:1.17", container.WithPortReady(80))
config := NewFactory().CreateDefaultConfig().(*Config)
config.ExcludedImages = []string{"*nginx*"}
mn := &mockNotifier{endpointsMap: map[observer.EndpointID]observer.Endpoint{}}
obvs := startObserverWithConfig(t, mn, config)
defer stopObserver(t, obvs)
time.Sleep(2 * time.Second) // wait for endpoints to sync
require.Equal(t, 0, mn.AddCount())
require.Equal(t, 0, mn.ChangeCount())
require.Empty(t, mn.EndpointsMap())
} | explode_data.jsonl/32813 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 216
} | [
2830,
3393,
17151,
840,
7396,
14228,
52464,
1155,
353,
8840,
836,
8,
341,
1444,
1669,
5476,
7121,
1155,
340,
1444,
12101,
1906,
445,
28648,
4245,
45446,
69261,
25,
16,
13,
16,
22,
497,
5476,
26124,
7084,
19202,
7,
23,
15,
4390,
25873,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestRecursiveMapType(t *testing.T) {
type recursiveMap map[string]recursiveMap
r1 := recursiveMap{"A": recursiveMap{"B": nil, "C": nil}, "D": nil}
r2 := make(recursiveMap)
if err := encAndDec(r1, &r2); err != nil {
t.Error(err)
}
} | explode_data.jsonl/43389 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 98
} | [
2830,
3393,
78542,
2227,
929,
1155,
353,
8840,
836,
8,
341,
13158,
30819,
2227,
2415,
14032,
60,
49512,
2227,
198,
7000,
16,
1669,
30819,
2227,
4913,
32,
788,
30819,
2227,
4913,
33,
788,
2092,
11,
330,
34,
788,
2092,
2137,
330,
35,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestKubeStateMetrics(t *testing.T) {
c, err := NewConfigFromString(``)
if err != nil {
t.Fatal(err)
}
c.SetImages(map[string]string{
"kube-state-metrics": "docker.io/openshift/origin-kube-state-metrics:latest",
"kube-rbac-proxy": "docker.io/openshift/origin-kube-rbac-proxy:latest",
})
f := NewFactory("openshift-monitoring", c)
d, err := f.KubeStateMetricsDeployment()
if err != nil {
t.Fatal(err)
}
if d.Spec.Template.Spec.Containers[0].Image != "docker.io/openshift/origin-kube-rbac-proxy:latest" {
t.Fatal("kube-rbac-proxy image incorrectly configured")
}
if d.Spec.Template.Spec.Containers[1].Image != "docker.io/openshift/origin-kube-rbac-proxy:latest" {
t.Fatal("kube-rbac-proxy image incorrectly configured")
}
if d.Spec.Template.Spec.Containers[2].Image != "docker.io/openshift/origin-kube-state-metrics:latest" {
t.Fatal("kube-state-metrics image incorrectly configured")
}
} | explode_data.jsonl/68661 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 367
} | [
2830,
3393,
42,
3760,
1397,
27328,
1155,
353,
8840,
836,
8,
341,
1444,
11,
1848,
1669,
1532,
2648,
44491,
5809,
24183,
743,
1848,
961,
2092,
341,
197,
3244,
26133,
3964,
340,
197,
532,
1444,
4202,
14228,
9147,
14032,
30953,
515,
197,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
func TestEtcdDeleteService(t *testing.T) {
ctx := api.NewDefaultContext()
fakeClient := tools.NewFakeEtcdClient(t)
registry := NewTestEtcdRegistry(fakeClient)
err := registry.DeleteService(ctx, "foo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(fakeClient.DeletedKeys) != 2 {
t.Errorf("Expected 2 delete, found %#v", fakeClient.DeletedKeys)
}
key, _ := makeServiceKey(ctx, "foo")
if fakeClient.DeletedKeys[0] != key {
t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key)
}
key, _ = makeServiceEndpointsKey(ctx, "foo")
if fakeClient.DeletedKeys[1] != key {
t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[1], key)
}
} | explode_data.jsonl/8170 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 271
} | [
2830,
3393,
31860,
4385,
6435,
1860,
1155,
353,
8840,
836,
8,
341,
20985,
1669,
6330,
7121,
3675,
1972,
741,
1166,
726,
2959,
1669,
7375,
7121,
52317,
31860,
4385,
2959,
1155,
340,
197,
29172,
1669,
1532,
2271,
31860,
4385,
15603,
74138,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestScanForKeys(t *testing.T) {
numKeys := 1000
fixtures := []keyFixture{}
// Make 1000 keys that match
for i := 0; i < numKeys; i++ {
key := fmt.Sprintf("get_keys_test_shouldmatch_%v", i)
fixtures = append(fixtures, newKeyFixture("SET", key, "Woohoo!"))
}
// And 1000 that don't
for i := 0; i < numKeys; i++ {
key := fmt.Sprintf("get_keys_test_shouldnotmatch_%v", i)
fixtures = append(fixtures, newKeyFixture("SET", key, "Rats!"))
}
addr := os.Getenv("TEST_REDIS_URI")
db := dbNumStr
c, err := redis.DialURL(addr)
if err != nil {
t.Fatalf("Couldn't connect to %#v: %#v", addr, err)
}
_, err = c.Do("SELECT", db)
if err != nil {
t.Errorf("Couldn't select database %#v", db)
}
defer func() {
deleteKeyFixtures(t, c, fixtures)
c.Close()
}()
createKeyFixtures(t, c, fixtures)
matches, err := scanForKeys(c, "get_keys_test_*shouldmatch*")
if err != nil {
t.Errorf("Error getting keys matching a pattern: %#v", err)
}
numMatches := len(matches)
if numMatches != numKeys {
t.Errorf("Expected %#v matches, got %#v.", numKeys, numMatches)
}
for _, match := range matches {
if !strings.HasPrefix(match, "get_keys_test_shouldmatch") {
t.Errorf("Expected match to have prefix: get_keys_test_shouldmatch")
}
}
} | explode_data.jsonl/46988 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 518
} | [
2830,
3393,
26570,
2461,
8850,
1155,
353,
8840,
836,
8,
341,
22431,
8850,
1669,
220,
16,
15,
15,
15,
198,
1166,
941,
18513,
1669,
3056,
792,
18930,
31483,
197,
322,
7405,
220,
16,
15,
15,
15,
6894,
429,
2432,
198,
2023,
600,
1669,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestIamRolesLoadingFromDifferentModules(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_IAM_ROLES_MULTIPLE_MODULES)
// Execution outputs to be verified
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
// Invoke terragrunt and verify used IAM roles for each dependency
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt init --terragrunt-log-level debug --terragrunt-debugreset --terragrunt-working-dir %s", TEST_FIXTURE_IAM_ROLES_MULTIPLE_MODULES), &stdout, &stderr)
// Taking all outputs in one string
output := fmt.Sprintf("%v %v %v", string(stderr.Bytes()), string(stdout.Bytes()), err.Error())
component1 := ""
component2 := ""
// scan each output line and get lines for component1 and component2
for _, line := range strings.Split(output, "\n") {
if strings.Contains(line, "Assuming IAM role arn:aws:iam::component1:role/terragrunt") {
component1 = line
continue
}
if strings.Contains(line, "Assuming IAM role arn:aws:iam::component2:role/terragrunt") {
component2 = line
continue
}
}
assert.NotEmptyf(t, component1, "Missing role for component 1")
assert.NotEmptyf(t, component2, "Missing role for component 2")
assert.Contains(t, component1, "iam_roles_multiple_modules/component")
assert.Contains(t, component2, "iam_roles_multiple_modules/component2")
} | explode_data.jsonl/10176 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 479
} | [
2830,
3393,
40,
309,
25116,
8578,
3830,
69123,
28201,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
1444,
60639,
51,
13886,
627,
13682,
1155,
11,
13602,
42635,
41486,
7959,
1402,
8302,
14017,
29207,
52819,
95742,
692,
197,
322,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 4 |
func TestMemberAddEmailBulk(t *testing.T) {
tc, _, name := memberSetup(t)
defer tc.Cleanup()
blob := "u1@keybase.io, u2@keybase.io\nu3@keybase.io,u4@keybase.io, u5@keybase.io,u6@keybase.io, u7@keybase.io\n\n\nFull Name <fullname@keybase.io>, Someone Else <someone@keybase.io>,u8@keybase.io\n\nXXXXXXXXXXXX"
res, err := AddEmailsBulk(context.TODO(), tc.G, name, blob, keybase1.TeamRole_WRITER)
if err != nil {
t.Fatal(err)
}
emails := []string{"u1@keybase.io", "u2@keybase.io", "u3@keybase.io", "u4@keybase.io", "u5@keybase.io", "u6@keybase.io", "u7@keybase.io", "fullname@keybase.io", "someone@keybase.io", "u8@keybase.io"}
if len(res.Invited) != len(emails) {
t.Logf("invited: %+v", res.Invited)
t.Errorf("num invited: %d, expected %d", len(res.Invited), len(emails))
}
if len(res.AlreadyInvited) != 0 {
t.Errorf("num already invited: %d, expected 0", len(res.AlreadyInvited))
}
require.Len(t, res.Malformed, 1)
for _, e := range emails {
assertInvite(tc, name, e, "email", keybase1.TeamRole_WRITER)
}
} | explode_data.jsonl/13525 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 463
} | [
2830,
3393,
9366,
2212,
4781,
88194,
1155,
353,
8840,
836,
8,
341,
78255,
11,
8358,
829,
1669,
4462,
21821,
1155,
340,
16867,
17130,
727,
60639,
2822,
2233,
1684,
1669,
330,
84,
16,
31,
792,
3152,
4245,
11,
575,
17,
31,
792,
3152,
4... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestTaskRunSpec_Validate(t *testing.T) {
tests := []struct {
name string
spec v1beta1.TaskRunSpec
}{{
name: "taskspec without a taskRef",
spec: v1beta1.TaskRunSpec{
TaskSpec: &v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "mystep",
Image: "myimage",
}}},
},
},
}, {
name: "no timeout",
spec: v1beta1.TaskRunSpec{
Timeout: &metav1.Duration{Duration: 0},
TaskSpec: &v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "mystep",
Image: "myimage",
}}},
},
},
}, {
name: "parameters",
spec: v1beta1.TaskRunSpec{
Timeout: &metav1.Duration{Duration: 0},
Params: []v1beta1.Param{{
Name: "name",
Value: *v1beta1.NewArrayOrString("value"),
}},
TaskSpec: &v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "mystep",
Image: "myimage",
}}},
},
},
}, {
name: "task spec with credentials.path variable",
spec: v1beta1.TaskRunSpec{
TaskSpec: &v1beta1.TaskSpec{
Steps: []v1beta1.Step{{
Container: corev1.Container{
Name: "mystep",
Image: "myimage",
},
Script: `echo "creds-init writes to $(credentials.path)"`,
}},
},
},
}}
for _, ts := range tests {
t.Run(ts.name, func(t *testing.T) {
if err := ts.spec.Validate(context.Background()); err != nil {
t.Errorf("TaskRunSpec.Validate()/%s error = %v", ts.name, err)
}
})
}
} | explode_data.jsonl/82030 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 709
} | [
2830,
3393,
6262,
6727,
8327,
62,
17926,
1155,
353,
8840,
836,
8,
341,
78216,
1669,
3056,
1235,
341,
197,
11609,
914,
198,
197,
98100,
348,
16,
19127,
16,
28258,
6727,
8327,
198,
197,
15170,
515,
197,
11609,
25,
330,
24760,
992,
2041,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 2 |
func TestTearDown(t *testing.T) {
f := newFixture(t)
defer f.teardown()
t1 := time.Unix(1, 0)
f.resource("foo", "foo.sh", t1)
f.resource("bar", "bar.sh", t1)
f.step()
f.c.TearDown(f.ctx)
f.fe.RequireNoKnownProcess(t, "foo.sh")
f.fe.RequireNoKnownProcess(t, "bar.sh")
} | explode_data.jsonl/53810 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 138
} | [
2830,
3393,
51,
682,
4454,
1155,
353,
8840,
836,
8,
341,
1166,
1669,
501,
18930,
1155,
340,
16867,
282,
31853,
37496,
2822,
3244,
16,
1669,
882,
10616,
941,
7,
16,
11,
220,
15,
340,
1166,
24013,
445,
7975,
497,
330,
7975,
2395,
497,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAbsURLify(t *testing.T) {
t.Parallel()
sources := []source.ByteSource{
{Name: filepath.FromSlash("sect/doc1.html"), Content: []byte("<!doctype html><html><head></head><body><a href=\"#frag1\">link</a></body></html>")},
{Name: filepath.FromSlash("blue/doc2.html"), Content: []byte("---\nf: t\n---\n<!doctype html><html><body>more content</body></html>")},
}
for _, baseURL := range []string{"http://auth/bub", "http://base", "//base"} {
for _, canonify := range []bool{true, false} {
cfg, fs := newTestCfg()
cfg.Set("defaultExtension", "html")
cfg.Set("uglyURLs", true)
cfg.Set("canonifyURLs", canonify)
cfg.Set("baseURL", baseURL)
for _, src := range sources {
writeSource(t, fs, filepath.Join("content", src.Name), string(src.Content))
}
writeSource(t, fs, filepath.Join("layouts", "blue/single.html"), templateWithURLAbs)
s := buildSingleSite(t, deps.DepsCfg{Fs: fs, Cfg: cfg}, BuildCfg{})
th := testHelper{s.Cfg, s.Fs, t}
tests := []struct {
file, expected string
}{
{"public/blue/doc2.html", "<a href=\"%s/foobar.jpg\">Going</a>"},
{"public/sect/doc1.html", "<!doctype html><html><head></head><body><a href=\"#frag1\">link</a></body></html>"},
}
for _, test := range tests {
expected := test.expected
if strings.Contains(expected, "%s") {
expected = fmt.Sprintf(expected, baseURL)
}
if !canonify {
expected = strings.Replace(expected, baseURL, "", -1)
}
th.assertFileContent(test.file, expected)
}
}
}
} | explode_data.jsonl/40672 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 640
} | [
2830,
3393,
27778,
3144,
1437,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
741,
1903,
2360,
1669,
3056,
2427,
32119,
3608,
515,
197,
197,
63121,
25,
26054,
11439,
88004,
445,
9687,
39510,
16,
2564,
3975,
8883,
25,
3056,
3782,
9639,... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 7 |
func TestBuildResolvers(t *testing.T) {
ipOne := net.ParseIP("192.0.0.1")
ipTwo := net.ParseIP("2001:db8:1234:0000:0000:0000:0000:0000")
ipList := []net.IP{ipOne, ipTwo}
invalidType := &ingress.Ingress{}
expected := ""
actual := buildResolvers(invalidType, false)
// Invalid Type for []net.IP
if expected != actual {
t.Errorf("Expected '%v' but returned '%v'", expected, actual)
}
actual = buildResolvers(ipList, invalidType)
// Invalid Type for bool
if expected != actual {
t.Errorf("Expected '%v' but returned '%v'", expected, actual)
}
validResolver := "resolver 192.0.0.1 [2001:db8:1234::] valid=30s;"
resolver := buildResolvers(ipList, false)
if resolver != validResolver {
t.Errorf("Expected '%v' but returned '%v'", validResolver, resolver)
}
validResolver = "resolver 192.0.0.1 valid=30s ipv6=off;"
resolver = buildResolvers(ipList, true)
if resolver != validResolver {
t.Errorf("Expected '%v' but returned '%v'", validResolver, resolver)
}
} | explode_data.jsonl/80589 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 385
} | [
2830,
3393,
11066,
1061,
39435,
1155,
353,
8840,
836,
8,
341,
46531,
3966,
1669,
4179,
8937,
3298,
445,
16,
24,
17,
13,
15,
13,
15,
13,
16,
1138,
46531,
11613,
1669,
4179,
8937,
3298,
445,
17,
15,
15,
16,
25,
1999,
23,
25,
16,
1... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 5 |
func TestPatchOptionReturnsNotFound(t *testing.T) {
t.Parallel()
Convey("Given a Dataset API instance with a mocked data store that fails to update dimension node ID due to DimensionNodeNotFound error", t, func() {
w := httptest.NewRecorder()
mockedDataStore, isLocked := storeMockWithLock(true)
mockedDataStore.UpdateETagForOptionsFunc = func(ctx context.Context, currentInstance *models.Instance, upserts []*models.CachedDimensionOption, updates []*models.DimensionOption, eTagSelector string) (string, error) {
So(*isLocked, ShouldBeTrue)
return testETag, nil
}
mockedDataStore.UpdateDimensionsNodeIDAndOrderFunc = func(ctx context.Context, updates []*models.DimensionOption) error {
So(*isLocked, ShouldBeTrue)
return errs.ErrDimensionNodeNotFound
}
datasetAPI := getAPIWithCMDMocks(testContext, mockedDataStore, &mocks.DownloadsGeneratorMock{})
Convey("Then patch dimension option returns status not found", func() {
body := strings.NewReader(`[
{"op": "add", "path": "/node_id", "value": "11"}
]`)
r, err := createRequestWithToken(http.MethodPatch, "http://localhost:21800/instances/123/dimensions/age/options/55", body)
r.Header.Set("If-Match", testIfMatch)
So(err, ShouldBeNil)
datasetAPI.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
Convey("And the expected database calls are performed to update nodeID", func() {
validateDimensionUpdates(mockedDataStore, []*models.DimensionOption{
{
InstanceID: "123",
Name: "age",
NodeID: "11",
Option: "55",
Order: nil,
},
}, testIfMatch)
})
Convey("Then the db lock is acquired and released as expected", func() {
validateLock(mockedDataStore, "123")
So(*isLocked, ShouldBeFalse)
})
})
})
} | explode_data.jsonl/20826 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 687
} | [
2830,
3393,
43622,
5341,
16446,
10372,
1155,
353,
8840,
836,
8,
341,
3244,
41288,
7957,
2822,
93070,
5617,
445,
22043,
264,
39183,
5333,
2867,
448,
264,
46149,
821,
3553,
429,
14525,
311,
2647,
12871,
2436,
3034,
4152,
311,
27923,
1955,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestMemory_GenerateCsr(t *testing.T) {
m, err := NewWithDefault()
require.NoError(t, err)
generateCsrResp, err := m.GenerateCsr(&ca.GenerateCsrRequest{})
require.NoError(t, err)
assert.NotEmpty(t, generateCsrResp.Csr)
} | explode_data.jsonl/73854 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 93
} | [
2830,
3393,
10642,
2646,
13220,
34,
15094,
1155,
353,
8840,
836,
8,
341,
2109,
11,
1848,
1669,
1532,
2354,
3675,
741,
17957,
35699,
1155,
11,
1848,
692,
3174,
13220,
34,
15094,
36555,
11,
1848,
1669,
296,
57582,
34,
15094,
2099,
924,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestDeliverServiceServiceUnavailable(t *testing.T) {
orgEndpointDisableInterval := comm.EndpointDisableInterval
comm.EndpointDisableInterval = time.Millisecond * 1500
defer func() { comm.EndpointDisableInterval = orgEndpointDisableInterval }()
defer ensureNoGoroutineLeak(t)()
// Scenario: bring up 2 ordering service instances,
// Make the instance the client connects to fail after a delivery of a block and send SERVICE_UNAVAILABLE
// whenever subsequent seeks are sent to it.
// The client is expected to connect to the other instance, and to ask for a block sequence that is the next block
// after the last block it got from the first ordering service node.
// Wait endpoint disable interval
// After that resurrect failed node (first node) and fail instance client currently connect - send SERVICE_UNAVAILABLE
// The client should reconnect to original instance and ask for next block.
os1 := mocks.NewOrderer(5615, t)
os2 := mocks.NewOrderer(5616, t)
gossipServiceAdapter := &mocks.MockGossipServiceAdapter{GossipBlockDisseminations: make(chan uint64)}
service, err := NewDeliverService(&Config{
Endpoints: []string{"localhost:5615", "localhost:5616"},
Gossip: gossipServiceAdapter,
CryptoSvc: &mockMCS{},
ABCFactory: DefaultABCFactory,
ConnFactory: DefaultConnectionFactory,
})
assert.NoError(t, err)
li := &mocks.MockLedgerInfo{Height: 100}
os1.SetNextExpectedSeek(li.Height)
os2.SetNextExpectedSeek(li.Height)
err = service.StartDeliverForChannel("TEST_CHAINID", li, func() {})
assert.NoError(t, err, "can't start delivery")
waitForConnectionToSomeOSN := func() (*mocks.Orderer, *mocks.Orderer) {
for {
if os1.ConnCount() > 0 {
return os1, os2
}
if os2.ConnCount() > 0 {
return os2, os1
}
time.Sleep(time.Millisecond * 100)
}
}
activeInstance, backupInstance := waitForConnectionToSomeOSN()
assert.NotNil(t, activeInstance)
assert.NotNil(t, backupInstance)
// Check that delivery client get connected to active
assert.Equal(t, activeInstance.ConnCount(), 1)
// and not connected to backup instances
assert.Equal(t, backupInstance.ConnCount(), 0)
// Send first block
go activeInstance.SendBlock(li.Height)
assertBlockDissemination(li.Height, gossipServiceAdapter.GossipBlockDisseminations, t)
li.Height++
// Backup instance should expect a seek of 101 since we got 100
backupInstance.SetNextExpectedSeek(li.Height)
// Have backup instance prepare to send a block
backupInstance.SendBlock(li.Height)
// Fail instance delivery client connected to
activeInstance.Fail()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
wg := sync.WaitGroup{}
wg.Add(1)
go func(ctx context.Context) {
defer wg.Done()
for {
select {
case <-time.After(time.Millisecond * 100):
if backupInstance.ConnCount() > 0 {
return
}
case <-ctx.Done():
return
}
}
}(ctx)
wg.Wait()
assert.NoError(t, ctx.Err(), "Delivery client has not failed over to alive ordering service")
// Check that delivery client was indeed connected
assert.Equal(t, backupInstance.ConnCount(), 1)
// Ensure the client asks blocks from the other ordering service node
assertBlockDissemination(li.Height, gossipServiceAdapter.GossipBlockDisseminations, t)
// Wait until first endpoint enabled again
time.Sleep(time.Millisecond * 1600)
li.Height++
activeInstance.Resurrect()
backupInstance.Fail()
resurrectCtx, resCancel := context.WithTimeout(context.Background(), time.Second)
defer resCancel()
go func() {
// Resurrected instance should expect a seek of 102 since we got 101
activeInstance.SetNextExpectedSeek(li.Height)
// Have resurrected instance prepare to send a block
activeInstance.SendBlock(li.Height)
}()
reswg := sync.WaitGroup{}
reswg.Add(1)
go func() {
defer reswg.Done()
for {
select {
case <-time.After(time.Millisecond * 100):
if activeInstance.ConnCount() > 0 {
return
}
case <-resurrectCtx.Done():
return
}
}
}()
reswg.Wait()
assert.NoError(t, resurrectCtx.Err(), "Delivery client has not failed over to alive ordering service")
// Check that delivery client was indeed connected
assert.Equal(t, activeInstance.ConnCount(), 1)
// Ensure the client asks blocks from the other ordering service node
assertBlockDissemination(li.Height, gossipServiceAdapter.GossipBlockDisseminations, t)
// Cleanup
os1.Shutdown()
os2.Shutdown()
service.Stop()
} | explode_data.jsonl/10580 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 1488
} | [
2830,
3393,
16532,
1524,
1860,
1860,
92928,
1155,
353,
8840,
836,
8,
341,
87625,
27380,
25479,
10256,
1669,
1063,
90409,
25479,
10256,
198,
197,
3621,
90409,
25479,
10256,
284,
882,
71482,
353,
220,
16,
20,
15,
15,
198,
16867,
2915,
368... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestAssertXML(t *testing.T) {
cases := []struct {
e, a string
asserts bool
container struct {
XMLName xml.Name `xml:"OperationRequest"`
NS string `xml:"xmlns,attr"`
RecursiveStruct struct {
RecursiveMap struct {
Entries []struct {
XMLName xml.Name `xml:"entries"`
Key string `xml:"key"`
Value struct {
XMLName xml.Name `xml:"value"`
NoRecurse string
}
}
}
}
}
}{
{
e: `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap xmlns="https://foo/"><entry xmlns="https://foo/"><key xmlns="https://foo/">foo</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></value></entry><entry xmlns="https://foo/"><key xmlns="https://foo/">bar</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`,
a: `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap xmlns="https://foo/"><entry xmlns="https://foo/"><key xmlns="https://foo/">bar</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></value></entry><entry xmlns="https://foo/"><key xmlns="https://foo/">foo</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`,
asserts: true,
},
}
for i, c := range cases {
// mockT := &testing.T{}
if awstesting.AssertXML(t, c.e, c.a, c.container) != c.asserts {
t.Error("Assert XML result was not expected.", i)
}
}
} | explode_data.jsonl/6220 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 682
} | [
2830,
3393,
8534,
10609,
1155,
353,
8840,
836,
8,
341,
1444,
2264,
1669,
3056,
1235,
341,
197,
7727,
11,
264,
414,
914,
198,
197,
6948,
82,
256,
1807,
198,
197,
53290,
2036,
341,
298,
197,
10609,
675,
260,
8396,
2967,
1565,
6455,
29... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 3 |
// TestTimer_AfterFuncWithAfterFuncFinishedCallback schedules testCount
// one-second timers through AfterFuncWithAfterFuncFinishedCallback and checks
// that each callback fires within the allowed precision window and that every
// scheduled callback actually ran before the test ends.
func TestTimer_AfterFuncWithAfterFuncFinishedCallback(t *testing.T) {
	// Count of timer callbacks observed so far.
	// NOTE(review): i is incremented inside the timer callback and read at the
	// end of the test with no synchronization; if testTimer runs its workers
	// concurrently this is a data race — confirm with -race and consider
	// sync/atomic if so.
	i := int32(0)
	testCount := int32(2000)
	testTimer(testCount, func(timer *s_timer.Timer, index int32, finish chan int64) {
		// Always signal this worker's completion back to testTimer.
		defer func() {
			finish <- 0
		}()
		time1 := time.Now().UnixNano()
		// Buffered so the timer callback can send without blocking.
		resultChan := make(chan int64, 1)
		timer.AfterFuncWithAfterFuncFinishedCallback(time.Second, func() {
			time2 := time.Now().UnixNano()
			i++
			resultChan <- time2 - time1
		}, func(id uint64) {
			// Finished-callback deliberately left empty; not under test here.
		})
		// Wait for the timer to fire and measure the elapsed nanoseconds.
		consumeTime := <-resultChan
		// The observed delay may exceed one second by at most `precision` ms.
		if consumeTime > (time.Second + time.Millisecond*precision).Nanoseconds() {
			t.Error("timeout", index, consumeTime)
		}
	})
	// Every scheduled callback must have incremented the counter exactly once.
	if i != testCount {
		t.Error("no sync!")
	}
}
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 274
} | [
2830,
3393,
10105,
1566,
1046,
9626,
2354,
6025,
9626,
24890,
7494,
1155,
353,
8840,
836,
8,
341,
8230,
1669,
526,
18,
17,
7,
15,
340,
18185,
2507,
1669,
526,
18,
17,
7,
17,
15,
15,
15,
340,
18185,
10105,
8623,
2507,
11,
2915,
428... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestPrepareForGroupByMultiItems(t *testing.T) {
store, clean := testkit.CreateMockStore(t)
defer clean()
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, c int , index idx(a));")
tk.MustExec("insert into t values(1,2, -1), (1,2, 1), (1,2, -1), (4,4,3);")
tk.MustExec("set @a=1")
tk.MustExec("set @b=3")
tk.MustExec(`set sql_mode=""`)
tk.MustExec(`prepare stmt from "select a, sum(b), c from t group by ?, ? order by ?, ?"`)
tk.MustQuery("select a, sum(b), c from t group by 1,3 order by 1,3;").Check(testkit.Rows("1 4 -1", "1 2 1", "4 4 3"))
tk.MustQuery(`execute stmt using @a, @b, @a, @b`).Check(testkit.Rows("1 4 -1", "1 2 1", "4 4 3"))
tk.MustExec("set @c=10")
require.EqualError(t, tk.ExecToErr("execute stmt using @a, @c, @a, @c"), "Unknown column '10' in 'group statement'")
tk.MustExec("set @v1=1.0")
tk.MustExec("set @v2=3.0")
tk.MustExec(`prepare stmt2 from "select sum(b) from t group by ?, ?"`)
tk.MustQuery(`execute stmt2 using @v1, @v2`).Check(testkit.Rows("10"))
} | explode_data.jsonl/5524 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 479
} | [
2830,
3393,
50590,
2461,
2808,
1359,
20358,
4353,
1155,
353,
8840,
836,
8,
341,
57279,
11,
4240,
1669,
1273,
8226,
7251,
11571,
6093,
1155,
340,
16867,
4240,
741,
3244,
74,
1669,
1273,
8226,
7121,
2271,
7695,
1155,
11,
3553,
692,
3244,
... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestHyphenInMultiOption(t *testing.T) {
var args struct {
Foo []string
Bar int
}
err := parse("--foo --- x - y --bar 3", &args)
require.NoError(t, err)
assert.Equal(t, []string{"---", "x", "-", "y"}, args.Foo)
assert.Equal(t, 3, args.Bar)
} | explode_data.jsonl/13069 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 110
} | [
2830,
3393,
30816,
14769,
641,
20358,
5341,
1155,
353,
8840,
836,
8,
341,
2405,
2827,
2036,
341,
197,
12727,
2624,
3056,
917,
198,
197,
197,
3428,
526,
198,
197,
532,
9859,
1669,
4715,
21549,
7975,
12448,
856,
481,
379,
1177,
2257,
22... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 1 |
func TestBuildConditions(t *testing.T) {
rev := &Build{}
foo := &BuildCondition{
Type: "Foo",
Status: "True",
}
bar := &BuildCondition{
Type: "Bar",
Status: "True",
}
// Add a new condition.
rev.Status.SetCondition(foo)
if len(rev.Status.Conditions) != 1 {
t.Fatalf("Unexpected Condition length; want 1, got %d", len(rev.Status.Conditions))
}
// Remove a non-existent condition.
rev.Status.RemoveCondition(bar.Type)
if len(rev.Status.Conditions) != 1 {
t.Fatalf("Unexpected Condition length; want 1, got %d", len(rev.Status.Conditions))
}
if got, want := rev.Status.GetCondition(foo.Type), foo; !reflect.DeepEqual(got, want) {
t.Errorf("GetCondition() = %v, want %v", got, want)
}
// Add a second condition.
rev.Status.SetCondition(bar)
if len(rev.Status.Conditions) != 2 {
t.Fatalf("Unexpected Condition length; want 2, got %d", len(rev.Status.Conditions))
}
// Remove an existing condition.
rev.Status.RemoveCondition(bar.Type)
if len(rev.Status.Conditions) != 1 {
t.Fatalf("Unexpected Condition length; want 1, got %d", len(rev.Status.Conditions))
}
} | explode_data.jsonl/15626 | {
"file_path": "/home/dung/Study/Code/Cross_test_gen/training_dataset/dedup_data/clean_data_go/data/explode_data.jsonl",
"token_count": 410
} | [
2830,
3393,
11066,
35435,
1155,
353,
8840,
836,
8,
341,
197,
7282,
1669,
609,
11066,
16094,
197,
7975,
1669,
609,
11066,
10547,
515,
197,
27725,
25,
256,
330,
40923,
756,
197,
58321,
25,
330,
2514,
756,
197,
532,
90709,
1669,
609,
110... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.